filename (stringlengths 13–19) | text (stringlengths 134–1.04M)
---|---
the-stack_106_31878 | # -*- coding: utf-8 -*-
class Customer(object):
"""Implementation of the 'Customer' model.
The finicity customer record
Attributes:
id (string): Finicity’s ID for the customer
username (string): The username associated with the customer
first_name (string): The first name associated with the customer
last_name (string): The last name associated with the customer
mtype (CustomerTypeEnum): active or testing
created_date (string): The date the customer was created
"""
# Create a mapping from Model property names to API property names
_names = {
"id":'id',
"username":'username',
"first_name":'firstName',
"last_name":'lastName',
"mtype":'type',
"created_date":'createdDate'
}
def __init__(self,
id=None,
username=None,
first_name=None,
last_name=None,
mtype=None,
created_date=None,
additional_properties=None):
"""Constructor for the Customer class"""
# Initialize members of the class
self.id = id
self.username = username
self.first_name = first_name
self.last_name = last_name
self.mtype = mtype
self.created_date = created_date
# Add additional model properties to the instance
self.additional_properties = additional_properties if additional_properties is not None else {}  # avoid a shared mutable default
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = dictionary.get('id')
username = dictionary.get('username')
first_name = dictionary.get('firstName')
last_name = dictionary.get('lastName')
mtype = dictionary.get('type')
created_date = dictionary.get('createdDate')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(id,
username,
first_name,
last_name,
mtype,
created_date,
dictionary)
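# Illustrative sketch (not part of the original model): building a Customer from a
# hypothetical API payload via from_dictionary. Keys not listed in _names are kept as
# additional_properties. All sample values below are made up.
if __name__ == "__main__":
    payload = {
        "id": "1005061234",
        "username": "c_demo",
        "firstName": "Ada",
        "lastName": "Lovelace",
        "type": "testing",
        "createdDate": "1588599600",
        "extraField": "kept as an additional property",
    }
    customer = Customer.from_dictionary(payload)
    print(customer.first_name, customer.mtype)   # -> Ada testing
    print(customer.additional_properties)        # -> {'extraField': 'kept as an additional property'}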
|
the-stack_106_31882 | """scrapli.driver.network_driver"""
import logging
import re
import warnings
from abc import ABC, abstractmethod
from collections import namedtuple
from typing import Any, Dict, List, Optional, Union
from scrapli.driver.generic_driver import GenericDriver
from scrapli.exceptions import CouldNotAcquirePrivLevel, UnknownPrivLevel
from scrapli.helper import get_prompt_pattern
from scrapli.response import Response
PrivilegeLevel = namedtuple(
"PrivilegeLevel",
"pattern "
"name "
"deescalate_priv "
"deescalate "
"escalate_priv "
"escalate "
"escalate_auth "
"escalate_prompt "
"requestable "
"level",
)
NoPrivLevel = PrivilegeLevel("", "", "", "", "", "", "", "", "", "")
PRIVS: Dict[str, PrivilegeLevel] = {}
LOG = logging.getLogger("scrapli_base")
class NetworkDriver(GenericDriver, ABC):
@abstractmethod
def __init__(
self, auth_secondary: str = "", **kwargs: Any,
):
"""
BaseNetworkDriver Object
Args:
auth_secondary: password to use for secondary authentication (enable)
**kwargs: keyword args to pass to inherited class(es)
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
super().__init__(**kwargs)
self.auth_secondary = auth_secondary
self.privs = PRIVS
self.default_desired_priv: str = ""
self._current_priv_level = NoPrivLevel
self.textfsm_platform: str = ""
self.genie_platform: str = ""
self.failed_when_contains: List[str] = []
def _determine_current_priv(self, current_prompt: str) -> PrivilegeLevel:
"""
Determine current privilege level from prompt string
Args:
current_prompt: string of current prompt
Returns:
PrivilegeLevel: NamedTuple of current privilege level
Raises:
UnknownPrivLevel: if privilege level cannot be determined
"""
for priv_level in self.privs.values():
prompt_pattern = get_prompt_pattern("", priv_level.pattern)
if re.search(prompt_pattern, current_prompt.encode()):
return priv_level
raise UnknownPrivLevel
def _escalate(self) -> None:
"""
Escalate to the next privilege level up
Args:
N/A
Returns:
N/A # noqa: DAR202
Raises:
UnknownPrivLevel: if priv level cant be attained
TypeError: if invalid next prompt value
"""
current_priv = self._determine_current_priv(self.channel.get_prompt())
if not current_priv.escalate:
return
next_priv = self.privs.get(current_priv.escalate_priv, None)
if next_priv is None:
raise UnknownPrivLevel(
f"Could not get next priv level, current priv is {current_priv.name}"
)
next_prompt = next_priv.pattern
if current_priv.escalate_auth:
if not self.auth_secondary:
err = (
"Privilege escalation generally requires an `auth_secondary` password, "
"but none is set!"
)
msg = f"***** {err} {'*' * (80 - len(err))}"
fix = (
"scrapli will try to escalate privilege without entering a password but may "
"fail.\nSet an `auth_secondary` password if your device requires a password to "
"increase privilege, otherwise ignore this message."
)
warning = "\n" + msg + "\n" + fix + "\n" + msg
warnings.warn(warning)
else:
escalate_cmd: str = current_priv.escalate
escalate_prompt: str = current_priv.escalate_prompt
escalate_auth = self.auth_secondary
if not isinstance(next_prompt, str):
raise TypeError(
f"got {type(next_prompt)} for {current_priv.name} escalate priv, "
"expected str"
)
self.channel.send_inputs_interact(
[escalate_cmd, escalate_prompt, escalate_auth, next_prompt],
hidden_response=True,
)
self.channel.comms_prompt_pattern = next_priv.pattern
return
self.channel.comms_prompt_pattern = next_priv.pattern
self.channel.send_input(current_priv.escalate)
def _deescalate(self) -> None:
"""
Deescalate to the next privilege level down
Args:
N/A
Returns:
N/A # noqa: DAR202
Raises:
UnknownPrivLevel: if no default priv level set to deescalate to
"""
current_priv = self._determine_current_priv(self.channel.get_prompt())
if current_priv.deescalate:
next_priv = self.privs.get(current_priv.deescalate_priv, None)
if not next_priv:
raise UnknownPrivLevel(
"NetworkDriver has no default priv levels, set them or use a network driver"
)
self.channel.comms_prompt_pattern = next_priv.pattern
self.channel.send_input(current_priv.deescalate)
def acquire_priv(self, desired_priv: str) -> None:
"""
Acquire desired priv level
Args:
desired_priv: string name of desired privilege level
(see scrapli.driver.<driver_category.device_type>.driver for levels)
Returns:
N/A # noqa: DAR202
Raises:
CouldNotAcquirePrivLevel: if requested priv level not attained
"""
priv_attempt_counter = 0
while True:
current_priv = self._determine_current_priv(self.channel.get_prompt())
if current_priv == self.privs[desired_priv]:
self._current_priv_level = current_priv
return
if priv_attempt_counter > len(self.privs):
raise CouldNotAcquirePrivLevel(
f"Could not get to '{desired_priv}' privilege level."
)
if current_priv.level > self.privs[desired_priv].level:
self._deescalate()
else:
self._escalate()
priv_attempt_counter += 1
def _update_response(self, response: Response) -> None:
"""
Update response with network driver specific data
This happens here as the underlying channel provides a response object but is unaware of any
of the network/platform specific attributes that may need to get updated
Args:
response: response to update
Returns:
N/A # noqa: DAR202
Raises:
N/A
"""
response.textfsm_platform = self.textfsm_platform
response.genie_platform = self.genie_platform
def send_command(
self,
command: str,
strip_prompt: bool = True,
failed_when_contains: Optional[Union[str, List[str]]] = None,
) -> Response:
"""
Send a command
Super method will raise TypeError if anything but a string is passed here!
Args:
command: string to send to device in privilege exec mode
strip_prompt: True/False strip prompt from returned output
failed_when_contains: string or list of strings indicating failure if found in response
Returns:
Response: Scrapli Response object
Raises:
N/A
"""
if self._current_priv_level.name != self.default_desired_priv:
self.acquire_priv(self.default_desired_priv)
if failed_when_contains is None:
failed_when_contains = self.failed_when_contains
response = super().send_command(
command, strip_prompt=strip_prompt, failed_when_contains=failed_when_contains
)
self._update_response(response)
return response
def send_commands(
self,
commands: List[str],
strip_prompt: bool = True,
failed_when_contains: Optional[Union[str, List[str]]] = None,
) -> List[Response]:
"""
Send multiple commands
Super method will raise TypeError if anything but a list of strings is passed here!
Args:
commands: list of strings to send to device in privilege exec mode
strip_prompt: True/False strip prompt from returned output
failed_when_contains: string or list of strings indicating failure if found in response
Returns:
responses: list of Scrapli Response objects
Raises:
N/A
"""
if self._current_priv_level.name != self.default_desired_priv:
self.acquire_priv(self.default_desired_priv)
if failed_when_contains is None:
failed_when_contains = self.failed_when_contains
responses = super().send_commands(
commands, strip_prompt=strip_prompt, failed_when_contains=failed_when_contains
)
for response in responses:
self._update_response(response)
return responses
def send_interactive(self, interact: List[str], hidden_response: bool = False) -> Response:
"""
Send inputs in an interactive fashion; used to handle prompts
accepts inputs and looks for expected prompt;
sends the appropriate response, then waits for the "finale"
returns the results of the interaction
could be "chained" together to respond to more than a "single" staged prompt
Args:
interact: list of four string elements representing...
channel_input - initial input to send
expected_prompt - prompt to expect after initial input
response - response to prompt
final_prompt - final prompt to expect
hidden_response: True/False response is hidden (i.e. password input)
Returns:
Response: scrapli Response object
Raises:
N/A
"""
if self._current_priv_level.name != self.default_desired_priv:
self.acquire_priv(self.default_desired_priv)
response = self.channel.send_inputs_interact(interact, hidden_response)
return response
def send_configs(
self,
configs: Union[str, List[str]],
strip_prompt: bool = True,
failed_when_contains: Optional[Union[str, List[str]]] = None,
) -> List[Response]:
"""
Send configuration(s)
Args:
configs: string or list of strings to send to device in config mode
strip_prompt: True/False strip prompt from returned output
failed_when_contains: string or list of strings indicating failure if found in response
Returns:
responses: List of Scrape Response objects
Raises:
N/A
"""
if isinstance(configs, str):
configs = [configs]
self.acquire_priv("configuration")
if failed_when_contains is None:
failed_when_contains = self.failed_when_contains
responses = []
for config in configs:
responses.append(
super().send_command(
config, strip_prompt=strip_prompt, failed_when_contains=failed_when_contains
)
)
for response in responses:
self._update_response(response)
self.acquire_priv(self.default_desired_priv)
return responses
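# Illustrative sketch (not how scrapli ships its platform data): concrete platform
# drivers populate PRIVS with PrivilegeLevel entries. The "exec" level below is
# hypothetical; field values are only meant to show what each slot is for.
if __name__ == "__main__":
    exec_priv = PrivilegeLevel(
        pattern=r"^[a-z0-9.\-@()/:]{1,32}>$",  # prompt regex identifying this level
        name="exec",
        deescalate_priv="",                    # nothing below exec
        deescalate="",
        escalate_priv="privilege_exec",        # name of the next level up
        escalate="enable",                     # command sent to escalate
        escalate_auth=True,                    # escalation prompts for a password
        escalate_prompt=r"^Password:\s?$",
        requestable=True,
        level=1,
    )
    print(exec_priv.name, "->", exec_priv.escalate_priv)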
|
the-stack_106_31883 | #
# Copyright (c) 2017-2020 cTuning foundation.
# See CK COPYRIGHT.txt for copyright details.
#
# See CK LICENSE for licensing details.
# See CK COPYRIGHT for copyright details.
#
# Convert raw output of a GEMM test program to the CK format.
#
# Developer(s):
# - Anton Lokhmotov, dividiti, 2017, 2020
#
import json
import os
import re
import struct
def ck_preprocess(i):
ck=i['ck_kernel']
rt=i['run_time']
meta=i['meta']
env=i['env']
new_env = {}
files_to_push = []
if i['target_os_dict'].get('remote','') == 'yes' and env.get('CK_PUSH_LIBS_TO_REMOTE', 'yes').lower() == 'yes':
lib_dir = i['deps']['library']['dict']['env'].get('CK_ENV_LIB_ARMCL')
lib_name = i['deps']['library']['dict']['env'].get('CK_ENV_LIB_ARMCL_DYNAMIC_CORE_NAME')
new_env['CK_ENV_ARMCL_CORE_LIB_PATH'] = os.path.join(lib_dir, 'lib', lib_name)
files_to_push.append("$<<CK_ENV_ARMCL_CORE_LIB_PATH>>$")
files_to_push.append("$<<CK_ENV_LIB_STDCPP_DYNAMIC>>$")
return {'return': 0, 'new_env': new_env, 'run_input_files': files_to_push}
def ck_postprocess(i):
ck=i['ck_kernel']
rt=i['run_time']
env=i['env']
deps=i['deps']
# Dictionary to return.
d={}
# Load xOpenME output.
r=ck.load_json_file({'json_file':rt['fine_grain_timer_file']})
if r['return']>0: return r
d=r['dict']
drts=d.get('run_time_state',{})
# Save final environment variables (can be changed in the pipeline)
d['env']={}
for k in env:
d['env'][k]=env[k]
rr={}
rr['return']=0
# Call process output vector
r=ck.access({'action':'run', 'module_uoa':'script', 'data_uoa':'process-nntest',
'code':'output', 'func':'process',
'dict':{'file_in':d['env']['CK_OUT_RAW_DATA'],
'file_out':'tmp-ck-output.json',
'data':d, 'env':env, 'deps':deps}})
if r['return']>0: return r
# Call dvdt prof script
r=ck.access({'action':'run', 'module_uoa':'script', 'data_uoa':'ctuning.process.dvdt-prof',
'code':'dvdt_prof', 'func':'process',
'dict':{'file_in':rt['run_cmd_out1'], 'file_out':'tmp-dvdt-prof.json',
'data':d, 'env':env, 'deps':deps}})
if r['return']>0: return r
# Call MALI HWC collector
r=ck.access({'action':'run', 'module_uoa':'script', 'data_uoa': 'mali-hwc',
'code':'process', 'func':'read',
'dict':{'data':d, 'env':env, 'deps':deps, 'continue_if_no_file':'yes'}})
if r['return']==0:
if env.get('CK_ADD_RAW_MALI_HWC','').lower()=='yes':
d['mali_hwc']=r['hwc']
# Process total time
total_time=0.0
if drts.get('time_setup',0.0)!=0.0: total_time+=drts['time_setup']
if drts.get('time_test',0.0)!=0.0: total_time+=drts['time_test']
d['execution_time']=total_time
d['execution_time_kernel_0']=total_time
# Calculate floating-point operations (flops) for C[M][N] = alpha * A[M][K] * B[K][N] + beta * C[M][N]
# and then billion flops per second (GFLOPS).
alpha = float(env.get('CK_GEMM_ALPHA', '1.0'))
beta = float(env.get('CK_GEMM_BETA', '0.0'))
K = int(env.get('CK_GEMM_K', '1024'))
M = int(env.get('CK_GEMM_M', '1024'))
N = int(env.get('CK_GEMM_N', '1024'))
flops = 0
if alpha != 0.0: # multiply matrix A by matrix B
flops += M * N * 2 * K
if alpha != 1.0: # multiply by scalar alpha
flops += M * N
if beta != 0.0:
if beta != 1.0:
flops += M * N # multiply matrix C by scalar beta
flops += M * N # add matrix (alpha A * B) and matrix (beta * C)
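# Worked example with the defaults above (M = N = K = 1024, alpha = 1.0, beta = 0.0):
# only the matrix product counts, so flops = 1024 * 1024 * 2 * 1024 = 2,147,483,648
# (~2.147 Gflops); GFLOPS then divides that by the measured 'time_test' in seconds.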
Gflops = 1e-9 * flops
GFLOPS = Gflops / drts['time_test']
ck.out('GFLOPS = {0:.3f}'.format(GFLOPS))
d['flops'] = flops
d['Gflops'] = Gflops
d['GFLOPS'] = GFLOPS
if d.get('post_processed','')=='yes':
r=ck.save_json_to_file({'json_file':rt['fine_grain_timer_file'], 'dict':d, 'sort_keys':'yes'})
if r['return']>0: return r
else:
rr['return']=1
rr['error']='failed to find required info in test output!'
return rr
def ck_check_output(i):
ck=i['ck_kernel']
env=i.get('env',{})
r=ck.access({'action':'check_numerical',
'module_uoa':'program.output',
'file1':i['file1'],
'file2':i['file2'],
'abs_threshold':env.get('CK_ABS_DIFF_THRESHOLD','')})
return r
# Do not add anything here!
|
the-stack_106_31884 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.utils import set_config
from tests.helpers import *
from freezegun import freeze_time
def test_api_hint_404():
"""Are admin protected resources accessible by admins/non-admins"""
app = create_ctfd()
endpoints = ['/api/v1/configs/{}',
'/api/v1/challenges/types',
'/api/v1/statistics/teams',
'/api/v1/flags/{}',
'/api/v1/statistics/users/{}',
'/api/v1/configs',
'/api/v1/statistics/challenges/solves/percentages',
'/api/v1/tags/{}',
'/api/v1/pages',
'/api/v1/files/{}',
'/api/v1/challenges/{}/tags',
'/api/v1/hints',
'/api/v1/challenges/{}/files',
'/api/v1/flags',
'/api/v1/submissions/{}',
'/api/v1/challenges/{}/flags',
'/api/v1/awards/{}',
'/api/v1/unlocks',
'/api/v1/challenges/{}/hints',
'/api/v1/statistics/submissions/{}',
'/api/v1/flags/types/{}',
'/api/v1/tags',
'/api/v1/statistics/challenges/{}',
'/api/v1/files',
'/api/v1/flags/types',
'/api/v1/submissions',
'/api/v1/pages/{}']
with app.app_context():
register_user(app)
client = login_as_user(app)
for endpoint in endpoints:
r = client.get(endpoint.format(1))
assert r.status_code == 302
assert r.location.startswith('http://localhost/login')
destroy_ctfd(app)
|
the-stack_106_31888 | from django.contrib.postgres.search import SearchVector
from celery import shared_task
from .models import ProductSKU
@shared_task
def update_search_vector(obj_id):
product = ProductSKU.objects.get(id=obj_id)
product.search_vector = (
SearchVector('name', weight='A')
+ SearchVector('detail', weight='B')
+ SearchVector('summary', weight='C')
)
product.save(update_fields=['search_vector'])  # persist the recomputed vector
print('search vector updated!')
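# Illustrative call site (hypothetical, not part of this module): the task is typically
# queued after a product is saved, e.g. from a post_save signal or a model save hook:
#   update_search_vector.delay(product_sku.id)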
|
the-stack_106_31889 | #!/usr/bin/python3
import csv
from operator import itemgetter
import sys
current_venue = ""
current_bat = ""
current_runs=0
current_deli=0
li=[]
tot_list=[]
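# Expected mapper output on stdin (illustrative line; field meanings inferred from the
# code below): "venue,batsman,runs,deliveries". Venues whose own name contains a comma
# (e.g. "Some Stadium, City") arrive as 5 comma-separated fields and are re-joined.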
for line in sys.stdin:
line = line.strip()
line_val = line.split(",")
if(len(line_val)==5):
venue, bat, runs, deli = line_val[0]+','+line_val[1], line_val[2], line_val[3], line_val[4]
else:
venue, bat, runs, deli = line_val[0], line_val[1], line_val[2], line_val[3]
#print(venue, bat, runs,deli)
try:
runs=int(runs)
deli=int(deli)
except ValueError:
continue
if(current_venue == venue and current_bat == bat):
current_runs += runs
current_deli += deli
else:
if current_venue:
li.extend([current_venue,current_bat,current_runs,current_deli])
#print(li)
tot_list.append(li)
current_venue = venue
current_bat = bat
current_runs = runs
current_deli = deli
li=[]
if current_venue == venue and current_bat == bat:
li.extend([current_venue,current_bat,current_runs,current_deli])
#print(li)
tot_list.append(li)
tot_list.sort()
final_list=list(filter(lambda x:x[3]>=10,tot_list))
current_venue = final_list[0][0]
hero=final_list[0][1]
max_strike=(100*final_list[0][2])/final_list[0][3]
max_runs=final_list[0][2]
for l in final_list[1:]:
strike=(100*l[2])/l[3]
if(current_venue == l[0] and strike>max_strike):
hero=l[1]
max_runs=l[2]
max_strike=strike
elif(current_venue == l[0] and strike==max_strike and l[2]>max_runs):
hero=l[1]
max_runs=l[2]
elif(l[0]!=current_venue):
print(current_venue,',',hero,sep="")
current_venue=l[0]
hero=l[1]
max_runs=l[2]
max_strike=strike
print(current_venue,',',hero,sep="")
#print(l[0],',',l[1],',',l[2],',',l[3],strike)
|
the-stack_106_31893 | import logging
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.param_functions import Header
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials, SecurityScopes
from fastapi.security import utils as security_utils
from dependency_injector import wiring
from karp import bootstrap, services
from karp.domain import model
from karp.services.auth_service import AuthService
# from karp.auth.auth import auth
from karp.errors import ClientErrorCodes, KarpError
from .containers import WebAppContainer
# bus = bootstrap.bootstrap()
auth_scheme = HTTPBearer()
logger = logging.getLogger("karp")
def bearer_scheme(authorization=Header(None)):
if not authorization:
return None
# authorization: str = authorization.get("Authorization")
scheme, credentials = security_utils.get_authorization_scheme_param(authorization)
if not (scheme and credentials):
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
@wiring.inject
def get_current_user(
security_scopes: SecurityScopes,
credentials: Optional[HTTPAuthorizationCredentials] = Depends(bearer_scheme),
auth_service: AuthService = Depends(wiring.Provide[WebAppContainer.auth_service]),
) -> Optional[model.User]:
if not credentials:
return None
if security_scopes.scopes:
authenticate_value = f'Bearer scope="{security_scopes.scope_str}"'
else:
authenticate_value = "Bearer"
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": authenticate_value},
# code=ClientErrorCodes.NOT_PERMITTED,
)
try:
logger.debug(
"webapp.app_config.get_current_user: Calling auth_service with credentials = %s",
credentials,
)
return auth_service.authenticate(credentials.scheme, credentials.credentials)
except KarpError:
raise credentials_exception
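# Illustrative usage sketch (hypothetical route, not part of this module): the dependency
# is intended to be wired into FastAPI endpoints roughly like
#   @router.get("/resources")
#   def list_resources(user: model.User = Security(get_current_user, scopes=["read"])):
#       ...
# where Security comes from fastapi and the scopes populate SecurityScopes above.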
|
the-stack_106_31894 | import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="namelengthsrc",
parent_name="scatterpolargl.hoverlabel",
**kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
the-stack_106_31895 | import os
import unittest
#import satsearch.config as config
#from satsearch.scene import Scenes
from datetime import datetime
from satstac import utils
class Test(unittest.TestCase):
path = os.path.dirname(__file__)
remote_url = 'https://landsat-stac.s3.amazonaws.com/catalog.json'
def test_dict_merge(self):
dict1 = {
'key1': {
'subkey1': 'val'
}
}
dict2 = {
'key1': {
'subkey2': 'val'
},
'key2': 'val'
}
_dict = utils.dict_merge(dict1, dict2)
assert('key1' in _dict)
assert('key2' in _dict)
assert('subkey1' in _dict['key1'])
assert('subkey2' in _dict['key1'])
_dict = utils.dict_merge(dict1, dict2, add_keys=False)
assert('key1' in _dict)
assert('key2' not in _dict)
assert('subkey1' in _dict['key1'])
assert('subkey2' not in _dict['key1'])
def test_download_nosuchfile(self):
with self.assertRaises(Exception):
utils.download_file('http://nosuchfile')
def test_get_s3_signed_url(self):
url = utils.get_s3_signed_url(self.remote_url)
assert(len(url) == 2)
def test_get_s3_public_url(self):
envs = dict(os.environ)
if 'AWS_ACCESS_KEY_ID' in envs:
del os.environ['AWS_ACCESS_KEY_ID']
if 'AWS_BUCKET_ACCESS_KEY_ID' in envs:
del os.environ['AWS_BUCKET_ACCESS_KEY_ID']
url = utils.get_s3_signed_url(self.remote_url)
assert(len(url) == 2)
assert(url[0] == self.remote_url)
assert(url[1] is None)
os.environ.clear()
os.environ.update(envs)
def test_terminal_calendar(self):
""" Get calendar """
events = {
datetime(2018,1,1).date(): "event1",
datetime(2018,4,25).date(): "event2"
}
cal = utils.terminal_calendar(events)
self.assertEqual(len(cal), 1136)
self.assertTrue(' 2018 ' in cal)
self.assertTrue(' January ' in cal)
self.assertTrue(' March ' in cal)
def test_empty_terminal_calendar(self):
cal = utils.terminal_calendar({})
print(cal)
|
the-stack_106_31896 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: k8s_auth
short_description: Authenticate to Kubernetes clusters which require an explicit login step
author: KubeVirt Team (@kubevirt)
description:
- "This module handles authenticating to Kubernetes clusters requiring I(explicit) authentication procedures,
meaning ones where a client logs in (obtains an authentication token), performs API operations using said
token and then logs out (revokes the token). An example of a Kubernetes distribution requiring this module
is OpenShift."
- "On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
resource–specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
consult your preferred module's documentation for more details."
options:
state:
description:
- If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
- If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
default: present
choices:
- present
- absent
type: str
host:
description:
- Provide a URL for accessing the API server.
required: true
type: str
username:
description:
- Provide a username for authenticating with the API server.
type: str
password:
description:
- Provide a password for authenticating with the API server.
type: str
ca_cert:
description:
- "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
must be provided to avoid certificate validation errors."
aliases: [ ssl_ca_cert ]
type: path
validate_certs:
description:
- "Whether or not to verify the API server's SSL certificates."
type: bool
default: true
aliases: [ verify_ssl ]
api_key:
description:
- When C(state) is set to I(absent), this specifies the token to revoke.
type: str
requirements:
- python >= 2.7
- urllib3
- requests
- requests-oauthlib
'''
EXAMPLES = r'''
- hosts: localhost
module_defaults:
group/k8s:
host: https://k8s.example.com/
ca_cert: ca.pem
tasks:
- block:
# It's good practice to store login credentials in a secure vault and not
# directly in playbooks.
- include_vars: k8s_passwords.yml
- name: Log in (obtain access token)
kubernetes.core.k8s_auth:
username: admin
password: "{{ k8s_admin_password }}"
register: k8s_auth_results
# Previous task provides the token/api_key, while all other parameters
# are taken from module_defaults
- name: Get a list of all pods from any namespace
kubernetes.core.k8s_info:
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
kind: Pod
register: pod_list
always:
- name: If login succeeded, try to log out (revoke access token)
when: k8s_auth_results.k8s_auth.api_key is defined
kubernetes.core.k8s_auth:
state: absent
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
'''
# Returned value names need to match k8s modules parameter names, to make it
# easy to pass returned values of k8s_auth to other k8s modules.
# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
RETURN = r'''
k8s_auth:
description: Kubernetes authentication facts.
returned: success
type: complex
contains:
api_key:
description: Authentication token.
returned: success
type: str
host:
description: URL for accessing the API server.
returned: success
type: str
ca_cert:
description: Path to a CA certificate file used to verify connection to the API server.
returned: success
type: str
validate_certs:
description: "Whether or not to verify the API server's SSL certificates."
returned: success
type: bool
username:
description: Username for authenticating with the API server.
returned: success
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
# 3rd party imports
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from requests_oauthlib import OAuth2Session
HAS_REQUESTS_OAUTH = True
except ImportError:
HAS_REQUESTS_OAUTH = False
try:
from urllib3.util import make_headers
HAS_URLLIB3 = True
except ImportError:
HAS_URLLIB3 = False
K8S_AUTH_ARG_SPEC = {
'state': {
'default': 'present',
'choices': ['present', 'absent'],
},
'host': {'required': True},
'username': {},
'password': {'no_log': True},
'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']},
'validate_certs': {
'type': 'bool',
'default': True,
'aliases': ['verify_ssl']
},
'api_key': {'no_log': True},
}
class KubernetesAuthModule(AnsibleModule):
def __init__(self):
AnsibleModule.__init__(
self,
argument_spec=K8S_AUTH_ARG_SPEC,
required_if=[
('state', 'present', ['username', 'password']),
('state', 'absent', ['api_key']),
]
)
if not HAS_REQUESTS:
self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
if not HAS_REQUESTS_OAUTH:
self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")
if not HAS_URLLIB3:
self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")
def execute_module(self):
state = self.params.get('state')
verify_ssl = self.params.get('validate_certs')
ssl_ca_cert = self.params.get('ca_cert')
self.auth_username = self.params.get('username')
self.auth_password = self.params.get('password')
self.auth_api_key = self.params.get('api_key')
self.con_host = self.params.get('host')
# python-requests takes either a bool or a path to a ca file as the 'verify' param
if verify_ssl and ssl_ca_cert:
self.con_verify_ca = ssl_ca_cert # path
else:
self.con_verify_ca = verify_ssl # bool
# Get needed info to access authorization APIs
self.openshift_discover()
if state == 'present':
new_api_key = self.openshift_login()
result = dict(
host=self.con_host,
validate_certs=verify_ssl,
ca_cert=ssl_ca_cert,
api_key=new_api_key,
username=self.auth_username,
)
else:
self.openshift_logout()
result = dict()
self.exit_json(changed=False, k8s_auth=result)
def openshift_discover(self):
url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host)
ret = requests.get(url, verify=self.con_verify_ca)
if ret.status_code != 200:
self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
reason=ret.reason, status_code=ret.status_code)
try:
oauth_info = ret.json()
self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
self.openshift_token_endpoint = oauth_info['token_endpoint']
except Exception:
self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
exception=traceback.format_exc())
def openshift_login(self):
os_oauth = OAuth2Session(client_id='openshift-challenging-client')
authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
state="1", code_challenge_method='S256')
auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))
# Request authorization code using basic auth credentials
ret = os_oauth.get(
authorization_url,
headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
verify=self.con_verify_ca,
allow_redirects=False
)
if ret.status_code != 302:
self.fail_request("Authorization failed.", method='GET', url=authorization_url,
reason=ret.reason, status_code=ret.status_code)
# In here we have `code` and `state`, I think `code` is the important one
qwargs = {}
for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
qwargs[k] = v[0]
qwargs['grant_type'] = 'authorization_code'
# Using authorization code given to us in the Location header of the previous request, request a token
ret = os_oauth.post(
self.openshift_token_endpoint,
headers={
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
# This is just base64 encoded 'openshift-challenging-client:'
'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
},
data=urlencode(qwargs),
verify=self.con_verify_ca
)
if ret.status_code != 200:
self.fail_request("Failed to obtain an authorization token.", method='POST',
url=self.openshift_token_endpoint,
reason=ret.reason, status_code=ret.status_code)
return ret.json()['access_token']
def openshift_logout(self):
url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key)
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(self.auth_api_key)
}
json = {
"apiVersion": "oauth.openshift.io/v1",
"kind": "DeleteOptions"
}
requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca)
# Ignore errors, the token will time out eventually anyway
def fail(self, msg=None):
self.fail_json(msg=msg)
def fail_request(self, msg, **kwargs):
req_info = {}
for k, v in kwargs.items():
req_info['req_' + k] = v
self.fail_json(msg=msg, **req_info)
def main():
module = KubernetesAuthModule()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
the-stack_106_31898 | """
Including Other Files
=====================
FEE files may contain other files; to load another file, use the ``Include``
verb::
Include anchors.fee;
"""
from . import FEEVerb
import lark
import os
PARSEOPTS = dict(use_helpers=True)
GRAMMAR = """
?start: action
action: ESCAPED_STRING
"""
VERBS = ["Include", "IncludeFEA"]
def _file_to_string_or_error(parser, filename):
# Try it relative to current file
basedir = os.path.dirname(parser.current_file)
trypath = os.path.join(basedir, filename)
for p in [trypath, filename]:
if os.path.exists(p):
with open(p) as f:
return f.read()
raise ValueError("Could not include file %s" % filename)
class Include(FEEVerb):
def ESCAPED_STRING(self, tok):
return tok.value[1:-1] # slice removes "'s
def action(self, args):
(filename,) = args
return self.parser.parseString(_file_to_string_or_error(self.parser, filename))
from fontFeatures.feaLib import FeaParser
class IncludeFEA(Include):
def action(self, args):
(filename,) = args
feaparser = FeaParser(_file_to_string_or_error(self.parser, filename))
feaparser.ff = self.parser.fontfeatures
feaparser.parse()
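# Illustrative FEE sketch (file name is hypothetical), mirroring the module docstring:
#   IncludeFEA legacy-kerning.fea;
# Both verbs resolve the named file relative to the current FEE file (falling back to
# the path as given); Include re-parses it as FEE, IncludeFEA parses it as AFDKO .fea.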
|
the-stack_106_31899 | import datetime
from datetime import datetime
import time
import requests
import pyttsx3
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import speech_recognition as spreg
from selenium.webdriver.chrome.options import Options
driver = webdriver.Chrome(executable_path=("D://chromedriver.exe"))
username = "your username"
userpassword = "your password"
# Integrating speech module
def speechmod():
recog = spreg.Recognizer()
driver.quit()
global text
with spreg.Microphone() as source:
print('Tell Something: ')
speech = recog.listen(source)
try:
text = recog.recognize_google(speech)
return text
except spreg.UnknownValueError:
speak('Unable to recognize the audio')
# Integrating speak function
def speak(text):
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
engine.say(text)
engine.runAndWait()
# Instagram login
def instalogin():
try:
driver.get("https://www.instagram.com/accounts/login/")
time.sleep(2)
login_id = driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input')
login_id.send_keys(username)
time.sleep(1)
password = driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input')
password.send_keys(userpassword)
clicklog = driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]')
clicklog.click()
time.sleep(3)
saveinf = driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/section/div/button')
saveinf.click()
time.sleep(2)
notnow = driver.find_element_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')
notnow.click()
except:
speak("login failed")
# Integrating count of message on instagram
def instamsgcount():
instalogin()
global count
try:
instamsg = driver.find_element_by_xpath(
'//*[@id="react-root"]/section/nav/div[2]/div/div/div[3]/div/div[2]/a/div/div/div')
count = int(instamsg.get_attribute('innerText'))
msgtext = "You have " + str(count) + " total messages"
speak(msgtext)
print(msgtext)
instamsg.click()
if (count < 1):
speak('you dont have any new messages')
except:
speak('you dont have any new messages')
# Integrating whose msg is there
def messagedperson():
try:
instamsgcount()
time.sleep(7)
arr = []
for i in range(1, count + 1):
nameofrecent = driver.find_element_by_xpath(
'/html/body/div[1]/section/div/div[2]/div/div/div[1]/div[2]/div/div/div/div/div[' + str(
i) + ']/a/div/div[2]/div[1]/div/div/div/div')
++i
nameofrecent.click()
time.sleep(5)
personnn = driver.find_element_by_xpath(
'//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[1]/div/div/div[2]/div/div[2]/button/div/div/div')
personnn.click()
time.sleep(3)
personname = driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/div[2]/h1')
PNAME = personname.get_attribute('innerHTML')
arr.append(PNAME)
speak("you have messages from , ")
for i in arr:
speak(i)
print(i)
except:
speak('you dont have any new messages')
messagedperson()
|
the-stack_106_31901 | import pytest
from vending_machine.hoge.vending_machine import VendingMachine
# Test that money can be inserted into the vending machine
def test_insert_money():
vending_machine = VendingMachine()
vending_machine.insert(100)
# [Vending Machine features] 10 yen, 100 yen, XX
## Test scope: accept the specified denominations and raise an Error for anything else
insert_money_list = [
(10),
(50),
(100),
(500),
(1000),
]
@pytest.mark.parametrize("money", insert_money_list)
def test_valid_money(money):
"""
Test function verifying that a valid denomination is accepted
"""
# Call the vending machine's function that checks the inserted amount
result = VendingMachine().check_insert_money(money)
assert result
def test_invalid_money():
"""
Test function verifying that an invalid denomination is rejected
"""
insert_money = 200
# Call the vending machine's function that checks the inserted amount
result = VendingMachine().check_insert_money(insert_money)
assert not result
def test_multi_insert_money():
"""
Insert money multiple times
"""
vending_machine = VendingMachine()
money = {10: 1, 50: 0, 100: 1, 500: 0, 1000: 0}
vending_machine.insert(money)
assert True
def test_total_money():
"""
Total of the inserted money
"""
vending_machine = VendingMachine()
money = {10: 1, 50: 0, 100: 1, 500: 0, 1000: 0}
vending_machine.insert(money)
actual = vending_machine.get_total()
expected = 0
assert actual == expected
# Multiple insertions
# Insertion and total of the inserted money
# insert_money = [100,100]
# def test_get_num_money():
# assert count(insert_mon)
# def test_get_total_money():
# assert sum(insert_money)
# Make the method callable multiple times; test that the money is totaled across the number of insertions
##
# Refund XX
# Test that a refund is possible and that the inserted amount is returned
|
the-stack_106_31902 | from datetime import date
start = date(2020, 1, 1)
today = date.today()
delta = today - start
if (delta.days < 101):
print("Today is day {}".format(delta.days))
else:
print('100 Days of Code sprint has ended')
|
the-stack_106_31903 | from django.db.models.fields.related import ManyToOneRel
from django.conf import settings
from django_filters import FilterSet, Filter
from django_filters.filterset import get_model_field
from django.contrib.gis import forms
from .settings import app_settings, API_SRID
from .widgets import HiddenGeometryWidget
class PolygonFilter(Filter):
field_class = forms.PolygonField
def __init__(self, *args, **kwargs):
kwargs.setdefault('field_name', app_settings['GEOM_FIELD_NAME'])
kwargs.setdefault('widget', HiddenGeometryWidget)
kwargs.setdefault('lookup_expr', 'intersects')
super(PolygonFilter, self).__init__(*args, **kwargs)
class PythonPolygonFilter(PolygonFilter):
def filter(self, qs, value):
if not value:
return qs
if not value.srid:
value.srid = API_SRID
value.transform(settings.SRID)
filtered = []
for o in qs.all():
geom = getattr(o, self.field_name)
if geom and geom.valid and not geom.empty:
if getattr(geom, self.lookup_expr)(value):
filtered.append(o.pk)
else:
filtered.append(o.pk)
return qs.filter(pk__in=filtered)
class BaseMapEntityFilterSet(FilterSet):
def __init__(self, *args, **kwargs):
super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)
self.__bypass_labels()
def __bypass_labels(self):
"""
These hacks allow to bypass field labels. Using either placeholders,
empty choices label, etc. This allows to greatly save space in form layout,
which is required for concise filter forms.
"""
for fieldname in self.base_filters.keys():
field = self.form.fields[fieldname]
if isinstance(field, forms.MultiValueField):
for i, widget in enumerate(field.widget.widgets):
self.__set_placeholder(field.fields[i], widget)
elif isinstance(field, forms.ChoiceField):
field.empty_label = field.label
self.__set_placeholder(field, field.widget)
elif isinstance(field, forms.NullBooleanField):
choices = [(u'1', field.label)] + field.widget.choices[1:]
field.widget.choices = choices
self.__set_placeholder(field, field.widget)
else:
self.__set_placeholder(field, field.widget)
def __set_placeholder(self, field, widget):
field.help_text = '' # Hide help text
widget.attrs['placeholder'] = field.label
widget.attrs['data-placeholder'] = field.label
widget.attrs['title'] = field.label
widget.attrs['data-label'] = field.label
@classmethod
def add_filter(cls, name, filter_=None):
field = get_model_field(cls._meta.model, name)
if filter_ is None:
if isinstance(field, ManyToOneRel):
filter_ = cls.filter_for_reverse_field(field, name)
else:
filter_ = cls.filter_for_field(field, name)
cls.base_filters[name] = filter_
@classmethod
def add_filters(cls, filters):
for name, filter_ in filters.items():
filter_.field_name = name
cls.add_filter(name, filter_)
class MapEntityFilterSet(BaseMapEntityFilterSet):
bbox = PolygonFilter()
class Meta:
fields = ['bbox']
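# Illustrative subclass sketch (hypothetical model name): project filtersets typically
# extend MapEntityFilterSet and append their own fields to the inherited bbox filter.
#   class TrekFilterSet(MapEntityFilterSet):
#       class Meta(MapEntityFilterSet.Meta):
#           model = Trek
#           fields = MapEntityFilterSet.Meta.fields + ['published']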
|
the-stack_106_31904 | from itertools import zip_longest
from typing import Callable, Dict
import urllib3
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
# Disable insecure warnings
urllib3.disable_warnings()
CAST_MAPPING: Dict[str, Callable] = {
'string': str,
'boolean': bool,
'number': arg_to_number,
}
def execute_query(client: Client, args: Dict) -> CommandResults:
query = gql(args.get('query'))
variables_names = argToList(args.get('variables_names', ''))
variables_values = argToList(args.get('variables_values', ''))
variables_types = argToList(args.get('variables_types', ''))
if len(variables_names) != len(variables_values) or \
(variables_types and len(variables_types) != len(variables_values)):
raise ValueError('The variable lists are not in the same length')
variables = {}
for variable_name, variable_value, variable_type in zip_longest(variables_names, variables_values, variables_types):
if variable_type:
variable_value = CAST_MAPPING[variable_type](variable_value)
elif variable_value.isdigit():
variable_value = int(variable_value)
elif variable_value.lower() in {'true', 'false'}:
variable_value = bool(variable_value)
variables[variable_name] = variable_value
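# Example of the resulting mapping (hypothetical argument values):
#   variables_names="owner,name", variables_values="demisto,content", variables_types="string,string"
#   -> variables == {"owner": "demisto", "name": "content"}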
result = client.execute(query, variable_values=variables)
if (result_size := sys.getsizeof(result)) > (max_result_size := float(args.get('max_result_size', 10))) * 10000:
raise ValueError(f'Result size {result_size / 10000} KBs is larger then max result size {max_result_size} KBs')
command_results_args = {
'readable_output': tableToMarkdown('GraphQL Query Results', result),
'raw_response': result,
'outputs': result if argToBoolean(args.get('populate_context_data')) else None,
'outputs_prefix': 'GraphQL'
}
if args.get('outputs_key_field'):
command_results_args['outputs_key_field'] = args.get('outputs_key_field')
return CommandResults(**command_results_args)
def main() -> None:
command = demisto.command()
try:
params = demisto.params()
request_params = {
'url': params.get('url'),
'verify': not params.get('insecure', False),
'retries': 3,
}
if credentials := params.get('credentials'):
if (identifier := credentials.get('identifier', '')).startswith('_header:'):
header_name = identifier.split('_header:')[1]
header_value = credentials.get('password', '')
request_params['headers'] = {header_name: header_value}
else:
request_params['auth'] = (identifier, credentials.get('password'))
transport = RequestsHTTPTransport(**request_params)
handle_proxy()
fetch_schema_from_transport = params.get('fetch_schema_from_transport', True)
if fetch_schema_from_transport is None:
fetch_schema_from_transport = True
client = Client(
transport=transport,
fetch_schema_from_transport=fetch_schema_from_transport,
)
demisto.debug(f'Command being called is {command}')
if command == 'test-module':
with client as session:
session.fetch_schema()
return_results('ok')
elif command == 'graphql-query':
return_results(execute_query(client, demisto.args()))
elif command == 'graphql-mutation':
return_results(execute_query(client, demisto.args()))
else:
raise NotImplementedError(f"Received an un-supported command: {command}")
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {command} command. Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
the-stack_106_31905 | import os
from torch.utils.data import Dataset
import numpy as np
import torch
import torch.nn.functional as F
import re
class TextDataset(Dataset):
def __init__(self, txt_path, mode='train', seq_len=50, n_steps=50):
assert os.path.exists(txt_path), f"File not found {txt_path}\n"
with open(txt_path, 'r') as txt:
data = txt.read()
self.int2char, self.char2int = self.get_lookup_tables(data)
self.encoded = np.array([self.char2int[ch] for ch in data])
split_element = int(0.8 * len(self.encoded))
if mode == 'train':
self.encoded = self.encoded[:split_element]
else:
self.encoded = self.encoded[split_element:]
self.chars = tuple(self.char2int.keys())
self.batches = list(self.get_batches(self.encoded, seq_len, n_steps))
self.n_symbols = len(self.chars)
def get_lookup_tables(self, text):
chars = tuple(sorted(set(text)))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
return int2char, char2int
def __len__(self):
return len(self.batches)
def get_batches(self, arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
'''
batch_size = n_seqs * n_steps
n_batches = len(arr) // batch_size
# Keep only enough characters to make full batches
arr = arr[:n_batches * batch_size]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n:n + n_steps]
# The targets, shifted by one
y = np.zeros_like(x)
try:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n + n_steps]
except IndexError:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
yield torch.from_numpy(x), torch.from_numpy(y)
def __getitem__(self, item):
inp, target = self.batches[item]
inp = F.one_hot(inp, self.n_symbols).float()
# inp, target = torch.from_numpy(inp), torch.from_numpy(target)
return inp, target
# TextDataset('data/arxiv_small.txt')
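# Illustrative usage sketch (assumes a plain-text corpus exists at the path below):
#   dataset = TextDataset('data/arxiv_small.txt', mode='train', seq_len=50, n_steps=50)
#   inp, target = dataset[0]   # inp: one-hot floats of shape (seq_len, n_steps, n_symbols)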
|
the-stack_106_31906 | #!/usr/bin/env python3
import argparse
from copy import deepcopy
import time
from botocore.exceptions import ClientError
module_info = {
# Name of the module (should be the same as the filename)
'name': 'elb__enum_logging',
# Name and any other notes about the author
'author': 'Spencer Gietzen of Rhino Security Labs',
# Category of the module. Make sure the name matches an existing category.
'category': 'EVADE',
# One liner description of the module functionality. This shows up when a user searches for modules.
'one_liner': 'Collects a list of Elastic Load Balancers without access logging.',
# Description about what the module does and how it works
'description': 'This module will enumerate all EC2 Elastic Load Balancers and save their data to the current session, as well as write a list of ELBs with logging disabled to ./sessions/[current_session_name]/downloads/elbs_no_logs_[timestamp].csv.',
# A list of AWS services that the module utilizes during its execution
'services': ['ElasticLoadBalancing'],
# For prerequisite modules, try and see if any existing modules return the data that is required for your module before writing that code yourself, that way, session data can stay separated and modular.
'prerequisite_modules': [],
# Module arguments to autocomplete when the user hits tab
'arguments_to_autocomplete': ['--regions'],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info['description'])
parser.add_argument('--regions', required=False, default=None, help='One or more (comma separated) AWS regions in the format "us-east-1". Defaults to all session regions.')
def main(args, pacu_main):
session = pacu_main.get_active_session()
args = parser.parse_args(args)
print = pacu_main.print
get_regions = pacu_main.get_regions
if not args.regions:
regions = get_regions('elasticloadbalancing')
else:
regions = args.regions.split(',')
summary_data = {'load_balancers': 0}
if 'LoadBalancers' not in session.EC2.keys():
ec2_data = deepcopy(session.EC2)
ec2_data['LoadBalancers'] = []
session.update(pacu_main.database, EC2=ec2_data)
load_balancers = list()
for region in regions:
print('Starting region {}...'.format(region))
client = pacu_main.get_boto3_client('elbv2', region)
count = 0
response = None
next_marker = False
while (response is None or 'NextMarker' in response):
try:
if next_marker is False:
response = client.describe_load_balancers()
else:
response = client.describe_load_balancers(Marker=next_marker)
if 'NextMarker' in response:
next_marker = response['NextMarker']
for load_balancer in response['LoadBalancers']:
load_balancer['Region'] = region
# Adding Attributes to current load balancer database
load_balancer['Attributes'] = client.describe_load_balancer_attributes(
LoadBalancerArn=load_balancer['LoadBalancerArn']
)['Attributes']
load_balancers.append(load_balancer)
except ClientError as error:
if error.response['Error']['Code'] == 'AccessDenied':
print(' FAILURE: MISSING REQUIRED AWS PERMISSIONS')
else:
print(' {}'.format(error.response['Error']['Code']))
break
if response and 'LoadBalancers' in response:
count += len(response['LoadBalancers'])
summary_data['load_balancers'] += count
print(' {} load balancer(s) found '.format(count))
ec2_data = deepcopy(session.EC2)
ec2_data['LoadBalancers'] = deepcopy(load_balancers)
session.update(pacu_main.database, EC2=ec2_data)
print('\n{} total load balancer(s) found.'.format(len(session.EC2['LoadBalancers'])))
now = time.time()
csv_file_path = 'sessions/{}/downloads/elbs_no_logs_{}.csv'.format(session.name, now)
summary_data['csv_file_path'] = csv_file_path
summary_data['logless'] = 0
with open(csv_file_path, 'w+') as csv_file:
csv_file.write('Load Balancer Name,Load Balancer ARN,Region\n')
for load_balancer in session.EC2['LoadBalancers']:
for attribute in load_balancer['Attributes']:
if attribute['Key'] == 'access_logs.s3.enabled':
if attribute['Value'] is False or attribute['Value'] == 'false':
csv_file.write('{},{},{}\n'.format(load_balancer['LoadBalancerName'], load_balancer['LoadBalancerArn'], load_balancer['Region']))
summary_data['logless'] += 1
return summary_data
def summary(data, pacu_main):
out = ' {} Load balancer(s) have been found\n'.format(data['load_balancers'])
if data['logless'] > 0:
out += ' {} Load balancer(s) found without logging\n'.format(data['logless'])
out += ' List of Load balancers without logging saved to:\n {}\n'.format(data['csv_file_path'])
return out
|
the-stack_106_31907 | """Test for tmuxp Server object."""
import logging
from libtmux import Server
from libtmux.common import has_gte_version
logger = logging.getLogger(__name__)
def test_has_session(server, session):
assert server.has_session(session.get("session_name"))
assert not server.has_session("asdf2314324321")
def test_socket_name(server):
"""``-L`` socket_name.
``-L`` socket_name file name of socket. which will be stored in
env TMUX_TMPDIR or /tmp if unset.)
"""
myserver = Server(socket_name="test")
assert myserver.socket_name == "test"
def test_socket_path(server):
"""``-S`` socket_path (alternative path for server socket)."""
myserver = Server(socket_path="test")
assert myserver.socket_path == "test"
def test_config(server):
"""``-f`` file for tmux(1) configuration."""
myserver = Server(config_file="test")
assert myserver.config_file == "test"
def test_256_colors(server):
myserver = Server(colors=256)
assert myserver.colors == 256
print(myserver.colors)
proc = myserver.cmd("list-sessions")
print("list-sessions", proc)
assert "-2" in proc.cmd
assert "-8" not in proc.cmd
def test_88_colors(server):
myserver = Server(colors=88)
assert myserver.colors == 88
proc = myserver.cmd("list-sessions")
assert "-8" in proc.cmd
assert "-2" not in proc.cmd
def test_show_environment(server):
"""Server.show_environment() returns dict."""
_vars = server.show_environment()
assert isinstance(_vars, dict)
def test_set_show_environment_single(server, session):
"""Set environment then Server.show_environment(key)."""
server.set_environment("FOO", "BAR")
assert "BAR" == server.show_environment("FOO")
server.set_environment("FOO", "DAR")
assert "DAR" == server.show_environment("FOO")
assert "DAR" == server.show_environment()["FOO"]
def test_show_environment_not_set(server):
"""Unset environment variable returns None."""
assert server.show_environment("BAR") is None
def test_new_session(server):
"""Server.new_session creates and returns valid session"""
mysession = server.new_session("test_new_session")
assert mysession.get("session_name") == "test_new_session"
assert server.has_session("test_new_session")
def test_new_session_shell(server):
"""Server.new_session creates and returns valid session running with
specified command"""
cmd = "sleep 1m"
mysession = server.new_session("test_new_session", window_command=cmd)
window = mysession.list_windows()[0]
pane = window.list_panes()[0]
assert mysession.get("session_name") == "test_new_session"
assert server.has_session("test_new_session")
if has_gte_version("3.2"):
assert pane.get("pane_start_command").replace('"', "") == cmd
else:
assert pane.get("pane_start_command") == cmd
|
the-stack_106_31908 | """Check whether a file format is supported by BIDS and then load it."""
# Authors: Mainak Jas <[email protected]>
# Alexandre Gramfort <[email protected]>
# Teon Brooks <[email protected]>
# Chris Holdgraf <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import glob
import json
import re
from datetime import datetime, timezone
import numpy as np
import mne
from mne import io, read_events, events_from_annotations
from mne.utils import has_nibabel, logger, warn
from mne.coreg import fit_matched_points
from mne.transforms import apply_trans
from mne_bids.dig import _read_dig_bids
from mne_bids.tsv_handler import _from_tsv, _drop
from mne_bids.config import ALLOWED_DATATYPE_EXTENSIONS, reader, _map_options
from mne_bids.utils import _extract_landmarks, _get_ch_type_mapping
from mne_bids.path import (BIDSPath, _parse_ext, _find_matching_sidecar,
_infer_datatype)
def _read_raw(raw_fpath, electrode=None, hsp=None, hpi=None,
allow_maxshield=False, config=None, verbose=None, **kwargs):
"""Read a raw file into MNE, making inferences based on extension."""
_, ext = _parse_ext(raw_fpath)
# KIT systems
if ext in ['.con', '.sqd']:
raw = io.read_raw_kit(raw_fpath, elp=electrode, hsp=hsp,
mrk=hpi, preload=False, **kwargs)
# BTi systems
elif ext == '.pdf':
raw = io.read_raw_bti(raw_fpath, config_fname=config,
head_shape_fname=hsp,
preload=False, verbose=verbose,
**kwargs)
elif ext == '.fif':
raw = reader[ext](raw_fpath, allow_maxshield, **kwargs)
elif ext in ['.ds', '.vhdr', '.set', '.edf', '.bdf']:
raw = reader[ext](raw_fpath, **kwargs)
# MEF and NWB are allowed, but not yet implemented
elif ext in ['.mef', '.nwb']:
raise ValueError(f'Got "{ext}" as extension. This is an allowed '
f'extension but there is no IO support for this '
f'file format yet.')
# No supported data found ...
# ---------------------------
else:
raise ValueError(f'Raw file name extension must be one '
f'of {ALLOWED_DATATYPE_EXTENSIONS}\n'
f'Got {ext}')
return raw
def _read_events(events_data, event_id, raw, verbose=None):
"""Retrieve events (for use in *_events.tsv) from FIFF/array & Annotations.
Parameters
----------
events_data : str | np.ndarray | None
If a string, a path to an events file. If an array, an MNE events array
(shape n_events, 3). If None, events will be generated from
``raw.annotations``.
event_id : dict | None
The event id dict used to create a 'trial_type' column in events.tsv,
mapping a description key to an integer-valued event code.
raw : mne.io.Raw
The data as MNE-Python Raw object.
verbose : bool | str | int | None
If not None, override default verbose level (see :func:`mne.verbose`).
Returns
-------
all_events : np.ndarray, shape = (n_events, 3)
The first column contains the event time in samples and the third
column contains the event id. The second column is ignored for now but
typically contains the value of the trigger channel either immediately
before the event or immediately after.
all_dur : np.ndarray, shape (n_events,)
The event durations in seconds.
all_desc : dict
A dictionary with the keys corresponding to the event descriptions and
the values to the event IDs.
"""
# get events from events_data
if isinstance(events_data, str):
events = read_events(events_data, verbose=verbose).astype(int)
elif isinstance(events_data, np.ndarray):
if events_data.ndim != 2:
raise ValueError('Events must have two dimensions, '
f'found {events_data.ndim}')
if events_data.shape[1] != 3:
raise ValueError('Events must have second dimension of length 3, '
f'found {events_data.shape[1]}')
events = events_data
else:
events = np.empty(shape=(0, 3), dtype=int)
if events.size > 0:
# Only keep events for which we have an ID <> description mapping.
ids_without_desc = set(events[:, 2]) - set(event_id.values())
if ids_without_desc:
raise ValueError(
f'No description was specified for the following event(s): '
f'{", ".join([str(x) for x in sorted(ids_without_desc)])}. '
f'Please add them to the event_id dictionary, or drop them '
f'from the events_data array.'
)
del ids_without_desc
mask = [e in list(event_id.values()) for e in events[:, 2]]
events = events[mask]
# Append events to raw.annotations. All event onsets are relative to
# measurement beginning.
id_to_desc_map = dict(zip(event_id.values(), event_id.keys()))
# We don't pass `first_samp`, as set_annotations() below will take
# care of this shift automatically.
new_annotations = mne.annotations_from_events(
events=events, sfreq=raw.info['sfreq'], event_desc=id_to_desc_map,
orig_time=raw.annotations.orig_time, verbose=verbose)
raw = raw.copy() # Don't alter the original.
annotations = raw.annotations.copy()
# We use `+=` here because `Annotations.__iadd__()` does the right
# thing and also performs a sanity check on `Annotations.orig_time`.
annotations += new_annotations
raw.set_annotations(annotations)
del id_to_desc_map, annotations, new_annotations
# Now convert the Annotations to events.
all_events, all_desc = events_from_annotations(
raw,
event_id=event_id,
regexp=None, # Include `BAD_` and `EDGE_` Annotations, too.
verbose=verbose
)
all_dur = raw.annotations.duration
if all_events.size == 0 and 'rest' not in raw.filenames[0]:
warn('No events found or provided. Please add annotations '
'to the raw data, or provide the events_data and '
'event_id parameters. If this is resting state data '
'it is recommended to name the task "rest".')
return all_events, all_dur, all_desc
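# Illustrative call of _read_events() (array values and labels below are made
# up, not from any dataset): an MNE events array plus an event_id mapping
# round-trips through Annotations so codes, durations and descriptions stay
# consistent, e.g.
#     events = np.array([[100, 0, 1], [350, 0, 2]])
#     event_id = {'stimulus/visual': 1, 'stimulus/auditory': 2}
#     all_events, all_dur, all_desc = _read_events(events, event_id, raw)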
def _handle_participants_reading(participants_fname, raw,
subject, verbose=None):
participants_tsv = _from_tsv(participants_fname)
subjects = participants_tsv['participant_id']
row_ind = subjects.index(subject)
# set data from participants tsv into subject_info
for infokey, infovalue in participants_tsv.items():
if infokey == 'sex' or infokey == 'hand':
value = _map_options(what=infokey, key=infovalue[row_ind],
fro='bids', to='mne')
# We don't know how to translate to MNE, so skip.
if value is None:
if infokey == 'sex':
info_str = 'subject sex'
else:
info_str = 'subject handedness'
warn(f'Unable to map `{infokey}` value to MNE. '
f'Not setting {info_str}.')
else:
value = infovalue[row_ind]
# add data into raw.Info
if raw.info['subject_info'] is None:
raw.info['subject_info'] = dict()
raw.info['subject_info'][infokey] = value
return raw
def _handle_scans_reading(scans_fname, raw, bids_path, verbose=False):
"""Read associated scans.tsv and set meas_date."""
scans_tsv = _from_tsv(scans_fname)
fname = bids_path.fpath.name
if fname.endswith('.pdf'):
# for BTI files, the scan is an entire directory
fname = fname.split('.')[0]
# get the row corresponding to the file
# use string concatenation instead of os.path
# to work nicely with windows
data_fname = bids_path.datatype + '/' + fname
fnames = scans_tsv['filename']
acq_times = scans_tsv['acq_time']
row_ind = fnames.index(data_fname)
# check whether all split files have the same acq_time
# and throw an error if they don't
if '_split-' in fname:
split_idx = fname.find('split-')
pattern = re.compile(bids_path.datatype + '/' +
bids_path.basename[:split_idx] +
r'split-\d+_' + bids_path.datatype +
bids_path.fpath.suffix)
split_fnames = list(filter(pattern.match, fnames))
split_acq_times = []
for split_f in split_fnames:
split_acq_times.append(acq_times[fnames.index(split_f)])
if len(set(split_acq_times)) != 1:
raise ValueError("Split files must have the same acq_time.")
# extract the acquisition time from scans file
acq_time = acq_times[row_ind]
if acq_time != 'n/a':
# microseconds in the acquisition time is optional
if '.' not in acq_time:
            # append ".0Z" so acq_time ends with the ".%fZ" microseconds suffix expected below
acq_time += '.0Z'
acq_time = datetime.strptime(acq_time, '%Y-%m-%dT%H:%M:%S.%fZ')
acq_time = acq_time.replace(tzinfo=timezone.utc)
if verbose:
logger.debug(f'Loaded {scans_fname} scans file to set '
f'acq_time as {acq_time}.')
raw.set_meas_date(acq_time)
return raw
def _handle_info_reading(sidecar_fname, raw, verbose=None):
"""Read associated sidecar.json and populate raw.
Handle PowerLineFrequency of recording.
"""
with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin:
sidecar_json = json.load(fin)
# read in the sidecar JSON's line frequency
line_freq = sidecar_json.get("PowerLineFrequency")
if line_freq == "n/a":
line_freq = None
if raw.info["line_freq"] is not None and line_freq is None:
        line_freq = raw.info["line_freq"]  # take it from the file if present
if raw.info["line_freq"] is not None and line_freq is not None:
# if both have a set Power Line Frequency, then
# check that they are the same, else there is a
        # discrepancy in the metadata of the dataset.
        if raw.info["line_freq"] != line_freq:
            raise ValueError("Line frequency in sidecar json does "
                             "not match the info datastructure of "
                             "the mne.Raw. "
                             "Raw is -> {}; Sidecar JSON is -> {}"
                             .format(raw.info["line_freq"], line_freq))
raw.info["line_freq"] = line_freq
return raw
def _handle_events_reading(events_fname, raw):
"""Read associated events.tsv and populate raw.
Handle onset, duration, and description of each event.
"""
logger.info('Reading events from {}.'.format(events_fname))
events_dict = _from_tsv(events_fname)
# Get the descriptions of the events
if 'trial_type' in events_dict:
trial_type_col_name = 'trial_type'
elif 'stim_type' in events_dict: # Backward-compat with old datasets.
trial_type_col_name = 'stim_type'
warn(f'The events file, {events_fname}, contains a "stim_type" '
f'column. This column should be renamed to "trial_type" for '
f'BIDS compatibility.')
else:
trial_type_col_name = None
if trial_type_col_name is not None:
# Drop events unrelated to a trial type
events_dict = _drop(events_dict, 'n/a', trial_type_col_name)
if 'value' in events_dict:
# Check whether the `trial_type` <> `value` mapping is unique.
trial_types = events_dict[trial_type_col_name]
values = np.asarray(events_dict['value'], dtype=str)
for trial_type in np.unique(trial_types):
idx = np.where(trial_type == np.atleast_1d(trial_types))[0]
matching_values = values[idx]
if len(np.unique(matching_values)) > 1:
# Event type descriptors are ambiguous; create hierarchical
# event descriptors.
logger.info(
f'The event "{trial_type}" refers to multiple event '
f'values. Creating hierarchical event names.')
for ii in idx:
new_name = f'{trial_type}/{values[ii]}'
logger.info(f' Renaming event: {trial_type} -> '
f'{new_name}')
trial_types[ii] = new_name
descriptions = np.asarray(trial_types, dtype=str)
else:
descriptions = np.asarray(events_dict[trial_type_col_name],
dtype=str)
elif 'value' in events_dict:
# If we don't have a proper description of the events, perhaps we have
# at least an event value?
# Drop events unrelated to value
events_dict = _drop(events_dict, 'n/a', 'value')
descriptions = np.asarray(events_dict['value'], dtype=str)
# Worst case, we go with 'n/a' for all events
else:
descriptions = np.array(['n/a'] * len(events_dict['onset']), dtype=str)
# Deal with "n/a" strings before converting to float
ons = [np.nan if on == 'n/a' else on for on in events_dict['onset']]
dus = [0 if du == 'n/a' else du for du in events_dict['duration']]
onsets = np.asarray(ons, dtype=float)
durations = np.asarray(dus, dtype=float)
# Keep only events where onset is known
good_events_idx = ~np.isnan(onsets)
onsets = onsets[good_events_idx]
durations = durations[good_events_idx]
descriptions = descriptions[good_events_idx]
del good_events_idx
# Add Events to raw as annotations
annot_from_events = mne.Annotations(onset=onsets,
duration=durations,
description=descriptions,
orig_time=None)
raw.set_annotations(annot_from_events)
return raw
def _get_bads_from_tsv_data(tsv_data):
"""Extract names of bads from data read from channels.tsv."""
idx = []
for ch_idx, status in enumerate(tsv_data['status']):
if status.lower() == 'bad':
idx.append(ch_idx)
bads = [tsv_data['name'][i] for i in idx]
return bads
def _handle_channels_reading(channels_fname, raw):
"""Read associated channels.tsv and populate raw.
Updates status (bad) and types of channels.
"""
logger.info('Reading channel info from {}.'.format(channels_fname))
channels_dict = _from_tsv(channels_fname)
ch_names_tsv = channels_dict['name']
# Now we can do some work.
# The "type" column is mandatory in BIDS. We can use it to set channel
# types in the raw data using a mapping between channel types
channel_type_dict = dict()
# Get the best mapping we currently have from BIDS to MNE nomenclature
bids_to_mne_ch_types = _get_ch_type_mapping(fro='bids', to='mne')
ch_types_json = channels_dict['type']
for ch_name, ch_type in zip(ch_names_tsv, ch_types_json):
# Try to map from BIDS nomenclature to MNE, leave channel type
# untouched if we are uncertain
updated_ch_type = bids_to_mne_ch_types.get(ch_type, None)
if updated_ch_type is None:
# XXX Try again with uppercase spelling – this should be removed
# XXX once https://github.com/bids-standard/bids-validator/issues/1018 # noqa:E501
# XXX has been resolved.
# XXX x-ref https://github.com/mne-tools/mne-bids/issues/481
updated_ch_type = bids_to_mne_ch_types.get(ch_type.upper(), None)
if updated_ch_type is not None:
msg = ('The BIDS dataset contains channel types in lowercase '
'spelling. This violates the BIDS specification and '
'will raise an error in the future.')
warn(msg)
if updated_ch_type is not None:
channel_type_dict[ch_name] = updated_ch_type
# Rename channels in loaded Raw to match those read from the BIDS sidecar
for bids_ch_name, raw_ch_name in zip(ch_names_tsv, raw.ch_names.copy()):
if bids_ch_name != raw_ch_name:
raw.rename_channels({raw_ch_name: bids_ch_name})
# Set the channel types in the raw data according to channels.tsv
raw.set_channel_types(channel_type_dict)
# Set bad channels based on _channels.tsv sidecar
if 'status' in channels_dict:
bads = _get_bads_from_tsv_data(channels_dict)
raw.info['bads'] = bads
return raw
def read_raw_bids(bids_path, extra_params=None, verbose=True):
"""Read BIDS compatible data.
Will attempt to read associated events.tsv and channels.tsv files to
populate the returned raw object with raw.annotations and raw.info['bads'].
Parameters
----------
bids_path : mne_bids.BIDSPath
The file to read. The :class:`mne_bids.BIDSPath` instance passed here
**must** have the ``.root`` attribute set. The ``.datatype`` attribute
**may** be set. If ``.datatype`` is not set and only one data type
(e.g., only EEG or MEG data) is present in the dataset, it will be
selected automatically.
extra_params : None | dict
Extra parameters to be passed to MNE read_raw_* functions.
If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.
Note that the ``exclude`` parameter, which is supported by some
MNE-Python readers, is not supported; instead, you need to subset
your channels **after** reading.
verbose : bool
The verbosity level.
Returns
-------
raw : mne.io.Raw
The data as MNE-Python Raw object.
Raises
------
RuntimeError
If multiple recording data types are present in the dataset, but
``datatype=None``.
RuntimeError
If more than one data files exist for the specified recording.
RuntimeError
If no data file in a supported format can be located.
ValueError
If the specified ``datatype`` cannot be found in the dataset.
"""
if not isinstance(bids_path, BIDSPath):
raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
'instantiate using mne_bids.BIDSPath().')
bids_path = bids_path.copy()
sub = bids_path.subject
ses = bids_path.session
bids_root = bids_path.root
datatype = bids_path.datatype
suffix = bids_path.suffix
# check root available
if bids_root is None:
raise ValueError('The root of the "bids_path" must be set. '
'Please use `bids_path.update(root="<root>")` '
'to set the root of the BIDS folder to read.')
# infer the datatype and suffix if they are not present in the BIDSPath
if datatype is None:
datatype = _infer_datatype(root=bids_root, sub=sub, ses=ses)
bids_path.update(datatype=datatype)
if suffix is None:
bids_path.update(suffix=datatype)
data_dir = bids_path.directory
bids_fname = bids_path.fpath.name
if op.splitext(bids_fname)[1] == '.pdf':
bids_raw_folder = op.join(data_dir, f'{bids_path.basename}')
bids_fpath = glob.glob(op.join(bids_raw_folder, 'c,rf*'))[0]
config = op.join(bids_raw_folder, 'config')
else:
bids_fpath = op.join(data_dir, bids_fname)
config = None
if extra_params is None:
extra_params = dict()
elif 'exclude' in extra_params:
del extra_params['exclude']
logger.info('"exclude" parameter is not supported by read_raw_bids')
raw = _read_raw(bids_fpath, electrode=None, hsp=None, hpi=None,
config=config, verbose=None, **extra_params)
# Try to find an associated events.tsv to get information about the
# events in the recorded data
events_fname = _find_matching_sidecar(bids_path, suffix='events',
extension='.tsv',
on_error='warn')
if events_fname is not None:
raw = _handle_events_reading(events_fname, raw)
# Try to find an associated channels.tsv to get information about the
# status and type of present channels
channels_fname = _find_matching_sidecar(bids_path,
suffix='channels',
extension='.tsv',
on_error='warn')
if channels_fname is not None:
raw = _handle_channels_reading(channels_fname, raw)
# Try to find an associated electrodes.tsv and coordsystem.json
# to get information about the status and type of present channels
on_error = 'warn' if suffix == 'ieeg' else 'ignore'
electrodes_fname = _find_matching_sidecar(bids_path,
suffix='electrodes',
extension='.tsv',
on_error=on_error)
coordsystem_fname = _find_matching_sidecar(bids_path,
suffix='coordsystem',
extension='.json',
on_error=on_error)
if electrodes_fname is not None:
if coordsystem_fname is None:
raise RuntimeError(f"BIDS mandates that the coordsystem.json "
f"should exist if electrodes.tsv does. "
f"Please create coordsystem.json for"
f"{bids_path.basename}")
if datatype in ['meg', 'eeg', 'ieeg']:
_read_dig_bids(electrodes_fname, coordsystem_fname,
raw=raw, datatype=datatype, verbose=verbose)
# Try to find an associated sidecar .json to get information about the
# recording snapshot
sidecar_fname = _find_matching_sidecar(bids_path,
suffix=datatype,
extension='.json',
on_error='warn')
if sidecar_fname is not None:
raw = _handle_info_reading(sidecar_fname, raw, verbose=verbose)
# read in associated scans filename
scans_fname = BIDSPath(
subject=bids_path.subject, session=bids_path.session,
suffix='scans', extension='.tsv',
root=bids_path.root
).fpath
if scans_fname.exists():
raw = _handle_scans_reading(scans_fname, raw, bids_path,
verbose=verbose)
# read in associated subject info from participants.tsv
participants_tsv_fpath = op.join(bids_root, 'participants.tsv')
subject = f"sub-{bids_path.subject}"
if op.exists(participants_tsv_fpath):
raw = _handle_participants_reading(participants_tsv_fpath, raw,
subject, verbose=verbose)
else:
warn("Participants file not found for {}... Not reading "
"in any particpants.tsv data.".format(bids_fname))
return raw
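# Minimal usage sketch for read_raw_bids() (subject, task and root below are
# placeholders, not taken from any particular dataset):
#     bids_path = BIDSPath(subject='01', session='01', task='rest',
#                          datatype='eeg', root='/data/my_bids_dataset')
#     raw = read_raw_bids(bids_path=bids_path)
# Any events.tsv, channels.tsv, electrodes.tsv, sidecar JSON, scans.tsv and
# participants.tsv found for the recording are folded into the returned Raw.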
def get_head_mri_trans(bids_path, extra_params=None):
"""Produce transformation matrix from MEG and MRI landmark points.
Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar
files of (i) the MEG and (ii) the T1 weighted MRI data. The two sets of
points will then be used to calculate a transformation matrix from head
coordinates to MRI coordinates.
Parameters
----------
bids_path : mne_bids.BIDSPath
The path of the recording for which to retrieve the transformation. The
:class:`mne_bids.BIDSPath` instance passed here **must** have the
``.root`` attribute set.
extra_params : None | dict
Extra parameters to be passed to MNE read_raw_* functions when reading
        the landmarks from the MEG file.
If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.
Returns
-------
trans : mne.transforms.Transform
The data transformation matrix from head to MRI coordinates
"""
if not has_nibabel(): # pragma: no cover
raise ImportError('This function requires nibabel.')
import nibabel as nib
if not isinstance(bids_path, BIDSPath):
raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
'instantiate using mne_bids.BIDSPath().')
# check root available
bids_path = bids_path.copy()
bids_root = bids_path.root
if bids_root is None:
raise ValueError('The root of the "bids_path" must be set. '
'Please use `bids_path.update(root="<root>")` '
'to set the root of the BIDS folder to read.')
# only get this for MEG data
bids_path.update(datatype='meg')
# Get the sidecar file for MRI landmarks
bids_fname = bids_path.update(suffix='meg', root=bids_root)
t1w_json_path = _find_matching_sidecar(bids_fname, suffix='T1w',
extension='.json')
# Get MRI landmarks from the JSON sidecar
with open(t1w_json_path, 'r', encoding='utf-8-sig') as f:
t1w_json = json.load(f)
mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())
mri_landmarks = np.asarray((mri_coords_dict.get('LPA', np.nan),
mri_coords_dict.get('NAS', np.nan),
mri_coords_dict.get('RPA', np.nan)))
if np.isnan(mri_landmarks).any():
raise RuntimeError('Could not parse T1w sidecar file: "{}"\n\n'
'The sidecar file MUST contain a key '
'"AnatomicalLandmarkCoordinates" pointing to a '
'dict with keys "LPA", "NAS", "RPA". '
'Yet, the following structure was found:\n\n"{}"'
.format(t1w_json_path, t1w_json))
    # The MRI landmarks are in "voxels". We need to convert them to the
    # neuromag RAS coordinate system in order to compare them with the MEG landmarks;
# see also: `mne_bids.write.write_anat`
t1w_path = t1w_json_path.replace('.json', '.nii')
if not op.exists(t1w_path):
t1w_path += '.gz' # perhaps it is .nii.gz? ... else raise an error
if not op.exists(t1w_path):
raise RuntimeError('Could not find the T1 weighted MRI associated '
'with "{}". Tried: "{}" but it does not exist.'
.format(t1w_json_path, t1w_path))
t1_nifti = nib.load(t1w_path)
# Convert to MGH format to access vox2ras method
t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)
# now extract transformation matrix and put back to RAS coordinates of MRI
vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()
mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
mri_landmarks = mri_landmarks * 1e-3
# Get MEG landmarks from the raw file
_, ext = _parse_ext(bids_fname)
if extra_params is None:
extra_params = dict()
if ext == '.fif':
extra_params = dict(allow_maxshield=True)
raw = read_raw_bids(bids_path=bids_path, extra_params=extra_params)
meg_coords_dict = _extract_landmarks(raw.info['dig'])
meg_landmarks = np.asarray((meg_coords_dict['LPA'],
meg_coords_dict['NAS'],
meg_coords_dict['RPA']))
# Given the two sets of points, fit the transform
trans_fitted = fit_matched_points(src_pts=meg_landmarks,
tgt_pts=mri_landmarks)
trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
return trans
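# Usage sketch for get_head_mri_trans() (assumes a hypothetical dataset that
# contains both the MEG recording and a T1w image whose JSON sidecar provides
# AnatomicalLandmarkCoordinates):
#     trans = get_head_mri_trans(bids_path)
# The returned head -> MRI transform can then be used for coregistration or
# forward modelling in MNE-Python.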
|
the-stack_106_31911 | from v1_0.user_roles import anonymous_user, authenticated_user, bumblebee_user
import time
import unittest
import adsmutils
import datetime
#resources as of [email protected]
api_resources = {
"adsws.accounts": {
"base": "/v1/accounts",
"endpoints": [
"/oauth/authorize",
"/oauth/invalid/",
"/oauth/errors/",
"/oauth/token",
"/oauth/ping/",
"/oauth/ping/",
"/oauth/info/",
"/user/delete",
"/change-password",
"/change-email",
"/bootstrap",
"/protected",
"/register",
"/status",
"/logout",
"/ready",
"/alive",
"/token",
"/csrf",
"/user",
"/reset-password/<string:token>",
"/verify/<string:token>",
"/info/<string:account_data>"
]
},
"adsws.api": {
"base": "/v1",
"endpoints": [
"/alive",
"/author-affiliation/alive",
"/author-affiliation/export",
"/author-affiliation/ready",
"/author-affiliation/search",
"/biblib/alive",
"/biblib/classic",
"/biblib/documents/<string:library>",
"/biblib/libraries",
"/biblib/libraries/operations/<string:library>",
"/biblib/libraries/<string:library>",
"/biblib/permissions/<string:library>",
"/biblib/ready",
"/biblib/resources",
"/biblib/transfer/<string:library>",
"/biblib/twopointoh",
"/citation_helper/",
"/citation_helper/alive",
"/citation_helper/ready",
"/export/aastex",
"/export/aastex/<bibcode>",
"/export/ads",
"/export/ads/<bibcode>",
"/export/alive",
"/export/bibtex",
"/export/bibtexabs",
"/export/bibtexabs/<bibcode>",
"/export/bibtex/<bibcode>",
"/export/convert",
"/export/csl",
"/export/custom",
"/export/dcxml",
"/export/dcxml/<bibcode>",
"/export/endnote",
"/export/endnote/<bibcode>",
"/export/icarus",
"/export/icarus/<bibcode>",
"/export/medlars",
"/export/medlars/<bibcode>",
"/export/mnras",
"/export/mnras/<bibcode>",
"/export/procite",
"/export/procite/<bibcode>",
"/export/ready",
"/export/refabsxml",
"/export/refabsxml/<bibcode>",
"/export/refworks",
"/export/refworks/<bibcode>",
"/export/refxml",
"/export/refxml/<bibcode>",
"/export/ris",
"/export/ris/<bibcode>",
"/export/rss",
"/export/rss/<bibcode>/",
"/export/rss/<bibcode>/<path:link>",
"/export/soph",
"/export/soph/<bibcode>",
"/export/votable",
"/export/votable/<bibcode>",
"/graphics/alive",
"/graphics/ready",
"/graphics/<string:bibcode>",
"/harbour/alive",
"/harbour/auth/classic",
"/harbour/auth/twopointoh",
"/harbour/export/twopointoh/<export>",
"/harbour/libraries/classic/<int:uid>",
"/harbour/libraries/twopointoh/<int:uid>",
"/harbour/mirrors",
"/harbour/myads/classic/<int:uid>",
"/harbour/ready",
"/harbour/user",
"/harbour/version",
"/metrics/",
"/metrics/alive",
"/metrics/ready",
"/metrics/<string:bibcode>",
"/oauth/authorize",
"/oauth/errors/",
"/oauth/info/",
"/oauth/invalid/",
"/oauth/ping/",
"/oauth/ping/",
"/oauth/token",
"/objects/",
"/objects/alive",
"/objects/nedsrv",
"/objects/query",
"/objects/ready",
"/objects/<string:objects>",
"/objects/<string:objects>/<string:source>",
"/orcid/alive",
"/orcid/exchangeOAuthCode",
"/orcid/export/<iso_datestring>",
"/orcid/get-profile/<orcid_id>",
"/orcid/<orcid_id>/orcid-profile",
"/orcid/<orcid_id>/orcid-profile/<type>",
"/orcid/<orcid_id>/orcid-work",
"/orcid/<orcid_id>/orcid-works",
"/orcid/<orcid_id>/orcid-works/<putcode>",
"/orcid/orcid-name/<orcid_id>",
"/orcid/preferences/<orcid_id>",
"/orcid/ready",
"/orcid/update-orcid-profile/<orcid_id>",
"/orcid/update-status/<orcid_id>",
"/protected",
"/ready",
"/reference/alive",
"/reference/ready",
"/reference/text",
"/reference/text/<reference>",
"/reference/xml",
"/resolver/alive",
"/resolver/<bibcode>",
"/resolver/<bibcode>/<link_type>",
"/resolver/<bibcode>/<link_type>:<path:id>",
"/resolver/delete",
"/resolver/ready",
"/resolver/resources",
"/resolver/update",
"/search/alive",
"/search/bigquery",
"/search/qtree",
"/search/query",
"/search/ready",
"/search/resources",
"/search/status",
"/search/tvrh",
"/status",
"/user/<string:identifier>",
"/vault/alive",
"/vault/configuration",
"/vault/configuration/<key>",
"/vault/execute_query/<queryid>",
"/vault/get-myads/<user_id>",
"/vault/myads-import",
"/vault/myads-users/<iso_datestring>",
"/vault/notifications",
"/vault/notifications/<myads_id>",
"/vault/query",
"/vault/query2svg/<queryid>",
"/vault/query/<queryid>",
"/vault/ready",
"/vault/user-data",
"/vis/alive",
"/vis/author-network",
"/vis/paper-network",
"/vis/ready",
"/vis/word-cloud",
]
},
"adsws.feedback": {
"base": "/v1/feedback",
"endpoints": [
"/oauth/authorize",
"/oauth/invalid/",
"/oauth/errors/",
"/oauth/token",
"/oauth/ping/",
"/oauth/ping/",
"/oauth/info/",
"/ready",
"/alive",
"/slack"
]
}
}
class CitationHelperServiceTest(unittest.TestCase):
def test_resources(self):
# /v1/resources doesn't exist (but I think it should exist)
r = anonymous_user.get('/resources')
assert r.status_code == 404
# the response is organized from the perspective of the ADS developer/ API maintainer
# but API users probably expect to see something like:
# {
# '/v1': {
# 'endpoints': [
# '/search/query'
# ...
# ]
# },
# '/v2': {
# 'endpoints': [
# '/search/newquery',
# ...
# ]
# }
# }
#
# If we run two versions of the API alongside, I don't see
# how the current structure can communicate two different
# 'bases'
# hack to get to the resources
url = '/'.join(anonymous_user.get_config('API_URL').split('/')[0:-1])
r = anonymous_user.get( url + '/resources')
resources = r.json()
for k, v in list(api_resources.items()):
self.assertCountEqual(api_resources[k], resources[k])
def test_limits_authenticated(self):
self.check_limits(user=authenticated_user)
def test_limits_bbb(self):
self.check_limits(user=bumblebee_user)
def check_limits(self, user=authenticated_user):
# Check the response contains Headers
# and the limits are there
r = user.get('/search/query', params={'q': 'title:"%s"' % time.time()})
assert r.headers['X-Ratelimit-Limit']
old_limit = int(r.headers['X-RateLimit-Remaining'])
r = user.get('/search/query', params={'q': 'title:"%s"' % time.time()})
assert r.headers['X-RateLimit-Remaining'] == str(old_limit-1)
assert 'X-RateLimit-Reset' in r.headers
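        # The three X-RateLimit-* headers form the throttling contract: Limit is
        # the per-window quota, Remaining decreases by one per request, and
        # Reset gives the time at which the window rolls over (semantics assumed
        # from common API conventions, not re-verified against the ADS servers).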
def test_bootstrap(self):
expires = datetime.datetime.fromordinal(adsmutils.get_date().toordinal() + 5)
params = {'expires': expires.isoformat(), 'ratelimit': 0.001, 'create_new' : False}
r = authenticated_user.get('/accounts/bootstrap', params=params)
a = r.json()
r = anonymous_user.get('/accounts/bootstrap', params=params)
b = r.json()
# currently fails, it returns 'anonymous' for the
# authenticated user if the user in db has empty 'is_active' column
# also, the ratelimits must allow for more clients (i.e. be not fully
# consumed)
assert a['username'] != b['username']
assert a['access_token'] != b['access_token']
assert a['username'] == 'tester@ads'
assert b['username'] == 'anonymous@ads'
# repeating the bootstrap request should give you the
# same access token
for x in range(5):
r = anonymous_user.get('/accounts/bootstrap', params=params, headers={'Authorization': 'Bearer %s' % b['access_token']})
assert r.json()['access_token'] == b['access_token']
for x in range(5):
r = authenticated_user.get('/accounts/bootstrap', params=params)
assert r.json()['access_token'] == a['access_token']
def test_crossx_headers(self):
# XXX: this should be improved (but in general, the microservices
# should test for headers that they require (e.g. Orcid-Authorization
        # is tested in orcid))
for endpoint in [
'/accounts/bootstrap'
]:
r = bumblebee_user.options(endpoint)
# the value of this header will differ between staging and production
assert 'access-control-allow-origin' in r.headers
assert 'ui.adsabs.harvard.edu' in r.headers['access-control-allow-origin']
assert 'access-control-allow-headers' in r.headers
assert r.headers['access-control-allow-headers']
if __name__ == '__main__':
unittest.main() |
the-stack_106_31912 | import unittest
import pandas as pd
# from sklearn.datasets import load_boston
import mesostat.utils.pandas_helper as ph
class TestMetricAutocorr(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
data = [
['cat', 5, 'F', 'Susan'],
['cat', 6, 'M', 'Johnny'],
['dog', 10, 'M', 'Bork'],
['dog', 12, 'F', 'Fufu'],
['rat', 3, 'F', 'Mika']
]
self.df = pd.DataFrame(data, columns=['species', 'weight', 'sex', 'name'])
# dataDict = load_boston()
# self.dataExample = pd.DataFrame(dataDict['data'], columns=dataDict['feature_names'])
def test_first_row(self):
row1 = ph.pd_first_row(self.df)[1]
row2 = ph.pd_is_one_row(ph.pd_query(self.df, {'name': 'Susan'}))[1]
self.assertTrue(row1.equals(row2))
def test_rows_colval(self):
df1 = ph.pd_rows_colval(self.df, 'species', 'dog')
df2 = ph.pd_query(self.df, {'species': 'dog'})
self.assertTrue(df1.equals(df2))
def test_pd_query(self):
# Test no query has no result
df1 = ph.pd_query(self.df, {})
self.assertTrue(df1.equals(self.df))
# Test empty dataframe queries to empty dataframe
df1 = ph.pd_query(pd.DataFrame(), {'species': 'dog'})
self.assertEqual(len(df1), 0)
# Test non-existing column or value
df1 = ph.pd_query(pd.DataFrame(), {'house': 10})
df2 = ph.pd_query(pd.DataFrame(), {'species': 'monkey'})
self.assertEqual(len(df1), 0)
self.assertEqual(len(df2), 0)
# Assert multiquery
df1 = ph.pd_query(self.df, {'species': 'dog', 'sex': 'F'})
row = ph.pd_is_one_row(df1)[1]
self.assertEqual(row['name'], 'Fufu')
def test_row_exists(self):
isRowGood = ph.pd_row_exists(self.df, ['dog', 10, 'M', 'Bork'])
isRowBad = ph.pd_row_exists(self.df, ['dog', 10, 'M', 'Johnny'])
self.assertTrue(isRowGood)
self.assertFalse(isRowBad)
def test_append_row(self):
dfNew = self.df.copy()
dfNew = ph.pd_append_row(dfNew, ['rat', 4, 'F', 'Gaga'])
self.assertEqual(len(self.df), 5)
self.assertEqual(len(dfNew), 6)
def test_outer_product(self):
pass
# outerDict = {
# 'aaa': [1, None, 3],
# 'bbb': ['cat', 'dog', 'rat', None]
# }
#
# print(pandas_helper.outer_product_df(outerDict))
if __name__ == '__main__':
    unittest.main()
|
the-stack_106_31913 | from examples.scheduling.toy_rcpsp_examples import (
MyExampleMRCPSPDomain_WithCost,
MyExampleRCPSPDomain,
)
from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain
# Testing the binding between skdecide and discrete-optimization lib
def create_do_from_sk():
rcpsp_domain = MyExampleRCPSPDomain()
do_domain = build_do_domain(rcpsp_domain)
print("Loading rcpsp domain :resulting class in DO : ", do_domain.__class__)
rcpsp_domain = MyExampleMRCPSPDomain_WithCost()
do_domain = build_do_domain(rcpsp_domain)
print(
"Loading multimode-rcpsp domain : resulting class in DO : ", do_domain.__class__
)
from examples.discrete_optimization.rcpsp_multiskill_parser_example import (
get_data_available_ms,
)
from skdecide.hub.domain.rcpsp.rcpsp_sk_parser import load_multiskill_domain
rcpsp_domain = load_multiskill_domain(get_data_available_ms()[0])
do_domain = build_do_domain(rcpsp_domain)
print(
"Loading multiskill-rcpsp domain : resulting class in DO : ",
do_domain.__class__,
)
if __name__ == "__main__":
create_do_from_sk()
|
the-stack_106_31914 | from vaccine_feed_ingest_schema import location
from vaccine_feed_ingest.stages import enrichment
def test_add_provider_from_name_minimal(minimal_location):
enrichment._add_provider_from_name(minimal_location)
def test_add_provider_from_name(full_location):
# Clear parent prganization to check
full_location.parent_organization = None
assert not full_location.parent_organization
links = enrichment._generate_link_map(full_location)
assert "rite_aid" not in links
enrichment._add_provider_from_name(full_location)
links = enrichment._generate_link_map(full_location)
assert "rite_aid" in links
assert links["rite_aid"] == "5952"
assert full_location.parent_organization
assert str(full_location.parent_organization.id) == "rite_aid"
def test_add_source_link_minimal(minimal_location):
enrichment._add_source_link(minimal_location)
def test_add_source_link(full_location):
# Clear parent prganization to check
full_location.links = None
enrichment._add_source_link(full_location)
links = enrichment._generate_link_map(full_location)
assert full_location.source.source in links
assert links[full_location.source.source] == full_location.source.id
def test_normalize_phone_format(minimal_location):
minimal_location.contact = [
location.Contact(phone="(800) 456-7890"),
location.Contact(phone="1-415-789-3456"),
location.Contact(phone="+1 (415)888-8888"),
location.Contact(phone="888-888-8888 x8888888888"),
]
enrichment._normalize_phone_format(minimal_location)
assert len(minimal_location.contact) == 4
expected = {
"(800) 456-7890",
"(415) 789-3456",
"(415) 888-8888",
"888-888-8888 x8888888888",
}
actual = {entry.phone for entry in minimal_location.contact if entry.phone}
assert expected == actual
def test_add_provider_tag_minimal(minimal_location):
enrichment._add_provider_tag(minimal_location)
def test_add_provider_tag(full_location):
enrichment._add_provider_tag(full_location)
links = enrichment._generate_link_map(full_location)
assert enrichment.PROVIDER_TAG in links
assert links[enrichment.PROVIDER_TAG] == str(full_location.parent_organization.id)
def test_process_location_minimal(minimal_location):
assert enrichment._process_location(minimal_location) is None
def test_process_location(full_location):
assert enrichment._process_location(full_location)
|
the-stack_106_31915 | import numpy as np
class HumanOthelloPlayer():
def __init__(self, game):
self.game = game
def play(self, board):
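        # Prompt the human for a move as "row col" (zero-based indices into the
        # n x n board); an x of -1 is treated as the pass action (index n**2).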
# display(board)
valid = self.game.getValidMoves(board, 1)
for i in range(len(valid)):
if valid[i]:
print(int(i/self.game.n), int(i%self.game.n))
while True:
a = input()
            x, y = [int(x) for x in a.split(' ')]
            a = self.game.n * x + y if x != -1 else self.game.n ** 2
if valid[a]:
break
else:
print('Invalid')
return a
class GreedyOthelloPlayer():
def __init__(self, game):
self.game = game
def play(self, board):
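        # One-ply greedy search: evaluate every legal action with the game's
        # score function and return the move with the best immediate score
        # (ties broken by the lowest action index after sorting).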
valids = self.game.getValidMoves(board, 1)
candidates = []
for a in range(self.game.getActionSize()):
if valids[a]==0:
continue
nextBoard, _ = self.game.getNextState(board, 1, a)
score = self.game.getScore(nextBoard, 1)
candidates += [(-score, a)]
candidates.sort()
return candidates[0][1]
|
the-stack_106_31918 | #!/usr/bin/env python
"""End to end tests for lib.flows.general.memory."""
import os
from grr.client.components.rekall_support import rekall_types as rdf_rekall_types
from grr.endtoend_tests import base
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flow_runner
class AbstractTestAnalyzeClientMemory(base.ClientTestBase):
"""Test AnalyzeClientMemory (Rekall).
We use the rekall caching profile server for these tests, since we may not
have direct internet access. It may be necessary to manually populate the
cache with lib.rekall_profile_server.GRRRekallProfileServer.GetMissingProfiles
on the console to make these tests pass.
"""
flow = "AnalyzeClientMemory"
args = {"request": rdf_rekall_types.RekallRequest()}
def setUpRequest(self):
raise NotImplementedError("Implemented by subclasses")
def setUp(self):
self.setUpRequest()
self.old_config = config_lib.CONFIG.Get("Rekall.profile_server")
if "Test Context" in config_lib.CONFIG.context:
# We're running in a test context, where the rekall repository server is
# set to TestRekallRepositoryProfileServer, which won't actually work for
# an end to end test. We change it temporarily to allow the test to pass.
config_lib.CONFIG.Set("Rekall.profile_server", "GRRRekallProfileServer")
super(AbstractTestAnalyzeClientMemory, self).setUp()
def tearDown(self):
if "Test Context" in config_lib.CONFIG.context:
config_lib.CONFIG.Set("Rekall.profile_server", self.old_config)
super(AbstractTestAnalyzeClientMemory, self).tearDown()
def CheckFlow(self):
self.responses = self.CheckCollectionNotEmptyWithRetry(
self.session_id.Add(flow_runner.RESULTS_SUFFIX), self.token)
def OpenFlow(self):
"""Returns the flow used on this test."""
return aff4.FACTORY.Open(str(self.session_id), token=self.token)
class AbstractTestAnalyzeClientMemoryWindows(AbstractTestAnalyzeClientMemory,
base.AutomatedTest):
platforms = ["Windows"]
class TestAnalyzeClientMemoryWindowsPSList(
AbstractTestAnalyzeClientMemoryWindows):
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="pslist")
]
class TestAnalyzeClientMemoryWindowsModules(
AbstractTestAnalyzeClientMemoryWindows):
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="modules")
]
class TestAnalyzeClientMemoryWindowsDLLList(
AbstractTestAnalyzeClientMemoryWindows):
"""Run rekall DLL list and look for the GRR process."""
def setUpRequest(self):
self.binaryname = "svchost.exe"
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(
plugin="dlllist",
args=dict(
proc_regex=self.binaryname, method="PsActiveProcessHead"))
]
def CheckFlow(self):
super(TestAnalyzeClientMemoryWindowsDLLList, self).CheckFlow()
# Make sure the dlllist found our process by regex:
response_str = "".join([x.json_messages for x in self.responses])
self.assertIn(self.binaryname, response_str)
class TestAnalyzeClientMemoryMac(AbstractTestAnalyzeClientMemory,
base.AutomatedTest):
"""Runs Rekall on Macs."""
platforms = ["Darwin"]
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="pslist")
]
def CheckFlow(self):
super(TestAnalyzeClientMemoryMac, self).CheckFlow()
binary_name = config_lib.CONFIG.Get(
"Client.binary_name", context=["Client Context", "Platform:Darwin"])
self.assertTrue(
any([
binary_name in response.json_messages for response in self.responses
]))
class TestAnalyzeClientMemoryLinux(AbstractTestAnalyzeClientMemory):
"""Runs Rekall on Linux."""
platforms = ["Linux"]
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="pslist")
]
def CheckForInit(self):
super(TestAnalyzeClientMemoryLinux, self).CheckFlow()
self.assertTrue(
any(["\"init\"" in r.json_messages for r in self.responses]))
def CheckFlow(self):
self.CheckForInit()
class TestAnalyzeClientMemoryLoggingWorks(AbstractTestAnalyzeClientMemory):
"""Runs pslist with DEBUG logging and checks that we got DEBUG messages."""
platforms = ["Linux", "Windows", "Darwin"]
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="pslist")
]
self.args["request"].session["logging_level"] = "DEBUG"
def CheckFlow(self):
super(TestAnalyzeClientMemoryLoggingWorks, self).CheckFlow()
self.assertIn("\"level\":\"DEBUG\"", self.responses[0].json_messages)
class TestAnalyzeClientMemoryNonexistantPlugin(AbstractTestAnalyzeClientMemory):
"""Tests flow failure when a plugin doesn't exist."""
platforms = ["Linux", "Windows"]
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="idontexist")
]
def CheckForError(self, flow_obj):
self.assertEqual(flow_obj.context.state, "ERROR")
def CheckForInvalidPlugin(self, flow_obj):
self.assertIn("invalid plugin", str(flow_obj.context.backtrace).lower())
def CheckFlow(self):
flow = self.OpenFlow()
self.CheckForError(flow)
self.CheckForInvalidPlugin(flow)
class TestAnalyzeClientMemoryPluginBadParamsFails(
TestAnalyzeClientMemoryNonexistantPlugin):
"""Tests flow failure when a plugin is given wrong parameters."""
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(
plugin="pslist", args=dict(abcdefg=12345))
]
def CheckForInvalidArgs(self, flow_obj):
self.assertIn("InvalidArgs", flow_obj.context.backtrace)
def CheckFlow(self):
flow = self.OpenFlow()
# First check that the flow ended up with an error
self.CheckForError(flow)
self.CheckForInvalidArgs(flow)
class TestAnalyzeClientMemoryNonexistantPluginWithExisting(
TestAnalyzeClientMemoryLinux, TestAnalyzeClientMemoryNonexistantPlugin):
"""Tests flow failure when failing and non failing plugins run together."""
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="pslist"),
rdf_rekall_types.PluginRequest(plugin="idontexist")
]
def CheckFlow(self):
super(TestAnalyzeClientMemoryNonexistantPlugin, self).CheckFlow()
flow = self.OpenFlow()
# idontexist should throw an error and have invalid plugin in the backtrace.
self.CheckForError(flow)
self.CheckForInvalidPlugin(flow)
# but pslist should still give results.
self.CheckForInit()
class TestSigScan(AbstractTestAnalyzeClientMemoryWindows):
"""Tests signature scanning on Windows."""
def setUpRequest(self):
# This is a signature for the tcpip.sys driver on Windows 7. If you are
# running a different version, a hit is not guaranteed.
sig_path = os.path.join(config_lib.CONFIG["Test.end_to_end_data_dir"],
"tcpip.sig")
signature = open(sig_path, "rb").read().strip()
args = {"scan_kernel": True, "signature": [signature]}
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(
plugin="sigscan", args=args)
]
def CheckFlow(self):
super(TestSigScan, self).CheckFlow()
self.assertTrue(
any([
"Hit in kernel AS:" in response.json_messages
for response in self.responses
]))
class TestYarascanExists(AbstractTestAnalyzeClientMemory):
"""Tests the client has been built with yara."""
platforms = ["Linux", "Windows", "Darwin"]
def setUpRequest(self):
self.args["request"].plugins = [
rdf_rekall_types.PluginRequest(plugin="yarascan")
]
def CheckForError(self, flow_obj):
# Invoking yarascan without arguments will report an ERROR.
self.assertEqual(flow_obj.context.state, "ERROR")
def CheckForInvalidPlugin(self, flow_obj):
# When a plugin doesn't exist, Rekall raises with an "Invalid plugin"
self.assertNotIn("invalid plugin", str(flow_obj.context.backtrace).lower())
# Yarascan without arguments will generate a PluginError as it requires
# arguments.
self.assertIn("PluginError", flow_obj.context.backtrace)
def CheckFlow(self):
flow = self.OpenFlow()
self.CheckForError(flow)
self.CheckForInvalidPlugin(flow)
|
the-stack_106_31919 | from datetime import timedelta
from airflow.utils.dates import days_ago
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
args = {
'owner': 'Airflow',
'start_date': days_ago(2),
}
script="""
set -e
cd /usr/local/airflow/dags
git pull
"""
with DAG(
dag_id='dag-update',
default_args=args,
schedule_interval='0/30 0 * * *',
dagrun_timeout=timedelta(minutes=5),
tags=['airflow', 'dag', 'update']
) as dag:
first = DummyOperator(task_id='first')
# [START howto_operator_bash]
work = BashOperator(
task_id='git_pull',
bash_command=script
)
# [END howto_operator_bash]
last = DummyOperator(task_id='last')
first >> work >> last
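    # Task chain: the two DummyOperators simply bracket the BashOperator, which
    # runs `git pull` inside the dags folder so the scheduler picks up updated
    # DAG files on its next parse.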
if __name__ == "__main__":
dag.cli()
|
the-stack_106_31920 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import logging
import logging.config
import os
import sys
import warnings
import socket
from os.path import dirname, expanduser
from shutil import which
import tornado.ioloop
from PIL import Image
from tornado.httpserver import HTTPServer
from tornado.netutil import bind_unix_socket
from thumbor.config import Config
from thumbor.console import get_server_parameters
from thumbor.context import Context
from thumbor.importer import Importer
from thumbor.signal_handler import setup_signal_handler
def get_as_integer(value):
try:
return int(value)
except (ValueError, TypeError):
return None
def get_config(config_path, use_environment=False):
if use_environment:
Config.allow_environment_variables()
lookup_paths = [os.curdir, expanduser("~"), "/etc/", dirname(__file__)]
return Config.load(config_path, conf_name="thumbor.conf", lookup_paths=lookup_paths)
def configure_log(config, log_level):
if config.THUMBOR_LOG_CONFIG and config.THUMBOR_LOG_CONFIG != "":
logging.config.dictConfig(config.THUMBOR_LOG_CONFIG)
else:
logging.basicConfig(
level=getattr(logging, log_level),
format=config.THUMBOR_LOG_FORMAT,
datefmt=config.THUMBOR_LOG_DATE_FORMAT,
)
def get_importer(config):
importer = Importer(config)
importer.import_modules()
if importer.error_handler_class is not None:
importer.error_handler = importer.error_handler_class(config) # pylint: disable=not-callable
return importer
def validate_config(config, server_parameters):
if server_parameters.security_key is None:
server_parameters.security_key = config.SECURITY_KEY
if not isinstance(server_parameters.security_key, (bytes, str)):
raise RuntimeError(
"No security key was found for this instance of thumbor. "
+ "Please provide one using the conf file or a security key file."
)
if config.ENGINE or config.USE_GIFSICLE_ENGINE:
# Error on Image.open when image pixel count is above MAX_IMAGE_PIXELS
warnings.simplefilter("error", Image.DecompressionBombWarning)
if config.USE_GIFSICLE_ENGINE:
server_parameters.gifsicle_path = which("gifsicle")
if server_parameters.gifsicle_path is None:
raise RuntimeError(
"If using USE_GIFSICLE_ENGINE configuration to True,"
" the `gifsicle` binary must be in the PATH "
"and must be an executable."
)
def get_context(server_parameters, config, importer):
return Context(server=server_parameters, config=config, importer=importer)
def get_application(context):
return context.modules.importer.import_class(context.app_class)(context)
def run_server(application, context):
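    # Reuse an inherited file descriptor (numeric fd or unix-socket path) when
    # one is configured, otherwise bind to ip:port; in both cases start() forks
    # the configured number of worker processes.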
server = HTTPServer(application, xheaders=True)
if context.server.fd is not None:
fd_number = get_as_integer(context.server.fd)
if fd_number is not None:
sock = socket.socket(fileno=fd_number) # pylint: disable=too-many-function-args
else:
sock = bind_unix_socket(context.server.fd)
server.add_socket(sock)
logging.debug("thumbor starting at fd %s", context.server.fd)
else:
server.bind(context.server.port, context.server.ip)
logging.debug(
"thumbor starting at %s:%d", context.server.ip, context.server.port
)
server.start(context.server.processes)
return server
def main(arguments=None):
"""Runs thumbor server with the specified arguments."""
if arguments is None:
arguments = sys.argv[1:]
server_parameters = get_server_parameters(arguments)
config = get_config(
server_parameters.config_path, server_parameters.use_environment
)
configure_log(config, server_parameters.log_level.upper())
validate_config(config, server_parameters)
importer = get_importer(config)
with get_context(server_parameters, config, importer) as context:
application = get_application(context)
server = run_server(application, context)
setup_signal_handler(server, config)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_106_31921 | """
Name: 089 CyberPi mBuild LED-matrix timer
Hardware: CyberPi, mBuild LED matrix
Description:
Use the LED matrix as a visual timer: press button A to count time by lighting
the matrix LEDs one by one, press button B to count time by turning them off
one by one.
APIs used and what they do:
1. cyberpi.led_matrix.on(x, y, 1)
   Turn on the LED at the given x, y coordinate of the matrix.
2. cyberpi.led_matrix.off(x, y, 1)
   Turn off the LED at the given x, y coordinate of the matrix.
3. cyberpi.led_matrix.clear(1)
   Turn off all LEDs on the matrix.
Difficulty: 4 stars
Supported modes: upload mode, live mode
None
"""
# ---------- Program divider ---------------- Program divider ---------------- Program divider ----------
import cyberpi
import time
while True:
cyberpi.display.label('按下A键开始计时', 16, 'center')
if cyberpi.controller.is_press('a'):
cyberpi.display.label('计时中', 24, 'center')
for i in range(0, 8):
for j in range(0, 16, 1):
cyberpi.led_matrix.on(j, i, 1)
time.sleep(1)
cyberpi.led_matrix.clear(1)
cyberpi.display.label('计时结束', '24', 'center')
cyberpi.audio.play_until('prompt-tone')
if cyberpi.controller.is_press('b'):
cyberpi.display.label('计时中', 24, 'center')
for i in range(0, 8):
for j in range(0, 16, 1):
cyberpi.led_matrix.on(j, i, 1)
for m in range(0, 8):
for n in range(0, 16, 1):
cyberpi.led_matrix.off(n, m, 1)
time.sleep(1)
cyberpi.display.label('计时结束', '24', 'center')
cyberpi.audio.play_until('prompt-tone') |
the-stack_106_31922 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 2.2.8 on 2020-08-12 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("node_man", "0012_subscription_name"),
]
operations = [
migrations.AddField(
model_name="packages",
name="deploy_type",
field=models.CharField(blank=True, max_length=64, null=True, verbose_name="部署方式"),
),
migrations.AddField(
model_name="packages",
name="source_app_code",
field=models.CharField(blank=True, max_length=64, null=True, verbose_name="来源系统APP CODE"),
),
migrations.AddField(
model_name="packages",
name="version_log",
field=models.TextField(blank=True, null=True, verbose_name="版本日志"),
),
migrations.AddField(
model_name="packages",
name="version_log_en",
field=models.TextField(blank=True, null=True, verbose_name="英文版本日志"),
),
migrations.AlterField(
model_name="processstatus",
name="status",
field=models.CharField(
choices=[
("RUNNING", "RUNNING"),
("UNKNOWN", "UNKNOWN"),
("TERMINATED", "TERMINATED"),
("NOT_INSTALLED", "NOT_INSTALLED"),
("UNREGISTER", "UNREGISTER"),
],
db_index=True,
default="UNKNOWN",
max_length=45,
verbose_name="进程状态",
),
),
]
|
the-stack_106_31924 | """Support for Xiaomi water heaters."""
import logging
import math
from homeassistant.const import * # noqa: F401
from homeassistant.components.water_heater import (
DOMAIN as ENTITY_DOMAIN,
WaterHeaterEntity,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_OPERATION_MODE,
)
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401
MiotToggleEntity,
async_setup_config_entry,
bind_services_to_entries,
)
from .core.miot_spec import (
MiotSpec,
MiotService,
)
_LOGGER = logging.getLogger(__name__)
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
DEFAULT_MIN_TEMP = 40
DEFAULT_MAX_TEMP = 65
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
hass.data.setdefault(DATA_KEY, {})
hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
config['hass'] = hass
model = str(config.get(CONF_MODEL) or '')
spec = hass.data[DOMAIN]['miot_specs'].get(model)
entities = []
if isinstance(spec, MiotSpec):
for srv in spec.get_services(ENTITY_DOMAIN, 'kettle'):
if not srv.get_property('mode', 'target_temperature'):
continue
entities.append(MiotWaterHeaterEntity(config, srv))
for entity in entities:
hass.data[DOMAIN]['entities'][entity.unique_id] = entity
async_add_entities(entities, update_before_add=True)
bind_services_to_entries(hass, SERVICE_TO_METHOD)
class MiotWaterHeaterEntity(MiotToggleEntity, WaterHeaterEntity):
def __init__(self, config, miot_service: MiotService):
super().__init__(miot_service, config=config, logger=_LOGGER)
self._prop_status = miot_service.get_property('status')
self._prop_mode = miot_service.get_property('mode')
self._prop_modes = []
if self._prop_mode:
self._prop_modes.append(self._prop_mode)
self._prop_modes.extend(miot_service.get_properties('water_level'))
self._prop_temperature = miot_service.get_property('temperature', 'indoor_temperature')
self._prop_target_temp = miot_service.get_property('target_temperature')
self._prev_target_temp = None
if self._prop_target_temp:
self._supported_features |= SUPPORT_TARGET_TEMPERATURE
if self._prop_modes:
self._supported_features |= SUPPORT_OPERATION_MODE
async def async_update(self):
await super().async_update()
if not self._available:
return
if self._prop_power:
if not self._prop_power.readable and self._prop_status:
# https://github.com/al-one/hass-xiaomi-miot/issues/30
val = self._prop_status.from_dict(self._state_attrs)
if val is not None:
off = val in self._prop_status.list_search('Off')
await self.async_update_attrs({
self._prop_power.full_name: not off,
})
self._update_sub_entities(self._prop_power.name, domain='switch')
@property
def state(self):
"""Return the current state."""
sta = self.current_operation
mds = []
if self._prop_mode:
mds = self._prop_mode.list_descriptions()
if sta is None or sta not in mds:
if self._prop_status:
val = self._prop_status.from_dict(self._state_attrs)
if val is not None:
sta = self._prop_status.list_description(val)
if sta is None and self._prop_power and self._prop_power.readable:
sta = STATE_ON if self._prop_power.from_dict(self._state_attrs) else STATE_OFF
if sta:
sta = str(sta).lower()
return sta
@property
def current_operation(self):
"""Return current operation ie. eco, electric, performance, ..."""
for p in self._prop_modes:
val = p.from_dict(self._state_attrs)
if val is not None:
return p.list_description(val)
return None
@property
def operation_list(self):
"""Return the list of available operation modes."""
for p in self._prop_modes:
return p.list_descriptions() or []
return None
def set_operation_mode(self, mode):
"""Set new target operation mode."""
for p in self._prop_modes:
val = p.list_value(mode)
return self.set_property(p, val)
raise NotImplementedError()
@property
def current_temperature(self):
"""Return the current temperature."""
if self._prop_temperature:
return round(self._prop_temperature.from_dict(self._state_attrs) or 0, 2)
return None
@property
def temperature_unit(self):
prop = self._prop_temperature or self._prop_target_temp
if prop:
if prop.unit in ['celsius', TEMP_CELSIUS]:
return TEMP_CELSIUS
if prop.unit in ['fahrenheit', TEMP_FAHRENHEIT]:
return TEMP_FAHRENHEIT
if prop.unit in ['kelvin', TEMP_KELVIN]:
return TEMP_KELVIN
return TEMP_CELSIUS
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if self._prop_target_temp:
val = kwargs.get(ATTR_TEMPERATURE) or 0
stp = self._prop_target_temp.range_step()
if stp and stp > 1:
val = round(val / stp) * stp
elif self._prev_target_temp is None:
val = round(val)
elif val >= self._prev_target_temp:
val = math.ceil(val)
else:
val = int(val)
ret = self.set_property(self._prop_target_temp, val)
if ret:
self._prev_target_temp = val
return ret
raise NotImplementedError()
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._prop_target_temp:
val = round(self._prop_target_temp.from_dict(self._state_attrs) or 0, 2)
if val:
self._prev_target_temp = val
elif self._prev_target_temp:
val = self._prev_target_temp
return val
return None
@property
def target_temperature_high(self):
"""Return the highbound target temperature we try to reach."""
if self._prop_target_temp:
return self._prop_target_temp.range_max()
return None
@property
def target_temperature_low(self):
"""Return the lowbound target temperature we try to reach."""
if self._prop_target_temp:
return self._prop_target_temp.range_min()
return None
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.target_temperature_low or DEFAULT_MIN_TEMP
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.target_temperature_high or DEFAULT_MAX_TEMP
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return None
def turn_away_mode_on(self):
"""Turn away mode on."""
raise NotImplementedError()
def turn_away_mode_off(self):
"""Turn away mode off."""
raise NotImplementedError()
|
the-stack_106_31925 | # -*- coding: utf-8 -*-
#
# This file is part of MIEZE simulation.
# Copyright (C) 2019, 2020 TUM FRM2 E21 Research Group.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Conversion from eigenfrequency to indexes of the capacity boxes."""
from scripts.analysis.find_best_capacity_index import compute_index
from scripts.compute_values.eigenfrequency_capacity import compute_capacity_for_given_eigenfrequency
from scripts.parameters import computed_params
from scripts.utils import transform_frequency
def main(eigenfrequency_value):
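    # Convert the requested eigenfrequency into the capacitance it requires,
    # look up the closest capacity-box index combination, and report the
    # equivalent HRF value for the operator.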
capacity = compute_capacity_for_given_eigenfrequency(eigenfrequency_value, computed_params)
print(f'For the given eigenfrequency of {eigenfrequency_value}, the desired capacity is: {capacity}\n')
numerical_values, message = compute_index(capacity)
print(message)
hrf = transform_frequency(eigenfrequency_value)
print(f'hrf = {str(hrf)}')
return numerical_values, message
if __name__ == '__main__':
frequency_value = int(input('welche frequenz?'))
main(frequency_value)
|
the-stack_106_31926 | # Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Dict, List, Optional
from unittest.mock import Mock
import aiounittest
from matrix_content_scanner.utils.constants import ErrCodes
from matrix_content_scanner.utils.errors import ContentScannerRestError, FileDirtyError
from matrix_content_scanner.utils.types import MediaDescription
from tests.testutils import (
ENCRYPTED_FILE_METADATA,
SMALL_PNG,
SMALL_PNG_ENCRYPTED,
get_content_scanner,
)
class ScannerTestCase(aiounittest.AsyncTestCase):
def setUp(self) -> None:
self.downloader_res = MediaDescription(
content_type="image/png",
content=SMALL_PNG,
)
async def download_file(
media_path: str,
thumbnail_params: Optional[Dict[str, List[str]]] = None,
) -> MediaDescription:
return self.downloader_res
self.downloader_mock = Mock(side_effect=download_file)
mcs = get_content_scanner()
mcs.file_downloader.download_file = self.downloader_mock # type: ignore[assignment]
self.scanner = mcs.scanner
async def test_scan(self) -> None:
media = await self.scanner.scan_file("foo/bar")
self.assertEqual(media.content, SMALL_PNG)
async def test_scan_dirty(self) -> None:
self.scanner._script = "false"
with self.assertRaises(FileDirtyError):
await self.scanner.scan_file("foo/bar")
async def test_encrypted_file(self) -> None:
self._setup_encrypted()
media = await self.scanner.scan_file("foo/bar", ENCRYPTED_FILE_METADATA)
self.assertEqual(media.content, SMALL_PNG_ENCRYPTED)
async def test_cache(self) -> None:
await self.scanner.scan_file("foo/bar")
self.assertEqual(self.downloader_mock.call_count, 1)
media = await self.scanner.scan_file("foo/bar")
self.assertEqual(self.downloader_mock.call_count, 1)
self.assertEqual(media.content, SMALL_PNG)
async def test_cache_encrypted(self) -> None:
self._setup_encrypted()
await self.scanner.scan_file("foo/bar", ENCRYPTED_FILE_METADATA)
self.assertEqual(self.downloader_mock.call_count, 1)
media = await self.scanner.scan_file("foo/bar", ENCRYPTED_FILE_METADATA)
self.assertEqual(self.downloader_mock.call_count, 1)
self.assertEqual(media.content, SMALL_PNG_ENCRYPTED)
async def test_cache_download_thumbnail(self) -> None:
await self.scanner.scan_file("foo/bar")
self.assertEqual(self.downloader_mock.call_count, 1)
await self.scanner.scan_file("foo/bar", thumbnail_params={"width": ["50"]})
self.assertEqual(self.downloader_mock.call_count, 2)
async def test_different_encryption_key(self) -> None:
"""Tests that if some of the file's metadata changed, we don't match against the
cache and we download the file again.
Also tests that the scanner fails in the correct way if it can't decrypt a file.
"""
self._setup_encrypted()
await self.scanner.scan_file("foo/bar", ENCRYPTED_FILE_METADATA)
self.assertEqual(self.downloader_mock.call_count, 1)
modified_metadata = copy.deepcopy(ENCRYPTED_FILE_METADATA)
modified_metadata["file"]["key"]["k"] = "somethingelse"
with self.assertRaises(ContentScannerRestError) as cm:
await self.scanner.scan_file("foo/bar", modified_metadata)
self.assertEqual(cm.exception.http_status, 400)
self.assertEqual(cm.exception.reason, ErrCodes.FAILED_TO_DECRYPT)
self.assertEqual(self.downloader_mock.call_count, 2)
async def test_mimetype(self) -> None:
self.scanner._allowed_mimetypes = ["image/jpeg"]
with self.assertRaises(FileDirtyError):
await self.scanner.scan_file("foo/bar")
async def test_mimetype_encrypted(self) -> None:
self._setup_encrypted()
self.scanner._allowed_mimetypes = ["image/jpeg"]
with self.assertRaises(FileDirtyError):
await self.scanner.scan_file("foo/bar", ENCRYPTED_FILE_METADATA)
async def test_dont_cache_exit_codes(self) -> None:
self.scanner._exit_codes_to_ignore = [5]
# It's tricky to give a value to `scanner._script` that makes `_run_scan` return 5
# directly, so we just mock it here.
run_scan_mock = Mock(return_value=5)
self.scanner._run_scan = run_scan_mock # type: ignore[assignment]
# Scan the file, we'll check later that it wasn't cached.
with self.assertRaises(FileDirtyError):
await self.scanner.scan_file("foo/bar")
self.assertEqual(self.downloader_mock.call_count, 1)
# Update the mock so that the file is cached at the next scan.
run_scan_mock.return_value = 1
# Scan the file again to check that the file wasn't cached.
with self.assertRaises(FileDirtyError):
await self.scanner.scan_file("foo/bar")
self.assertEqual(self.downloader_mock.call_count, 2)
# The file should be cached now.
with self.assertRaises(FileDirtyError):
await self.scanner.scan_file("foo/bar")
self.assertEqual(self.downloader_mock.call_count, 2)
async def test_outside_temp_dir(self) -> None:
with self.assertRaises(FileDirtyError):
await self.scanner.scan_file("../bar")
def _setup_encrypted(self) -> None:
self.downloader_res.content_type = "application/octet-stream"
self.downloader_res.content = SMALL_PNG_ENCRYPTED
|
the-stack_106_31927 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsersOutlookTasksAttachmentsOperations:
"""UsersOutlookTasksAttachmentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_actions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create_upload_session(
self,
user_id: str,
outlook_task_id: str,
body: "models.Paths1Urds13UsersUserIdOutlookTasksOutlooktaskIdAttachmentsMicrosoftGraphCreateuploadsessionPostRequestbodyContentApplicationJsonSchema",
**kwargs
) -> "models.MicrosoftGraphUploadSession":
"""Invoke action createUploadSession.
Invoke action createUploadSession.
:param user_id: key: id of user.
:type user_id: str
:param outlook_task_id: key: id of outlookTask.
:type outlook_task_id: str
:param body: Action parameters.
:type body: ~users_actions.models.Paths1Urds13UsersUserIdOutlookTasksOutlooktaskIdAttachmentsMicrosoftGraphCreateuploadsessionPostRequestbodyContentApplicationJsonSchema
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphUploadSession, or the result of cls(response)
:rtype: ~users_actions.models.MicrosoftGraphUploadSession
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphUploadSession"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_upload_session.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'outlookTask-id': self._serialize.url("outlook_task_id", outlook_task_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'Paths1Urds13UsersUserIdOutlookTasksOutlooktaskIdAttachmentsMicrosoftGraphCreateuploadsessionPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphUploadSession', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_upload_session.metadata = {'url': '/users/{user-id}/outlook/tasks/{outlookTask-id}/attachments/microsoft.graph.createUploadSession'} # type: ignore
|
the-stack_106_31929 | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="DataProviderStatus")
@attr.s(auto_attribs=True)
class DataProviderStatus:
""" """
is_active: Union[Unset, bool] = UNSET
driver: Union[Unset, str] = UNSET
error: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
is_active = self.is_active
driver = self.driver
error = self.error
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if is_active is not UNSET:
field_dict["is_active"] = is_active
if driver is not UNSET:
field_dict["driver"] = driver
if error is not UNSET:
field_dict["error"] = error
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
is_active = d.pop("is_active", UNSET)
driver = d.pop("driver", UNSET)
error = d.pop("error", UNSET)
data_provider_status = cls(
is_active=is_active,
driver=driver,
error=error,
)
data_provider_status.additional_properties = d
return data_provider_status
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
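# Minimal usage sketch (hypothetical payload, not taken from a real API response):
# status = DataProviderStatus.from_dict({"is_active": True, "driver": "postgres", "region": "eu"})
# status.to_dict()   -> {"region": "eu", "is_active": True, "driver": "postgres"}
# status["region"]   -> "eu"  (unknown keys are preserved in additional_properties)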
|
the-stack_106_31930 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model."""
from collections.abc import Iterable
import os
import math
import numpy as np
from mindspore import log as logger
from ..common.tensor import Tensor
from ..nn.metrics import get_metrics
from .._checkparam import check_input_data, check_output_data, check_int_positive, check_bool, check_int
from .callback import _InternalCallbackParam, RunContext, _CallbackManager
from .. import context
from ..parallel._utils import _get_parallel_mode, _get_device_num, _get_global_rank, \
_get_parameter_broadcast, _device_number_check, _parameter_broadcast_check
from ..nn.metrics import Loss
from .. import nn
from ..nn.wrap.cell_wrapper import _VirtualDatasetCell
from .parallel_utils import ParallelMode
from ._utils import _to_full_tensor
from ..parallel._utils import _need_to_full
from ..common import dtype as mstype
from .dataset_helper import DatasetHelper
from . import amp
class Model:
"""
High-Level API for Training or Testing.
`Model` groups layers into an object with training and inference features.
Args:
network (Cell): A training or testing network.
loss_fn (Cell): Objective function, if loss_fn is None, the
network should contain the logic of loss and grads calculation, and the logic
of parallel if needed. Default: None.
optimizer (Cell): Optimizer for updating the weights. Default: None.
metrics (Union[dict, set]): A Dictionary or a set of metrics to be evaluated by the model during
training and testing. eg: {'accuracy', 'recall'}. Default: None.
eval_network (Cell): Network for evaluation. If not defined, `network` and `loss_fn` would be wrapped as
`eval_network`. Default: None.
eval_indexes (list): When defining the `eval_network`, if `eval_indexes` is None, all outputs of the
`eval_network` would be passed to metrics, otherwise `eval_indexes` must contain three
elements, including the positions of loss value, predicted value and label. The loss
value would be passed to the `Loss` metric, the predicted value and label would be passed
to other metric. Default: None.
amp_level (str): Option for argument `level` in `mindspore.amp.build_train_network`, level for mixed
precision training. Supports [O0, O2, O3]. Default: "O0".
- O0: Do not change.
- O2: Cast network to float16, keep batchnorm run in float32, using dynamic loss scale.
- O3: Cast network to float16, with additional property 'keep_batchnorm_fp32=False'.
O2 is recommended on GPU, O3 is recommended on Ascend.
loss_scale_manager (Union[None, LossScaleManager]): If it is None, the loss would not be scaled. Otherwise,
scale the loss by LossScaleManager. It is a key argument.
e.g. Use `loss_scale_manager=None` to set the value.
keep_batchnorm_fp32 (bool): Keep Batchnorm running in `float32`. If it is set to true, the level setting before
will be overwritten. Default: True.
Examples:
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal')
>>> self.bn = nn.BatchNorm2d(64)
>>> self.relu = nn.ReLU()
>>> self.flatten = nn.Flatten()
>>> self.fc = nn.Dense(64*224*224, 12) # padding=0
>>>
>>> def construct(self, x):
>>> x = self.conv(x)
>>> x = self.bn(x)
>>> x = self.relu(x)
>>> x = self.flatten(x)
>>> out = self.fc(x)
>>> return out
>>>
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
>>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
>>> dataset = get_dataset()
>>> model.train(2, dataset)
"""
def __init__(self, network, loss_fn=None, optimizer=None, metrics=None, eval_network=None,
eval_indexes=None, amp_level="O0", **kwargs):
self._network = network
self._loss_fn = loss_fn
self._optimizer = optimizer
self._loss_scale_manager = None
self._loss_scale_manager_set = False
self._keep_bn_fp32 = True
self._check_kwargs(kwargs)
self._amp_level = amp_level
self._process_amp_args(kwargs)
self._parallel_mode = _get_parallel_mode()
self._device_number = _get_device_num()
self._global_rank = _get_global_rank()
self._parameter_broadcast = _get_parameter_broadcast()
self._train_network = self._build_train_network()
self._build_eval_network(metrics, eval_network, eval_indexes)
self._build_predict_network()
def _process_amp_args(self, kwargs):
if self._amp_level in ["O0", "O3"]:
self._keep_bn_fp32 = False
if 'keep_batchnorm_fp32' in kwargs:
self._keep_bn_fp32 = kwargs['keep_batchnorm_fp32']
if 'loss_scale_manager' in kwargs:
self._loss_scale_manager = kwargs['loss_scale_manager']
self._loss_scale_manager_set = True
def _check_kwargs(self, kwargs):
for arg in kwargs:
if arg not in ['loss_scale_manager', 'keep_batchnorm_fp32']:
raise ValueError(f"Unsupport arg '{arg}'")
def _build_train_network(self):
"""Build train network"""
network = self._network
if self._optimizer:
if self._loss_scale_manager_set:
network = amp.build_train_network(network,
self._optimizer,
self._loss_fn,
level=self._amp_level,
loss_scale_manager=self._loss_scale_manager,
keep_batchnorm_fp32=self._keep_bn_fp32)
else:
network = amp.build_train_network(network,
self._optimizer,
self._loss_fn,
level=self._amp_level,
keep_batchnorm_fp32=self._keep_bn_fp32)
elif self._loss_fn:
network = nn.WithLossCell(network, self._loss_fn)
        # TODO: consider validating the case where loss_fn is set but optimizer is None
if self._parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL):
network.set_auto_parallel()
return network
def _build_eval_network(self, metrics, eval_network, eval_indexes):
"""Build the network for evaluation."""
self._metric_fns = get_metrics(metrics)
if not self._metric_fns:
return
if eval_network is not None:
if eval_indexes is not None and not (isinstance(eval_indexes, list) and len(eval_indexes) == 3):
raise ValueError("Eval_indexes must be a list or None. If eval_indexes is a list, length of it \
must be three. But got {}".format(eval_indexes))
self._eval_network = eval_network
self._eval_indexes = eval_indexes
else:
if self._loss_fn is None:
raise ValueError("loss_fn can not be None.")
self._eval_network = nn.WithEvalCell(self._network, self._loss_fn, self._amp_level in ["O2", "O3"])
self._eval_indexes = [0, 1, 2]
if self._parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL):
if self._optimizer:
self._eval_network = _VirtualDatasetCell(self._eval_network)
self._eval_network.set_auto_parallel()
def _build_predict_network(self):
"""Build the network for prediction."""
self._predict_network = self._network
if self._parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL):
self._predict_network = _VirtualDatasetCell(self._network)
self._predict_network.set_auto_parallel()
def _clear_metrics(self):
"""Clear metrics local values."""
for metric in self._metric_fns.values():
metric.clear()
def _update_metrics(self, outputs):
"""Update metrics local values."""
if not isinstance(outputs, tuple):
raise ValueError("The `outputs` is not tuple.")
if self._eval_indexes is not None and len(outputs) < 3:
raise ValueError("The length of `outputs` must be greater than or equal to 3, \
but got {}".format(len(outputs)))
for metric in self._metric_fns.values():
if self._eval_indexes is None:
metric.update(*outputs)
else:
if isinstance(metric, Loss):
metric.update(outputs[self._eval_indexes[0]])
else:
metric.update(outputs[self._eval_indexes[1]], outputs[self._eval_indexes[2]])
def _get_metrics(self):
"""Get metrics local values."""
metrics = dict()
for key, value in self._metric_fns.items():
metrics[key] = value.eval()
return metrics
def _get_scaling_sens(self):
"""get the scaling sens"""
scaling_sens = 1
if self._loss_scale_manager is not None:
scaling_sens = self._loss_scale_manager.get_loss_scale()
if self._parallel_mode == ParallelMode.DATA_PARALLEL:
scaling_sens /= self._device_number
return scaling_sens
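    # Worked example (illustrative numbers): with a loss scale of 1024 reported by
    # the loss scale manager and 8 devices in DATA_PARALLEL mode, the sensitivity
    # passed to the train network is 1024 / 8 = 128.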
def _exec_preprocess(self, network, is_train, phase, dataset, dataset_sink_mode, sink_size=-1, epoch_num=1):
"""Initializes dataset."""
need_wrap = False
if dataset_sink_mode:
# remove later to deal with loop sink
if not hasattr(dataset, '__ME_INITED__') and context.get_context("device_target") == "Ascend" \
and not context.get_context("enable_ge"):
need_wrap = True
if not is_train:
dataset.__loop_size__ = 1
dataset_helper = DatasetHelper(dataset, dataset_sink_mode, sink_size, epoch_num)
# remove later to deal with loop sink
if need_wrap:
network = nn.DataWrapper(network, *(dataset_helper.types_shapes()), dataset.__ME_INITED__)
network.set_train(is_train)
network.phase = phase
if self._parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL):
network.set_auto_parallel()
return dataset_helper, network
def init(self, train_dataset=None, valid_dataset=None):
"""
Initialize compute graphs and data graphs with the sink mode.
Note:
Pre-init process only supports `GRAPH_MODE` and `Ascend` target currently.
Args:
train_dataset (Dataset): A training dataset iterator. If `train_dataset` is defined, training graphs will be
initialized. Default: None.
valid_dataset (Dataset): A evaluating dataset iterator. If `valid_dataset` is defined, evaluation graphs
will be initialized, and `metrics` in `Model` can not be None. Default: None.
Examples:
>>> train_dataset = get_train_dataset()
>>> valid_dataset = get_valid_dataset()
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
>>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics={'acc'})
>>> model.init(train_dataset, valid_dataset)
>>> model.train(2, train_dataset)
>>> model.eval(valid_dataset)
"""
if context.get_context("mode") != context.GRAPH_MODE or context.get_context("device_target") != "Ascend":
raise RuntimeError('Pre-init process only supports GRAPH MODE and Ascend target currently.')
if not train_dataset and not valid_dataset:
raise ValueError('Both train_dataset and valid_dataset can not be None or empty.')
_device_number_check(self._parallel_mode, self._device_number)
if train_dataset:
_parameter_broadcast_check(self._parallel_mode, self._parameter_broadcast)
self._train_network.set_train()
self._train_network.phase = 'train'
if self._parameter_broadcast:
self._train_network.set_broadcast_flag()
train_dataset.__no_send__ = True
train_dataset_helper, train_network = self._exec_preprocess(self._train_network,
is_train=True,
phase='train',
dataset=train_dataset,
dataset_sink_mode=True)
self._train_network = train_network
for inputs in train_dataset_helper:
self._train_network.compile(*inputs)
break
if valid_dataset:
if not self._metric_fns:
raise RuntimeError('If define `valid_dataset`, metric fn can not be None or empty.')
self._eval_network.set_train(False)
self._eval_network.phase = 'eval'
valid_dataset.__no_send__ = True
valid_dataset_helper, eval_network = self._exec_preprocess(self._eval_network,
is_train=False,
phase='eval',
dataset=valid_dataset,
dataset_sink_mode=True)
self._eval_network = eval_network
for inputs in valid_dataset_helper:
self._eval_network.compile(*inputs)
break
def _train(self, epoch, train_dataset, callbacks=None, dataset_sink_mode=True, sink_size=-1):
"""
Training.
Args:
epoch (int): Total number of iterations on the data.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiple data (data1, data2, data3, ...) will be
returned and passed to the network. Otherwise, a tuple (data, label) will
be returned. The data and label would be passed to the network and loss
function respectively.
callbacks (list): List of callback objects which should be executed while training. Default: None.
dataset_sink_mode (bool): Determine whether the data should be passed through the dataset channel.
Default: True.
                In pynative mode, the training process will be performed with
                the dataset not sinking.
sink_size (int): Control the amount of data in each sink. Default: -1.
"""
epoch = check_int_positive(epoch)
self._train_network.set_train()
if self._parameter_broadcast:
self._train_network.set_broadcast_flag()
cb_params = _InternalCallbackParam()
cb_params.train_network = self._train_network
cb_params.epoch_num = epoch
if dataset_sink_mode and sink_size > 0:
cb_params.batch_num = sink_size
else:
cb_params.batch_num = train_dataset.get_dataset_size()
cb_params.mode = "train"
cb_params.loss_fn = self._loss_fn
cb_params.optimizer = self._optimizer
cb_params.parallel_mode = self._parallel_mode
cb_params.device_number = self._device_number
cb_params.train_dataset = train_dataset
cb_params.list_callback = self._transform_callbacks(callbacks)
cb_params.train_dataset_element = None
cb_params.network = self._network
ms_role = os.getenv("MS_ROLE")
if ms_role in ("MS_PSERVER", "MS_SCHED"):
epoch = 1
# build callback list
with _CallbackManager(callbacks) as list_callback:
if not dataset_sink_mode:
self._train_process(epoch, train_dataset, list_callback, cb_params)
elif context.get_context("mode") == context.PYNATIVE_MODE:
logger.warning("The pynative mode cannot support dataset sink mode currently."
"So the training process will be performed with dataset not sink.")
self._train_process(epoch, train_dataset, list_callback, cb_params)
else:
self._train_dataset_sink_process(epoch, train_dataset, list_callback, cb_params, sink_size)
@staticmethod
def _transform_callbacks(callbacks):
"""Transform callback to a list."""
if callbacks is None:
return []
if isinstance(callbacks, Iterable):
return list(callbacks)
return [callbacks]
def _train_dataset_sink_process(self, epoch, train_dataset, list_callback=None, cb_params=None, sink_size=-1):
"""
Training process. The data would be passed to network through dataset channel.
Args:
epoch (int): Total number of iterations on the data.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiple data (data1, data2, data3, ...) should be
returned and passed to the network. Otherwise, a tuple (data, label) should
be returned. The data and label would be passed to the network and loss
function respectively.
list_callback (Callback): Executor of callback list. Default: None.
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
sink_size (int): Control the amount of data in each sink. Default: -1.
"""
if sink_size == -1:
epoch_num = epoch
else:
epoch_num = math.ceil(epoch * sink_size / train_dataset.get_dataset_size())
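        # Worked example (illustrative numbers): epoch=10, sink_size=100 and a
        # dataset of 400 batches give epoch_num = ceil(10 * 100 / 400) = 3, i.e.
        # the dataset is traversed three times to feed the requested sinks.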
dataset_helper, train_network = self._exec_preprocess(self._train_network,
is_train=True,
phase='train',
dataset=train_dataset,
dataset_sink_mode=True,
sink_size=sink_size,
epoch_num=epoch_num)
self._train_network = train_network
cb_params.train_network = self._train_network
cb_params.cur_step_num = 0
run_context = RunContext(cb_params)
list_callback.begin(run_context)
        # used to stop training early, e.g. by StopAtTime or StopAtStep callbacks
should_stop = False
for i in range(epoch):
cb_params.cur_epoch_num = i + 1
list_callback.epoch_begin(run_context)
            # in data sink mode dataset_helper only iterates once; otherwise it iterates epoch_size times.
for inputs in dataset_helper:
if _need_to_full() and context.get_context("device_target") == "GPU":
inputs = _to_full_tensor(inputs, self._device_number, self._global_rank)
list_callback.step_begin(run_context)
outputs = self._train_network(*inputs)
cb_params.cur_step_num += dataset_helper.sink_size()
cb_params.net_outputs = outputs
list_callback.step_end(run_context)
list_callback.epoch_end(run_context)
should_stop = should_stop or run_context.get_stop_requested()
if should_stop:
break
dataset_helper.stop_send()
list_callback.end(run_context)
def _train_process(self, epoch, train_dataset, list_callback=None, cb_params=None):
"""
Training process. The data would be passed to network directly.
Args:
epoch (int): Total number of iterations on the data.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiple data (data1, data2, data3, ...) should be
returned and passed to the network. Otherwise, a tuple (data, label) should
be returned. The data and label would be passed to the network and loss
function respectively.
list_callback (Callback): Executor of callback list. Default: None.
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
"""
dataset_helper, _ = self._exec_preprocess(self._train_network,
is_train=True,
phase='train',
dataset=train_dataset,
dataset_sink_mode=False)
cb_params.cur_step_num = 0
run_context = RunContext(cb_params)
list_callback.begin(run_context)
        # used to stop training early, e.g. by StopAtTime or StopAtStep callbacks
should_stop = False
for i in range(epoch):
cb_params.cur_epoch_num = i + 1
list_callback.epoch_begin(run_context)
for next_element in dataset_helper:
len_element = len(next_element)
if self._loss_fn and len_element != 2:
raise ValueError("when loss_fn is not None, train_dataset should"
"return two elements, but got {}".format(len_element))
cb_params.cur_step_num += 1
list_callback.step_begin(run_context)
overflow = False
if self._loss_scale_manager and self._loss_scale_manager.get_drop_overflow_update():
scaling_sens = self._get_scaling_sens()
next_element = tuple(next_element) + (Tensor(scaling_sens, mstype.float32),)
cb_params.train_dataset_element = next_element
outputs = self._train_network(*next_element)
cb_params.net_outputs = outputs
if self._loss_scale_manager and self._loss_scale_manager.get_drop_overflow_update():
_, overflow, _ = outputs
overflow = np.all(overflow.asnumpy())
self._loss_scale_manager.update_loss_scale(overflow)
list_callback.step_end(run_context)
should_stop = should_stop or run_context.get_stop_requested()
if should_stop:
break
train_dataset.reset()
list_callback.epoch_end(run_context)
should_stop = should_stop or run_context.get_stop_requested()
if should_stop:
break
list_callback.end(run_context)
def train(self, epoch, train_dataset, callbacks=None, dataset_sink_mode=True, sink_size=-1):
"""
Training API where the iteration is controlled by python front-end.
        When pynative mode is set, the training process will be performed with the dataset not sinking.
Note:
CPU is not supported when dataset_sink_mode is true.
If dataset_sink_mode is True, epoch of training should be equal to the count of repeat
operation in dataset processing. Otherwise, errors could occur since the amount of data
            is not equal to the amount required for training.
If dataset_sink_mode is True, data will be sent to device. If device is Ascend, features
of data will be transferred one by one. The limitation of data transmission per time is 256M.
Args:
epoch (int): Generally, total number of iterations on the data per epoch.
                When dataset_sink_mode is set to true and sink_size>0, each epoch sinks sink_size
                steps of data instead of the total number of iterations.
train_dataset (Dataset): A training dataset iterator. If there is no
loss_fn, a tuple with multiple data (data1, data2, data3, ...) should be
returned and passed to the network. Otherwise, a tuple (data, label) should
be returned. The data and label would be passed to the network and loss
function respectively.
callbacks (list): List of callback objects which should be executed while training. Default: None.
dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True.
                In pynative mode, the training process will be performed with
                the dataset not sinking.
sink_size (int): Control the amount of data in each sink.
If sink_size=-1, sink the complete dataset for each epoch.
If sink_size>0, sink sink_size data for each epoch.
If dataset_sink_mode is False, set sink_size as invalid. Default: -1.
Examples:
>>> dataset = get_dataset()
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
>>> loss_scale_manager = FixedLossScaleManager()
>>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
>>> model.train(2, dataset)
"""
check_bool(dataset_sink_mode)
check_int(sink_size)
if sink_size < -1 or sink_size == 0:
raise ValueError("The sink_size must be -1 or positive, but got sink_size {}.".format(sink_size))
_device_number_check(self._parallel_mode, self._device_number)
_parameter_broadcast_check(self._parallel_mode, self._parameter_broadcast)
self._train(epoch,
train_dataset,
callbacks=callbacks,
dataset_sink_mode=dataset_sink_mode,
sink_size=sink_size)
def _eval_dataset_sink_process(self, valid_dataset, list_callback=None, cb_params=None):
"""
Evaluation. The data would be passed to network through dataset channel.
Args:
valid_dataset (Dataset): Dataset to evaluate the model.
list_callback (Callback): Executor of callback list. Default: None.
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
Returns:
Dict, which returns the loss value and metrics values for the model in the test mode.
"""
run_context = RunContext(cb_params)
dataset_helper, eval_network = self._exec_preprocess(self._eval_network,
is_train=False,
phase='eval',
dataset=valid_dataset,
dataset_sink_mode=True)
self._eval_network = eval_network
cb_params.eval_network = self._eval_network
list_callback.begin(run_context)
for inputs in dataset_helper:
cb_params.cur_step_num += 1
list_callback.step_begin(run_context)
outputs = self._eval_network(*inputs)
cb_params.net_outputs = outputs
list_callback.step_end(run_context)
self._update_metrics(outputs)
metrics = self._get_metrics()
cb_params.metrics = metrics
list_callback.end(run_context)
return metrics
def _eval_process(self, valid_dataset, list_callback=None, cb_params=None):
"""
Evaluation. The data would be passed to network directly.
Args:
valid_dataset (Dataset): Dataset to evaluate the model.
list_callback (Callback): Executor of callback list. Default: None.
cb_params (_InternalCallbackParam): Callback parameters. Default: None.
Returns:
Dict, which returns the loss value and metrics values for the model in the test mode.
"""
run_context = RunContext(cb_params)
list_callback.begin(run_context)
dataset_helper, _ = self._exec_preprocess(self._eval_network,
is_train=False,
phase='eval',
dataset=valid_dataset,
dataset_sink_mode=False)
for next_element in dataset_helper:
cb_params.cur_step_num += 1
list_callback.step_begin(run_context)
outputs = self._eval_network(*next_element)
cb_params.net_outputs = outputs
list_callback.step_end(run_context)
self._update_metrics(outputs)
valid_dataset.reset()
metrics = self._get_metrics()
cb_params.metrics = metrics
list_callback.end(run_context)
return metrics
def eval(self, valid_dataset, callbacks=None, dataset_sink_mode=True):
"""
Evaluation API where the iteration is controlled by python front-end.
        In pynative mode, the evaluation will be performed with the dataset in non-sink mode.
Note:
CPU is not supported when dataset_sink_mode is true.
If dataset_sink_mode is True, data will be sent to device. If device is Ascend, features
of data will be transferred one by one. The limitation of data transmission per time is 256M.
Args:
valid_dataset (Dataset): Dataset to evaluate the model.
callbacks (list): List of callback objects which should be executed while training. Default: None.
dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True.
Returns:
Dict, which returns the loss value and metrics values for the model in the test mode.
Examples:
>>> dataset = get_dataset()
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
>>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
>>> model.eval(dataset)
"""
check_bool(dataset_sink_mode)
_device_number_check(self._parallel_mode, self._device_number)
if not self._metric_fns:
raise ValueError("metric fn can not be None or empty.")
cb_params = _InternalCallbackParam()
cb_params.eval_network = self._eval_network
cb_params.valid_dataset = valid_dataset
cb_params.batch_num = valid_dataset.get_dataset_size()
cb_params.mode = "eval"
cb_params.cur_step_num = 0
cb_params.list_callback = self._transform_callbacks(callbacks)
cb_params.network = self._network
self._eval_network.set_train(mode=False)
self._eval_network.phase = 'eval'
self._clear_metrics()
with _CallbackManager(callbacks) as list_callback:
if dataset_sink_mode:
return self._eval_dataset_sink_process(valid_dataset, list_callback, cb_params)
return self._eval_process(valid_dataset, list_callback, cb_params)
def predict(self, *predict_data):
"""
Generate output predictions for the input samples.
Data could be a single tensor, a list of tensor, or a tuple of tensor.
Note:
Batch data should be put together in one tensor.
Args:
predict_data (Tensor): Tensor of predict data. can be array, list or tuple.
Returns:
Tensor, array(s) of predictions.
Examples:
>>> input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), mindspore.float32)
>>> model = Model(Net())
>>> model.predict(input_data)
"""
self._predict_network.set_train(False)
check_input_data(*predict_data, data_class=Tensor)
result = self._predict_network(*predict_data)
check_output_data(result)
return result
__all__ = ["Model"]
|
the-stack_106_31931 | # -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import MultiPolygon, Polygon
from bims.models.boundary import Boundary
from bims.models.boundary_type import BoundaryType
logger = logging.getLogger(__name__)
class UpdateBoundary(object):
""" Update boundaries from shapefile.
Boundary that created based on boundary_type. E.g. Country
And use column_name as indicator which column to be used
for saving name of the boundary.
"""
help = 'Import boundaries from shp file'
def is_ascii(self, s):
return all(ord(c) < 128 for c in s)
def save_data(
self, shapefile, boundary_type, column_name, column_code_name,
boundary_level, column_top_boundary=None):
""" Saving data boundary from shapefile.
:param shapefile: shapefile path data that hold boundaries data
:type shapefile: str
:param boundary_type: what boundary type that will be generated.
:type boundary_type: str
:param column_name: column name of boundary name.
Needed for naming the boundary.
:type column_name: str
:param column_code_name: column name of boundary code name.
Needed for code name of the boundary.
:type column_code_name: str
        :param boundary_level: Level of the boundary in the administrative hierarchy.
        :type boundary_level: int
        :param column_top_boundary: column name of the parent (top-level) boundary.
        It is used to determine which boundary sits above this one;
        the value is the code name of that boundary.
:type column_top_boundary: str
"""
try:
boundary_type = BoundaryType.objects.get(
name=boundary_type)
except BoundaryType.DoesNotExist:
boundary_type = BoundaryType.objects.create(
name=boundary_type
)
boundary_type.level = boundary_level
boundary_type.save()
Boundary.objects.filter(type=boundary_type).delete()
data_source = DataSource(
shapefile)
layer = data_source[0]
for feature in layer:
name = feature[column_name].value
if boundary_type.name == 'country' \
and name not in settings.FOCUSED_COUNTRIES:
continue
            # TODO: Fix grappelli, which can't handle non-ASCII names
if not self.is_ascii(name):
continue
name = name.encode('utf-8').strip()
codename = feature[column_code_name].value
codename = codename.strip()
print('COPYING %s' % name)
# get top boundary
top_level_boundary = None
if column_top_boundary:
top_boundary_codename = feature[
column_top_boundary].value
top_boundary_codename = top_boundary_codename.strip()
try:
top_level = (boundary_level - 1)
boundary = Boundary.objects.get(
code_name=top_boundary_codename,
type__level=top_level
)
top_level_boundary = boundary
except Boundary.DoesNotExist:
print('Top boundary=%s not found' %
top_boundary_codename)
boundary = Boundary.objects.create(
name=name,
type=boundary_type,
code_name=codename,
top_level_boundary=top_level_boundary
)
geometry = feature.geom
if 'MultiPolygon' not in geometry.geojson:
geometry = MultiPolygon(
Polygon(geometry.coords[0])).geojson
else:
geometry = geometry.geojson
boundary.geometry = geometry
boundary.centroid = boundary.geometry.centroid
# Don't save geometry if not municipals
# and USE_GEOMETRY_BOUNDARY is false
if boundary_type.name != 'municipal' and \
not settings.USE_GEOMETRY_BOUNDARY:
boundary.geometry = None
boundary.save()
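# Illustrative invocation (file path and column names are hypothetical):
# UpdateBoundary().save_data(
#     shapefile='/data/boundaries/countries.shp',
#     boundary_type='country',
#     column_name='NAME',
#     column_code_name='ISO3',
#     boundary_level=0,
# )
# Lower levels also pass column_top_boundary so each boundary is linked to the
# code name of its parent boundary.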
|
the-stack_106_31932 | """BleBox cover entity."""
from openpeerpower.components.cover import (
ATTR_POSITION,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPENING,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from . import BleBoxEntity, create_blebox_entities
from .const import BLEBOX_TO_OPP_COVER_STATES, BLEBOX_TO_OPP_DEVICE_CLASSES
async def async_setup_entry(opp, config_entry, async_add_entities):
"""Set up a BleBox entry."""
create_blebox_entities(
opp, config_entry, async_add_entities, BleBoxCoverEntity, "covers"
)
class BleBoxCoverEntity(BleBoxEntity, CoverEntity):
"""Representation of a BleBox cover feature."""
@property
def state(self):
"""Return the equivalent OPP cover state."""
return BLEBOX_TO_OPP_COVER_STATES[self._feature.state]
@property
def device_class(self):
"""Return the device class."""
return BLEBOX_TO_OPP_DEVICE_CLASSES[self._feature.device_class]
@property
def supported_features(self):
"""Return the supported cover features."""
position = SUPPORT_SET_POSITION if self._feature.is_slider else 0
stop = SUPPORT_STOP if self._feature.has_stop else 0
return position | stop | SUPPORT_OPEN | SUPPORT_CLOSE
@property
def current_cover_position(self):
"""Return the current cover position."""
position = self._feature.current
if position == -1: # possible for shutterBox
return None
return None if position is None else 100 - position
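    # Example of the inversion (assuming the feature reports position as percent
    # closed): a device value of 25 is exposed to the frontend as 75 percent open.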
@property
def is_opening(self):
"""Return whether cover is opening."""
return self._is_state(STATE_OPENING)
@property
def is_closing(self):
"""Return whether cover is closing."""
return self._is_state(STATE_CLOSING)
@property
def is_closed(self):
"""Return whether cover is closed."""
return self._is_state(STATE_CLOSED)
async def async_open_cover(self, **kwargs):
"""Open the cover position."""
await self._feature.async_open()
async def async_close_cover(self, **kwargs):
"""Close the cover position."""
await self._feature.async_close()
async def async_set_cover_position(self, **kwargs):
"""Set the cover position."""
position = kwargs[ATTR_POSITION]
await self._feature.async_set_position(100 - position)
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
await self._feature.async_stop()
def _is_state(self, state_name):
value = self.state
return None if value is None else value == state_name
|
the-stack_106_31933 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
''' A module for helping ban group join spammers. '''
from asyncio import sleep
from requests import get
from telethon.events import ChatAction
from telethon.tl.types import ChannelParticipantsAdmins, Message
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, ANTI_SPAMBOT, ANTI_SPAMBOT_SHOUT, bot
@bot.on(ChatAction)
async def anti_spambot_handler(welcm):
    ''' Ban a recently joined user if it
    matches the spammer checking algorithm. '''
    try:
        if not ANTI_SPAMBOT:
            return
if welcm.user_joined or welcm.user_added:
adder = None
ignore = False
users = None
if welcm.user_added:
ignore = False
try:
adder = welcm.action_message.from_id
except AttributeError:
return
async for admin in bot.iter_participants(
welcm.chat_id, filter=ChannelParticipantsAdmins):
if admin.id == adder:
ignore = True
break
if ignore:
return
elif welcm.user_joined:
users_list = hasattr(welcm.action_message.action, "users")
if users_list:
users = welcm.action_message.action.users
else:
users = [welcm.action_message.from_id]
await sleep(5)
spambot = False
if not users:
return
for user_id in users:
async for message in bot.iter_messages(welcm.chat_id,
from_user=user_id):
correct_type = isinstance(message, Message)
if not message or not correct_type:
break
join_time = welcm.action_message.date
message_date = message.date
if message_date < join_time:
continue # The message was sent before the user joined, thus ignore it
check_user = await welcm.client.get_entity(user_id)
# DEBUGGING. LEAVING IT HERE FOR SOME TIME ###
print(
f"User Joined: {check_user.first_name} [ID: {check_user.id}]"
)
print(f"Chat: {welcm.chat.title}")
print(f"Time: {join_time}")
print(
f"Message Sent: {message.text}\n\n[Time: {message_date}]"
)
##############################################
try:
cas_url = f"https://combot.org/api/cas/check?user_id={check_user.id}"
r = get(cas_url, timeout=3)
data = r.json()
except BaseException:
print(
"CAS check failed, falling back to legacy anti_spambot behaviour."
)
data = None
pass
if data and data['ok']:
reason = f"[Banned by Combot Anti Spam](https://combot.org/cas/query?u={check_user.id})"
spambot = True
elif "t.cn/" in message.text:
reason = "Match on `t.cn` URLs"
spambot = True
elif "t.me/joinchat" in message.text:
reason = "Potential Promotion Message"
spambot = True
elif message.fwd_from:
reason = "Forwarded Message"
spambot = True
elif "?start=" in message.text:
reason = "Telegram bot `start` link"
spambot = True
elif "bit.ly/" in message.text:
reason = "Match on `bit.ly` URLs"
spambot = True
else:
if check_user.first_name in ("Bitmex", "Promotion",
"Information", "Dex",
"Announcements", "Info"):
                            if check_user.last_name == "Bot":
reason = "Known spambot"
spambot = True
if spambot:
print(f"Potential Spam Message: {message.text}")
await message.delete()
break
                    continue  # Check the next message
if spambot:
chat = await welcm.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
if ANTI_SPAMBOT_SHOUT:
await welcm.reply(
"@admins\n"
"`ANTI SPAMBOT DETECTOR!\n"
"THIS USER MATCHES MY ALGORITHMS AS A SPAMBOT!`"
f"REASON: {reason}")
kicked = False
reported = True
else:
try:
await welcm.reply(
"`Potential Spambot Detected !!`\n"
f"`REASON:` {reason}\n"
"Kicking away for now, will log the ID for further purposes.\n"
f"`USER:` [{check_user.first_name}](tg://user?id={check_user.id})"
)
await welcm.client.kick_participant(
welcm.chat_id, check_user.id)
kicked = True
reported = False
except BaseException:
if ANTI_SPAMBOT_SHOUT:
await welcm.reply(
"@admins\n"
"`ANTI SPAMBOT DETECTOR!\n"
"THIS USER MATCHES MY ALGORITHMS AS A SPAMBOT!`"
f"REASON: {reason}")
kicked = False
reported = True
if BOTLOG:
if kicked or reported:
await welcm.client.send_message(
BOTLOG_CHATID, "#ANTI_SPAMBOT REPORT\n"
f"USER: [{user.first_name}](tg://user?id={check_user.id})\n"
f"USER ID: `{check_user.id}`\n"
f"CHAT: {welcm.chat.title}\n"
f"CHAT ID: `{welcm.chat_id}`\n"
f"REASON: {reason}\n"
f"MESSAGE:\n\n{message.text}")
except ValueError:
pass
CMD_HELP.update({
'anti_spambot':
"If enabled in config.env or env var,\
\nthis module will ban(or inform the admins of the group about) the\
\nspammer(s) if they match the userbot's anti-spam algorithm."
}) |
the-stack_106_31935 | #!/usr/bin/env python
from __future__ import division
from past.utils import old_div
import os, sys, json, requests, copy, math
from pprint import pprint, pformat
from frameMetadata.FrameMetadata import FrameMetadata
from utils.UrlUtils import UrlUtils
from utils.queryBuilder import postQuery, buildQuery, createMetaObjects
def check_reference(dataset, md):
"""Check reference of this metadata against what's in GRQ."""
# get config
uu = UrlUtils()
rest_url = uu.rest_url
# is this scene a reference?
fm_md = copy.deepcopy(md)
fm = FrameMetadata()
fm.load(fm_md)
#sys.stderr.write("fm.reference: %s\n" % fm.reference)
#sys.stderr.write("fm.trackNumber: %s\n" % fm.trackNumber)
#sys.stderr.write("fm.beamID: %s\n" % fm.beamID)
#sys.stderr.write("fm.latitudeIndexMin: %s\n" % fm.latitudeIndexMin)
#sys.stderr.write("fm.latitudeIndexMax: %s\n" % fm.latitudeIndexMax)
# if not a reference, save
if fm.reference == False:
return { 'ok_to_save': True, 'suspicious_flag': False, 'suspicious_code': '' }
# check if reference exists already
extremes = fm.getExtremes(fm.bbox)
latMin = extremes[0]
latMax = extremes[1]
lonMin = extremes[2]
lonMax = extremes[3]
latDelta = (latMax - latMin) / 3.
latitudeResolution = .1
params = {
'sensor': fm.platform,
'dataset_type':dataset,
'trackNumber':fm.trackNumber,
'latitudeIndexMin': int(math.floor(old_div((latMin - latDelta),latitudeResolution))),
'latitudeIndexMax': int(math.ceil(old_div((latMax + latDelta),latitudeResolution))),
'system_version':uu.version,
'direction':fm.direction,
'lookDirection':fm.lookDirection,
'reference':True,
}
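    # Worked example (illustrative numbers): for a frame spanning latitudes 10.0 to
    # 13.0, latDelta = 1.0, so the query pads the window to roughly 9.0-14.0 degrees
    # and converts it into 0.1-degree index bins of about 90 to 140.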
if fm.beamID:
params['beamID'] = fm.beamID
metList, status = postQuery(buildQuery(params, ['within']))
metObj = createMetaObjects(metList)
# if none found, save
if len(metObj) == 0:
return { 'ok_to_save': True, 'suspicious_flag': False, 'suspicious_code': '' }
# loop over frames and check if in this frame's bbox
inbbox_count = 0
frames = []
for met_idx, tmp_fm in enumerate(metObj):
inbbox = fm.isInBbox(tmp_fm.refbbox)
if inbbox: inbbox_count += 1
frames.append({
'id': os.path.splitext(metList[met_idx]['dfdn']['ProductName'])[0],
'archive_filename': metList[met_idx]['archive_filename'],
'inbbox': inbbox,
})
#print "true_count:", true_count
# if all not in bbox, okay to save but flag suspicious
if inbbox_count == 0:
return { 'ok_to_save': True,
'frames': frames,
'suspicious_flag': True,
'suspicious_code': 'no_frames_in_bbox' }
# if one is in bbox, not okay to update
elif inbbox_count == 1:
#return { 'ok_to_save': False, 'reprocess': True, 'suspicious_flag': True, 'suspicious_code': 'one_frame_in_bbox' }
# fail for now; it can be eventually reprocessed after the initial re-ingest
return { 'ok_to_save': False,
'frames': frames,
'reprocess': False,
'suspicious_flag': True,
'suspicious_code': 'one_frame_in_bbox' }
# if more than one is in bbox, not okay to update and flag
else:
return { 'ok_to_save': False,
'frames': frames,
'reprocess': False,
'suspicious_flag': True,
'suspicious_code': 'more_than_one_frame_in_bbox' }
if __name__ == "__main__":
dataset = sys.argv[1]
md_file = sys.argv[2]
json_file = sys.argv[3]
with open(md_file) as f:
md = json.load(f)
with open(json_file, 'w') as f:
f.write(json.dumps(check_reference(dataset, md), indent=2))
|
the-stack_106_31938 | import numpy as np
import pickle
import pdb
from src.visualizations.make_pie_chart import make_pie_chart
def calc_aic(N: int, log_L: float, num_params: int) -> float:
return 2 * num_params - 2 * log_L
def calc_bic(N: int, log_L: float, num_params: int) -> float:
return -2 * log_L + np.log(N) * num_params
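# Worked example (illustrative values): with N = 100 papers, log_L = -250.0 and one
# free parameter (as used for the Exemplar model below), AIC = 2*1 - 2*(-250.0) = 502.0
# and BIC = -2*(-250.0) + ln(100)*1 ~= 504.6; BIC penalizes the parameter more as N grows.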
def calc_aic_bic_individuals(model_data: dict) -> tuple:
if "2NN" in model_data:
del model_data["2NN"]
if "3NN" in model_data:
del model_data["3NN"]
if "4NN" in model_data:
del model_data["4NN"]
if "5NN" in model_data:
del model_data["5NN"]
if "Exemplar (s=0.001)" in model_data:
del model_data["Exemplar (s=0.001)"]
if "Exemplar (s=0.1)" in model_data:
del model_data["Exemplar (s=0.1)"]
if "Exemplar (s=1)" in model_data:
del model_data["Exemplar (s=1)"]
individual_aic = {}
individual_bic = {}
for name in model_data["1NN"]:
null_LL = model_data["Null"][name]
num_papers = model_data["1NN"][name][1]
if num_papers < 5:
continue
model_LL = {model_name: model_data[model_name][name][0] + null_LL for model_name in model_data if model_name != "Null"}
aic = {model_name: calc_aic(num_papers, LL, 0) for model_name, LL in model_LL.items() if "Exemplar" not in model_name}
aic["Exemplar"] = calc_aic(num_papers, model_LL["Exemplar"], 1)
aic["Null"] = calc_aic(num_papers, null_LL, 0)
#aic["Exemplar (s=0.001)"] = calc_aic(num_papers, model_LL["Exemplar (s=0.001)"], 1)
#aic["Exemplar (s=0.1)"] = calc_aic(num_papers, model_LL["Exemplar (s=0.1)"], 1)
#aic["Exemplar (s=1)"] = calc_aic(num_papers, model_LL["Exemplar (s=1)"], 1)
bic = {model_name: calc_bic(num_papers, LL, 0) for model_name, LL in model_LL.items() if "Exemplar" not in model_name}
bic["Exemplar"] = calc_bic(num_papers, model_LL["Exemplar"], 1)
bic["Null"] = calc_bic(num_papers, null_LL, 0)
#bic["Exemplar (s=0.001)"] = calc_bic(num_papers, model_LL["Exemplar (s=0.001)"], 1)
#bic["Exemplar (s=0.1)"] = calc_bic(num_papers, model_LL["Exemplar (s=0.1)"], 1)
#bic["Exemplar (s=1)"] = calc_bic(num_papers, model_LL["Exemplar (s=1)"], 1)
individual_aic[name] = aic
individual_bic[name] = bic
return individual_aic, individual_bic
def get_best_model_individual(individual_data: dict) -> dict:
best_per_individual = {name: [float("inf"), []] for name in individual_data}
for individual in individual_data:
for model in individual_data[individual]:
if individual_data[individual][model] < best_per_individual[individual][0]:
best_per_individual[individual][0] = individual_data[individual][model]
best_per_individual[individual][1] = [model]
elif individual_data[individual][model] == best_per_individual[individual][0]:
best_per_individual[individual][1].append(model)
return best_per_individual
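# Illustrative input/output (hypothetical scores): given
#   {"smith": {"1NN": 10.2, "Exemplar": 9.8, "Null": 11.0}}
# the function returns {"smith": [9.8, ["Exemplar"]]}; tied models all end up in the list.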
def calc_aic_bic_overall(model_data: dict) -> dict:
if "2NN" in model_data:
del model_data["2NN"]
if "3NN" in model_data:
del model_data["3NN"]
if "4NN" in model_data:
del model_data["4NN"]
if "5NN" in model_data:
del model_data["5NN"]
aic = {}
bic = {}
num_papers_overall = 0
log_L_overall = {model_name: 0 for model_name in model_data}
for name in model_data["1NN"]:
null_LL = model_data["Null"][name]
num_papers = model_data["1NN"][name][1]
num_papers_overall += num_papers
for model_name in model_data:
if model_name == "Null":
continue
model_LL = model_data[model_name][name][0] + null_LL
log_L_overall[model_name] += model_LL
for model_name in model_data:
num_params = 1 if "Exemplar" in model_name else 0
aic[model_name] = calc_aic(num_papers_overall, log_L_overall[model_name], num_params)
bic[model_name] = calc_bic(num_papers_overall, log_L_overall[model_name], num_params)
return aic, bic
if __name__ == "__main__":
field = "medicine"
num_authors = 2
#model_data = pickle.load(open(f"results/full-2/{field}.p", "rb"))
model_data = pickle.load(open(f"results/summary/k-author/authorship-{field}-{num_authors}.p", "rb"))
#model_data = pickle.load(open(f"results/summary/k-author/authorship-{field}-{num_authors}.p", "rb"))
aic, bic = calc_aic_bic_individuals(model_data)
aic_individual = get_best_model_individual(aic)
bic_individual = get_best_model_individual(bic)
make_pie_chart(aic_individual, include_null=True, len_included=False, filename=f"aic-{field}-{num_authors}")
make_pie_chart(bic_individual, include_null=True, len_included=False, filename=f"bic-{field}-{num_authors}")
#print(aic)
#print(bic)
#print(get_best_model_individual(bic)) |
the-stack_106_31942 | # Lint as: python3
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.struct."""
from typing import Any
from absl.testing import absltest
import dataclasses
from flax import struct
import jax
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
@struct.dataclass
class Point:
x: float
y: float
meta: Any = struct.field(pytree_node=False)
class StructTest(absltest.TestCase):
def test_no_extra_fields(self):
p = Point(x=1, y=2, meta={})
with self.assertRaises(dataclasses.FrozenInstanceError):
p.new_field = 1
def test_mutation(self):
p = Point(x=1, y=2, meta={})
new_p = p.replace(x=3)
self.assertEqual(new_p, Point(x=3, y=2, meta={}))
with self.assertRaises(dataclasses.FrozenInstanceError):
p.y = 3
def test_pytree_nodes(self):
p = Point(x=1, y=2, meta={'abc': True})
leaves = jax.tree_leaves(p)
self.assertEqual(leaves, [1, 2])
new_p = jax.tree_map(lambda x: x + x, p)
self.assertEqual(new_p, Point(x=2, y=4, meta={'abc': True}))
if __name__ == '__main__':
absltest.main()
|
the-stack_106_31943 | # -*- coding: utf-8 -*-
"""The graphical part of a DFTB+ BandStructure node"""
import logging
import tkinter as tk
import dftbplus_step
import seamm
logger = logging.getLogger(__name__)
class TkBandStructure(seamm.TkNode):
def __init__(
self,
tk_flowchart=None,
node=None,
canvas=None,
x=120,
y=20,
w=200,
h=50,
my_logger=logger,
keyword_metadata=None,
):
"""Initialize the graphical Tk DFTB+ BandStructure step
Keyword arguments:
"""
self.results_widgets = []
super().__init__(
tk_flowchart=tk_flowchart,
node=node,
canvas=canvas,
x=x,
y=y,
w=w,
h=h,
my_logger=my_logger,
keyword_metadata=keyword_metadata,
)
def right_click(self, event):
"""Probably need to add our dialog..."""
super().right_click(event)
self.popup_menu.add_command(label="Edit..", command=self.edit)
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
def create_dialog(self, title="Edit DFTB+ BandStructure Step"):
"""Create the dialog!"""
self.logger.debug("Creating the dialog")
super().create_dialog(title=title, widget="notebook", results_tab=True)
# Create all the widgets
P = self.node.parameters
frame = self["frame"]
for key in dftbplus_step.BandStructureParameters.parameters:
if key not in ("results", "extra keywords", "create tables"):
self[key] = P[key].widget(frame)
# self.setup_results(dftbplus_step.properties, calculation=calculation)
self.logger.debug("Finished creating the dialog")
def reset_dialog(self, widget=None):
frame = self["frame"]
for slave in frame.grid_slaves():
slave.grid_forget()
# Put in the widgets
row = 0
self["nPoints"].grid(row=row, column=0, sticky=tk.N)
row += 1
return row
|
the-stack_106_31944 | # Simple demo of reading and writing the time for the PCF8523 real-time clock.
# Change the if False to if True below to set the time, otherwise it will just
# print the current date and time every second. Notice also comments to adjust
# for working with hardware vs. software I2C.
import time
import board
# For hardware I2C (M0 boards) use this line:
import busio as io
# Or for software I2C (ESP8266) use this line instead:
#import bitbangio as io
import adafruit_pcf8523
# Change to the appropriate I2C clock & data pins here!
i2c_bus = io.I2C(board.SCL, board.SDA)
# Create the RTC instance:
rtc = adafruit_pcf8523.PCF8523(i2c_bus)
# Lookup table for names of days (nicer printing).
days = ("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")
#pylint: disable-msg=bad-whitespace
#pylint: disable-msg=using-constant-test
if False: # change to True if you want to set the time!
# year, mon, date, hour, min, sec, wday, yday, isdst
t = time.struct_time((2017, 10, 29, 10, 31, 0, 0, -1, -1))
# you must set year, mon, date, hour, min, sec and weekday
# yearday is not supported, isdst can be set but we don't do anything with it at this time
print("Setting time to:", t) # uncomment for debugging
rtc.datetime = t
print()
#pylint: enable-msg=using-constant-test
#pylint: enable-msg=bad-whitespace
# Main loop:
while True:
t = rtc.datetime
#print(t) # uncomment for debugging
print("The date is {} {}/{}/{}".format(days[int(t.tm_wday)], t.tm_mday, t.tm_mon, t.tm_year))
print("The time is {}:{:02}:{:02}".format(t.tm_hour, t.tm_min, t.tm_sec))
time.sleep(1) # wait a second
|
the-stack_106_31946 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
import functools
import types
from functools import reduce
from arch.api.utils import log_utils
from federatedml.framework.homo.blocks import has_converged, loss_scatter, model_scatter, model_broadcaster
from federatedml.framework.homo.blocks import random_padding_cipher
from federatedml.framework.homo.blocks.base import HomoTransferBase
from federatedml.framework.homo.blocks.has_converged import HasConvergedTransVar
from federatedml.framework.homo.blocks.loss_scatter import LossScatterTransVar
from federatedml.framework.homo.blocks.model_broadcaster import ModelBroadcasterTransVar
from federatedml.framework.homo.blocks.model_scatter import ModelScatterTransVar
from federatedml.framework.homo.blocks.random_padding_cipher import RandomPaddingCipherTransVar
from federatedml.framework.weights import Weights, NumericWeights, TransferableWeights
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
from federatedml.util import consts
LOGGER = log_utils.getLogger()
class LegacyAggregatorTransVar(HomoTransferBase):
def __init__(self, server=(consts.ARBITER,), clients=(consts.GUEST, consts.HOST,), prefix=None):
super().__init__(server=server, clients=clients, prefix=prefix)
self.loss_scatter = LossScatterTransVar(server=server, clients=clients, prefix=self.prefix)
self.has_converged = HasConvergedTransVar(server=server, clients=clients, prefix=self.prefix)
self.model_scatter = ModelScatterTransVar(server=server, clients=clients, prefix=self.prefix)
self.model_broadcaster = ModelBroadcasterTransVar(server=server, clients=clients, prefix=self.prefix)
self.random_padding_cipher = RandomPaddingCipherTransVar(server=server, clients=clients, prefix=self.prefix)
class Arbiter(object):
def __init__(self, trans_var=LegacyAggregatorTransVar()):
self._guest_parties = trans_var.get_parties(roles=[consts.GUEST])
self._host_parties = trans_var.get_parties(roles=[consts.HOST])
self._client_parties = trans_var.client_parties
self._loss_sync = loss_scatter.Server(trans_var.loss_scatter)
self._converge_sync = has_converged.Server(trans_var.has_converged)
self._model_scatter = model_scatter.Server(trans_var.model_scatter)
self._model_broadcaster = model_broadcaster.Server(trans_var.model_broadcaster)
self._random_padding_cipher = random_padding_cipher.Server(trans_var.random_padding_cipher)
# noinspection PyUnusedLocal,PyAttributeOutsideInit,PyProtectedMember
def register_aggregator(self, transfer_variables: BaseTransferVariables, enable_secure_aggregate=True):
if enable_secure_aggregate:
self._random_padding_cipher.exchange_secret_keys()
return self
def aggregate_model(self, ciphers_dict=None, suffix=tuple()) -> Weights:
models = self.get_models_for_aggregate(ciphers_dict, suffix=suffix)
total_model, total_degree = reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]), models)
total_model /= total_degree
LOGGER.debug("In aggregate model, total_model: {}, total_degree: {}".format(total_model.unboxed, total_degree))
return total_model
def aggregate_and_broadcast(self, ciphers_dict=None, suffix=tuple()):
"""
aggregate models from guest and hosts, then broadcast the aggregated model.
Args:
ciphers_dict: a dict of host id to host cipher
suffix: tag suffix
"""
model = self.aggregate_model(ciphers_dict=ciphers_dict, suffix=suffix)
self.send_aggregated_model(model, ciphers_dict=ciphers_dict, suffix=suffix)
return model
def get_models_for_aggregate(self, ciphers_dict=None, suffix=tuple()):
models = self._model_scatter.get_models(suffix=suffix)
guest_model = models[0]
yield (guest_model.weights, guest_model.get_degree() or 1.0)
# host model
index = 0
for model in models[1:]:
weights = model.weights
if ciphers_dict and ciphers_dict.get(index, None):
weights = weights.decrypted(ciphers_dict[index])
yield (weights, model.get_degree() or 1.0)
index += 1
def send_aggregated_model(self, model: Weights,
ciphers_dict: typing.Union[None, typing.Mapping[int, typing.Any]] = None,
suffix=tuple()):
if ciphers_dict is None:
ciphers_dict = {}
party_to_cipher = {self._host_parties[idx]: cipher for idx, cipher in ciphers_dict.items()}
for party in self._client_parties:
cipher = party_to_cipher.get(party)
if cipher is None:
self._model_broadcaster.send_model(model=model.for_remote(), parties=party, suffix=suffix)
else:
self._model_broadcaster.send_model(model=model.encrypted(cipher, False).for_remote(), parties=party,
suffix=suffix)
def aggregate_loss(self, idx=None, suffix=tuple()):
if idx is None:
parties = None
else:
parties = []
parties.extend(self._guest_parties)
parties.extend([self._host_parties[i] for i in idx])
losses = self._loss_sync.get_losses(parties=parties, suffix=suffix)
total_loss = 0.0
total_degree = 0.0
for loss in losses:
total_loss += loss.unboxed
total_degree += loss.get_degree(1.0)
return total_loss / total_degree
def send_converge_status(self, converge_func: types.FunctionType, converge_args, suffix=tuple()):
is_converge = converge_func(*converge_args)
return self._converge_sync.remote_converge_status(is_converge, suffix=suffix)
class Client(object):
def __init__(self, trans_var=LegacyAggregatorTransVar()):
self._enable_secure_aggregate = False
self._loss_sync = loss_scatter.Client(trans_var.loss_scatter)
self._converge_sync = has_converged.Client(trans_var.has_converged)
self._model_scatter = model_scatter.Client(trans_var.model_scatter)
self._model_broadcaster = model_broadcaster.Client(trans_var.model_broadcaster)
self._random_padding_cipher = random_padding_cipher.Client(trans_var.random_padding_cipher)
# noinspection PyAttributeOutsideInit,PyUnusedLocal,PyProtectedMember
def register_aggregator(self, transfer_variables: BaseTransferVariables, enable_secure_aggregate=True):
self._enable_secure_aggregate = enable_secure_aggregate
if enable_secure_aggregate:
self._cipher = self._random_padding_cipher.create_cipher()
return self
def secure_aggregate(self, send_func, weights: Weights, degree: float = None, enable_secure_aggregate=True):
# w -> w * degree
if degree:
weights *= degree
# w * degree -> w * degree + \sum(\delta(i, j) * r_{ij}), namely, adding random mask.
if enable_secure_aggregate:
weights = weights.encrypted(cipher=self._cipher, inplace=True)
# maybe remote degree
remote_weights = weights.for_remote().with_degree(degree) if degree else weights.for_remote()
send_func(remote_weights)
def send_model(self, weights: Weights, degree: float = None, suffix=tuple()):
def _func(_weights: TransferableWeights):
self._model_scatter.send_model(model=_weights, suffix=suffix)
return self.secure_aggregate(send_func=_func,
weights=weights,
degree=degree,
enable_secure_aggregate=self._enable_secure_aggregate)
def get_aggregated_model(self, suffix=tuple()):
return self._model_broadcaster.get_model(suffix=suffix)
def aggregate_then_get(self, model: Weights, degree: float = None, suffix=tuple()) -> Weights:
self.send_model(weights=model, degree=degree, suffix=suffix)
return self.get_aggregated_model(suffix=suffix)
def send_loss(self, loss: typing.Union[float, Weights], degree: float = None, suffix=tuple()):
if isinstance(loss, float):
loss = NumericWeights(loss)
return self.secure_aggregate(send_func=functools.partial(self._loss_sync.send_loss, suffix=suffix),
weights=loss, degree=degree,
enable_secure_aggregate=False)
def get_converge_status(self, suffix=tuple()):
return self._converge_sync.get_converge_status(suffix=suffix)
Guest = Client
Host = Client
def with_role(role, transfer_variable, enable_secure_aggregate=True):
if role == consts.GUEST:
return Client().register_aggregator(transfer_variable, enable_secure_aggregate)
elif role == consts.HOST:
return Client().register_aggregator(transfer_variable, enable_secure_aggregate)
elif role == consts.ARBITER:
return Arbiter().register_aggregator(transfer_variable, enable_secure_aggregate)
else:
raise ValueError(f"role {role} not found")
|
the-stack_106_31947 | import os
import logging
from logging import Formatter
from logging.handlers import RotatingFileHandler
import json
from easydict import EasyDict
from pprint import pprint
from utils.dirs import create_dirs
def setup_logging(log_dir):
log_file_format = "[%(levelname)s] - %(asctime)s - %(name)s - : %(message)s in %(pathname)s:%(lineno)d"
log_console_format = "[%(levelname)s]: %(message)s"
# Main logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(Formatter(log_console_format))
exp_file_handler = RotatingFileHandler('{}exp_debug.log'.format(log_dir), maxBytes=10**6, backupCount=5)
exp_file_handler.setLevel(logging.DEBUG)
exp_file_handler.setFormatter(Formatter(log_file_format))
exp_errors_file_handler = RotatingFileHandler('{}exp_error.log'.format(log_dir), maxBytes=10**6, backupCount=5)
exp_errors_file_handler.setLevel(logging.WARNING)
exp_errors_file_handler.setFormatter(Formatter(log_file_format))
main_logger.addHandler(console_handler)
main_logger.addHandler(exp_file_handler)
main_logger.addHandler(exp_errors_file_handler)
def get_config_from_json(json_file):
"""
Get the config from a json file
:param json_file: the path of the config file
:return: config(namespace), config(dictionary)
"""
# parse the configurations from the config json file provided
with open(json_file, 'r') as config_file:
try:
config_dict = json.load(config_file)
# EasyDict allows to access dict values as attributes (works recursively).
config = EasyDict(config_dict)
return config, config_dict
except ValueError:
print("INVALID JSON file format.. Please provide a good json file")
exit(-1)
def process_config(json_file):
"""
Get the json file
Processing it with EasyDict to be accessible as attributes
then editing the path of the experiments folder
creating some important directories in the experiment folder
Then setup the logging in the whole program
Then return the config
:param json_file: the path of the config file
:return: config object(namespace)
"""
config, _ = get_config_from_json(json_file)
print(" ThE Configuration of your experiment ..")
pprint(config)
# making sure that you have provided the exp_name.
try:
print(" *************************************** ")
print("The experiment name is {}".format(config.exp_name))
print(" *************************************** ")
except AttributeError:
print("ERROR!!..Please provide the exp_name in json file..")
exit(-1)
# create some important directories to be used for that experiment.
config.summary_dir = os.path.join("experiments", config.exp_name, "summaries/")
config.checkpoint_dir = os.path.join("experiments", config.exp_name, "checkpoints/")
config.out_dir = os.path.join("experiments", config.exp_name, "out/")
config.log_dir = os.path.join("experiments", config.exp_name, "logs/")
create_dirs([config.summary_dir, config.checkpoint_dir, config.out_dir, config.log_dir])
# setup logging in the project
setup_logging(config.log_dir)
logging.getLogger().info("Hi, This is root.")
logging.getLogger().info("After the configurations are successfully processed and dirs are created.")
logging.getLogger().info("The pipeline of the project will begin now.")
return config
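# Minimal usage sketch (illustrative only): exp_name is the only key that
# process_config strictly requires; the other keys shown are assumptions.
if __name__ == "__main__":
    example_config = {"exp_name": "demo_run", "learning_rate": 0.001}
    with open("demo_config.json", "w") as config_out:
        json.dump(example_config, config_out)
    demo = process_config("demo_config.json")
    print(demo.summary_dir, demo.checkpoint_dir, demo.log_dir)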
|
the-stack_106_31950 |
"""
Google provides the defer() call as a wrapper around the taskqueue API. Unfortunately
it suffers from serious bugs, and "ticking timebomb" API decisions. Specifically:
- defer(_transactional=True) won't work transactionally if your task > 100kb
- A working defer() might suddenly start blowing up inside transactions if the task grows > 100kb
if you haven't specified xg=True, or you hit the entity group limit
This defer is an adapted version of that one, with the following changes:
- defer() will *always* use an entity group (even if the task is < 100kb) unless you pass
_small_task=True
- defer(_transactional=True) works
- Adds a _wipe_related_caches option (defaults to True) which wipes out ForeignKey caches
if you defer Django model instances (which can result in stale data when the deferred task
runs)
"""
import copy
import logging
from django.db import models
from django.utils import timezone
from google.appengine.api.datastore import Delete
from google.appengine.ext.deferred import PermanentTaskFailure # noqa
from djangae.db import transaction
from djangae.environment import task_queue_name
from djangae.models import DeferIterationMarker
from djangae.processing import find_key_ranges_for_queryset
from djangae.utils import retry
logger = logging.getLogger(__name__)
def _wipe_caches(args, kwargs):
# Django related fields (E.g. foreign key) store a "cache" of the related
# object when it's first accessed. These caches can drastically bloat up
# an instance. If we then defer that instance we're pickling and unpickling a
# load of data we likely need to reload in the task anyway. This code
# wipes the caches of related fields if any of the args or kwargs are
# instances.
def _wipe_instance(instance):
for field in (f for f in instance._meta.fields if f.rel):
cache_name = field.get_cache_name()
if hasattr(instance, cache_name):
delattr(instance, cache_name)
# We have to copy the instances before wiping the caches
# otherwise the calling code will suddenly lose their cached things
for i, arg in enumerate(args):
if isinstance(arg, models.Model):
args[i] = copy.deepcopy(arg)
_wipe_instance(args[i])
for k, v in list(kwargs.items()):
if isinstance(v, models.Model):
kwargs[k] = copy.deepcopy(v)
_wipe_instance(kwargs[k])
def defer(obj, *args, **kwargs):
"""
This is a replacement for google.appengine.ext.deferred.defer which doesn't
suffer the bug where tasks are deferred non-transactionally when they hit a
certain limit.
It also *always* uses an entity group, unless you pass _small_task=True in which
case it *never* uses an entity group (but you are limited by 100K)
"""
from google.appengine.ext.deferred.deferred import (
run_from_datastore,
serialize,
taskqueue,
_DeferredTaskEntity,
_DEFAULT_URL,
_TASKQUEUE_HEADERS,
_DEFAULT_QUEUE
)
KWARGS = {
"countdown", "eta", "name", "target", "retry_options"
}
taskargs = {x: kwargs.pop(("_%s" % x), None) for x in KWARGS}
taskargs["url"] = kwargs.pop("_url", _DEFAULT_URL)
transactional = kwargs.pop("_transactional", False)
small_task = kwargs.pop("_small_task", False)
wipe_related_caches = kwargs.pop("_wipe_related_caches", True)
taskargs["headers"] = dict(_TASKQUEUE_HEADERS)
taskargs["headers"].update(kwargs.pop("_headers", {}))
queue = kwargs.pop("_queue", _DEFAULT_QUEUE)
if wipe_related_caches:
args = list(args)
_wipe_caches(args, kwargs)
args = tuple(args)
pickled = serialize(obj, *args, **kwargs)
key = None
try:
# Always use an entity group unless this has been
# explicitly marked as a small task
if not small_task:
key = _DeferredTaskEntity(data=pickled).put()
# Defer the task
task = taskqueue.Task(payload=pickled, **taskargs)
ret = task.add(queue, transactional=transactional)
# Delete the key as it wasn't needed
if key:
Delete(key)
return ret
except taskqueue.TaskTooLargeError:
if small_task:
raise
pickled = serialize(run_from_datastore, str(key))
task = taskqueue.Task(payload=pickled, **taskargs)
# This is the line that fixes a bug in the SDK. The SDK
# code doesn't pass transactional here.
return task.add(queue, transactional=transactional)
except: # noqa
# Any other exception? Delete the key
if key:
Delete(key)
raise
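# Usage sketch (illustrative, not part of the module's API): how the special
# underscore-prefixed keyword arguments described above are passed. The task
# callable and its arguments are placeholders defined just for this example.
def _example_task(user_id):
    logger.info("deferred work for user %s", user_id)
def _example_defer_usage(user_id):
    defer(
        _example_task,
        user_id,
        _transactional=True,   # safe even if the serialized task exceeds 100kb
        _small_task=False,     # keep the entity-group fallback enabled
        _queue="default",
        _countdown=30,
    )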
_TASK_TIME_LIMIT = 10 * 60
class TimeoutException(Exception):
"Exception thrown to indicate that a new shard should begin and the current one should end"
pass
def _process_shard(marker_id, model, query, callback, finalize, buffer_time, args, kwargs):
args = args or tuple()
start_time = timezone.now()
try:
marker = DeferIterationMarker.objects.get(pk=marker_id)
except DeferIterationMarker.DoesNotExist:
logger.warning("DeferIterationMarker with ID: %s has vanished, cancelling task", marker_id)
return
# Redefer if the task isn't ready to begin
if not marker.is_ready:
defer(
_process_shard, marker_id, model, query, callback, finalize,
buffer_time=buffer_time,
args=args,
kwargs=kwargs,
_queue=task_queue_name(),
_countdown=1
)
return
try:
qs = model.objects.all()
qs.query = query
qs.order_by("pk")
last_pk = None
for instance in qs.all():
last_pk = instance.pk
if (timezone.now() - start_time).total_seconds() > _TASK_TIME_LIMIT - buffer_time:
raise TimeoutException()
callback(instance, *args, **kwargs)
else:
@transaction.atomic(xg=True)
def mark_shard_complete():
try:
marker.refresh_from_db()
except DeferIterationMarker.DoesNotExist:
logger.warning("TaskMarker with ID: %s has vanished, cancelling task", marker_id)
return
marker.shards_complete += 1
marker.save()
if marker.shards_complete == marker.shard_count:
# Delete the marker if we were asked to
if marker.delete_on_completion:
marker.delete()
defer(
finalize,
*args,
_transactional=True,
_queue=task_queue_name(),
**kwargs
)
retry(mark_shard_complete, _attempts=6)
except (Exception, TimeoutException) as e:
# We intentionally don't catch DeadlineExceededError here. There's not enough time to redefer a task
# and so the only option is to retry the current shard. It shouldn't happen though, 15 seconds should be
# ample time... DeadlineExceededError doesn't subclass Exception, it subclasses BaseException so it'll
# never enter here (if it does occur, somehow)
if isinstance(e, TimeoutException):
logger.debug("Ran out of time processing shard. Deferring new shard to continue from: %s", last_pk)
else:
logger.exception("Error processing shard. Retrying.")
if last_pk:
qs = qs.filter(pk__gte=last_pk)
defer(
_process_shard, marker_id, qs.model, qs.query, callback, finalize,
buffer_time=buffer_time,
args=args,
kwargs=kwargs,
_queue=task_queue_name(),
_countdown=1
)
def _generate_shards(model, query, callback, finalize, args, kwargs, shards, delete_marker, buffer_time):
queryset = model.objects.all()
queryset.query = query
key_ranges = find_key_ranges_for_queryset(queryset, shards)
marker = DeferIterationMarker.objects.create(
delete_on_completion=delete_marker,
callback_name=callback.__name__,
finalize_name=finalize.__name__
)
for i, (start, end) in enumerate(key_ranges):
is_last = i == (len(key_ranges) - 1)
qs = model.objects.all()
qs.query = query
filter_kwargs = {}
if start:
filter_kwargs["pk__gte"] = start
if end:
filter_kwargs["pk__lt"] = end
qs = qs.filter(**filter_kwargs)
@transaction.atomic(xg=True)
def make_shard():
marker.refresh_from_db()
marker.shard_count += 1
if is_last:
marker.is_ready = True
marker.save()
defer(
_process_shard,
marker.pk,
qs.model, qs.query, callback, finalize,
args=args,
kwargs=kwargs,
buffer_time=buffer_time,
_queue=task_queue_name(),
_transactional=True
)
try:
retry(make_shard, _attempts=5)
except: # noqa
marker.delete() # This will cause outstanding tasks to abort
raise
def defer_iteration_with_finalize(
queryset, callback, finalize, _queue='default', _shards=5,
_delete_marker=True, _transactional=False, _buffer_time=15, *args, **kwargs):
defer(
_generate_shards,
queryset.model,
queryset.query,
callback,
finalize,
args=args,
kwargs=kwargs,
delete_marker=_delete_marker,
shards=_shards,
buffer_time=_buffer_time,
_queue=_queue,
_transactional=_transactional
)
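# Usage sketch (illustrative): the callback is invoked once per instance with
# any extra *args/**kwargs, and finalize runs once after every shard completes.
# DeferIterationMarker is used as the queryset only because it is already
# imported in this module; any Django model's queryset works.
def _example_iteration_callback(instance, dry_run=False):
    if not dry_run:
        instance.save()
def _example_iteration_finalize(dry_run=False):
    logger.info("defer_iteration_with_finalize finished (dry_run=%s)", dry_run)
def _example_iteration():
    defer_iteration_with_finalize(
        DeferIterationMarker.objects.all(),
        _example_iteration_callback,
        _example_iteration_finalize,
        _shards=5,
        dry_run=True,
    )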
|
the-stack_106_31951 | # Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import os
import platform
import shutil
import stat
import time
import re
import tempfile
from runner import RunnerCore, path_from_root, env_modify, chdir
from tools.shared import NODE_JS, PYTHON, EMCC, SPIDERMONKEY_ENGINE, V8_ENGINE, CONFIG_FILE, PIPE, STDOUT, EM_CONFIG, LLVM_ROOT, CANONICAL_TEMP_DIR
from tools.shared import run_process, try_delete, run_js, safe_ensure_dirs, expected_llvm_version, generate_sanity
from tools.shared import Cache, Settings
from tools import jsrun, shared
SANITY_FILE = CONFIG_FILE + '_sanity'
commands = [[PYTHON, EMCC], [PYTHON, path_from_root('tests', 'runner.py'), 'blahblah']]
def restore():
shutil.copyfile(CONFIG_FILE + '_backup', CONFIG_FILE)
# restore the config file and set it up for our uses
def restore_and_set_up():
restore()
with open(CONFIG_FILE, 'a') as f:
# don't use the native optimizer from the emsdk - we want to test how it builds
f.write('\nEMSCRIPTEN_NATIVE_OPTIMIZER = ""\n')
# make LLVM_ROOT sensitive to the LLVM env var, as we test that
f.write('\nLLVM_ROOT = os.path.expanduser(os.getenv("LLVM", "%s"))\n' % LLVM_ROOT)
# wipe the config and sanity files, creating a blank slate
def wipe():
try_delete(CONFIG_FILE)
try_delete(SANITY_FILE)
def add_to_config(content):
with open(CONFIG_FILE, 'a') as f:
f.write(content + '\n')
def mtime(filename):
return os.path.getmtime(filename)
def make_fake_clang(filename, version):
"""Create a fake clang that only handles --version
--version writes to stdout (unlike -v which writes to stderr)
"""
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write('#!/bin/sh\n')
f.write('echo "clang version %s"\n' % version)
f.write('echo "..."\n')
shutil.copyfile(filename, filename + '++')
os.chmod(filename, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
os.chmod(filename + '++', stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
def make_fake_llc(filename, targets):
"""Create a fake llc that only handles --version and writes target
list to stdout.
"""
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write('#!/bin/sh\n')
f.write('echo "llc fake output\nRegistered Targets:\n%s"' % targets)
os.chmod(filename, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
SANITY_MESSAGE = 'Emscripten: Running sanity checks'
EMBUILDER = path_from_root('embuilder.py')
# arguments to build a minimal hello world program, without even libc
# (-O1 avoids -O0's default assertions which bring in checking code;
# FILESYSTEM=0 avoids bringing libc for that)
# (ERROR_ON_UNDEFINED_SYMBOLS=0 is needed because __errno_location is
# not included on the native side but needed by a lot of JS libraries.)
MINIMAL_HELLO_WORLD = [path_from_root('tests', 'hello_world_em_asm.c'), '-O1', '-s', 'FILESYSTEM=0', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0']
class sanity(RunnerCore):
@classmethod
def setUpClass(cls):
super(sanity, cls).setUpClass()
shutil.copyfile(CONFIG_FILE, CONFIG_FILE + '_backup')
print()
print('Running sanity checks.')
print('WARNING: This will modify %s, and in theory can break it although it should be restored properly. A backup will be saved in %s_backup' % (EM_CONFIG, EM_CONFIG))
print()
print('>>> the original settings file is:')
print(open(os.path.expanduser('~/.emscripten')).read())
print('<<<')
print()
assert os.path.exists(CONFIG_FILE), 'To run these tests, we need a (working!) %s file to already exist' % EM_CONFIG
assert 'EMCC_DEBUG' not in os.environ, 'do not run sanity checks in debug mode!'
assert 'EMCC_WASM_BACKEND' not in os.environ, 'do not force wasm backend either way in sanity checks!'
@classmethod
def tearDownClass(cls):
super(sanity, cls).tearDownClass()
restore()
def setUp(self):
super(sanity, self).setUp()
wipe()
self.start_time = time.time()
def tearDown(self):
super(sanity, self).tearDown()
print('time:', time.time() - self.start_time)
def do(self, command):
print('Running: ' + ' '.join(command))
if type(command) is not list:
command = [command]
if command[0] == EMCC:
command = [PYTHON] + command
return run_process(command, stdout=PIPE, stderr=STDOUT, check=False).stdout
def check_working(self, command, expected=None):
if type(command) is not list:
command = [command]
if expected is None:
if command[0] == EMCC or (len(command) >= 2 and command[1] == EMCC):
expected = 'no input files'
else:
expected = "could not find the following tests: blahblah"
output = self.do(command)
self.assertContained(expected, output)
return output
# this should be the very first thing that runs. if this fails, everything else is irrelevant!
def test_aaa_normal(self):
for command in commands:
# Your existing EM_CONFIG should work!
restore_and_set_up()
self.check_working(command)
def test_firstrun(self):
for command in commands:
wipe()
def make_executable(name):
with open(os.path.join(temp_bin, name), 'w') as f:
os.fchmod(f.fileno(), stat.S_IRWXU)
try:
temp_bin = tempfile.mkdtemp()
old_environ_path = os.environ['PATH']
os.environ['PATH'] = temp_bin + os.pathsep + old_environ_path
make_executable('llvm-dis')
make_executable('node')
make_executable('python2')
output = self.do(command)
finally:
os.environ['PATH'] = old_environ_path
shutil.rmtree(temp_bin)
self.assertContained('Welcome to Emscripten!', output)
self.assertContained('This is the first time any of the Emscripten tools has been run.', output)
self.assertContained('A settings file has been copied to %s, at absolute path: %s' % (EM_CONFIG, CONFIG_FILE), output)
self.assertContained('It contains our best guesses for the important paths, which are:', output)
self.assertContained('LLVM_ROOT', output)
self.assertContained('NODE_JS', output)
if platform.system() != 'Windows':
# os.chmod can't make files executable on Windows
self.assertIdentical(temp_bin, re.search("^ *LLVM_ROOT *= (.*)$", output, re.M).group(1))
possible_nodes = [os.path.join(temp_bin, 'node')]
if os.path.exists('/usr/bin/nodejs'):
possible_nodes.append('/usr/bin/nodejs')
self.assertIdentical(possible_nodes, re.search("^ *NODE_JS *= (.*)$", output, re.M).group(1))
self.assertContained('Please edit the file if any of those are incorrect', output)
self.assertContained('This command will now exit. When you are done editing those paths, re-run it.', output)
assert output.split()[-1].endswith('===='), 'We should have stopped: ' + output
config_file = open(CONFIG_FILE).read()
template_file = open(path_from_root('tools', 'settings_template_readonly.py')).read()
self.assertNotContained('~/.emscripten', config_file)
self.assertContained('~/.emscripten', template_file)
self.assertNotContained('{{{', config_file)
self.assertNotContained('}}}', config_file)
self.assertContained('{{{', template_file)
self.assertContained('}}}', template_file)
for content in ['EMSCRIPTEN_ROOT', 'LLVM_ROOT', 'NODE_JS', 'TEMP_DIR', 'COMPILER_ENGINE', 'JS_ENGINES']:
self.assertContained(content, config_file)
# The guessed config should be ok
# XXX This depends on your local system! it is possible `which` guesses wrong
# try_delete('a.out.js')
# output = run_process([PYTHON, EMCC, path_from_root('tests', 'hello_world.c')], stdout=PIPE, stderr=PIPE).output
# self.assertContained('hello, world!', run_js('a.out.js'), output)
# Second run, with bad EM_CONFIG
for settings in ['blah', 'LLVM_ROOT="blarg"; JS_ENGINES=[]; COMPILER_ENGINE=NODE_JS=SPIDERMONKEY_ENGINE=[]']:
f = open(CONFIG_FILE, 'w')
f.write(settings)
f.close()
output = self.do(command)
if 'LLVM_ROOT' not in settings:
self.assertContained('Error in evaluating %s' % EM_CONFIG, output)
elif 'runner.py' not in ' '.join(command):
self.assertContained('ERROR', output) # sanity check should fail
def test_closure_compiler(self):
CLOSURE_FATAL = 'fatal: closure compiler'
CLOSURE_WARNING = 'does not exist'
# Sanity check should find closure
restore_and_set_up()
output = self.check_working(EMCC)
self.assertNotContained(CLOSURE_FATAL, output)
self.assertNotContained(CLOSURE_WARNING, output)
# Append a bad path for closure, will warn
f = open(CONFIG_FILE, 'a')
f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
f.close()
output = self.check_working(EMCC, CLOSURE_WARNING)
# And if you actually try to use the bad path, will be fatal
f = open(CONFIG_FILE, 'a')
f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
f.close()
output = self.check_working([EMCC, '-s', '--closure', '1'] + MINIMAL_HELLO_WORLD + ['-O2'], CLOSURE_FATAL)
# With a working path, all is well
restore_and_set_up()
try_delete('a.out.js')
output = self.check_working([EMCC, '-s', '--closure', '1'] + MINIMAL_HELLO_WORLD + ['-O2'], '')
assert os.path.exists('a.out.js'), output
def test_llvm(self):
LLVM_WARNING = 'LLVM version appears incorrect'
restore_and_set_up()
# Clang should report the version number we expect, and emcc should not warn
assert shared.check_llvm_version()
output = self.check_working(EMCC)
assert LLVM_WARNING not in output, output
# Fake a different llvm version
restore_and_set_up()
with open(CONFIG_FILE, 'a') as f:
f.write('LLVM_ROOT = "' + path_from_root('tests', 'fake') + '"')
with env_modify({'EM_IGNORE_SANITY': '1'}):
for x in range(-2, 3):
for y in range(-2, 3):
expected_x, expected_y = (int(x) for x in expected_llvm_version().split('.'))
expected_x += x
expected_y += y
if expected_x < 0 or expected_y < 0:
continue # must be a valid llvm version
print(expected_llvm_version(), x, y, expected_x, expected_y)
make_fake_clang(path_from_root('tests', 'fake', 'clang'), '%s.%s' % (expected_x, expected_y))
if x != 0 or y != 0:
output = self.check_working(EMCC, LLVM_WARNING)
else:
output = self.check_working(EMCC)
assert LLVM_WARNING not in output, output
def test_emscripten_root(self):
# The correct path
restore_and_set_up()
add_to_config("EMSCRIPTEN_ROOT = '%s'" % path_from_root())
self.check_working(EMCC)
# The correct path with extra stuff
restore_and_set_up()
add_to_config("EMSCRIPTEN_ROOT = '%s'" % (path_from_root() + os.path.sep))
self.check_working(EMCC)
def test_llvm_fastcomp(self):
WARNING = 'fastcomp in use, but LLVM has not been built with the JavaScript backend as a target'
restore_and_set_up()
# Should see js backend during sanity check
self.assertTrue(shared.check_llvm())
output = self.check_working(EMCC)
self.assertNotIn(WARNING, output)
# Fake incorrect llc output, no mention of js backend
restore_and_set_up()
with open(CONFIG_FILE, 'a') as f:
f.write('LLVM_ROOT = "' + path_from_root('tests', 'fake', 'bin') + '"')
# print '1', open(CONFIG_FILE).read()
make_fake_clang(path_from_root('tests', 'fake', 'bin', 'clang'), expected_llvm_version())
make_fake_llc(path_from_root('tests', 'fake', 'bin', 'llc'), 'no j-s backend for you!')
self.check_working(EMCC, WARNING)
# fake some more
for fake in ['llvm-link', 'llvm-ar', 'opt', 'llvm-as', 'llvm-dis', 'llvm-nm', 'lli']:
open(path_from_root('tests', 'fake', 'bin', fake), 'w').write('.')
try_delete(SANITY_FILE)
self.check_working(EMCC, WARNING)
# make sure sanity checks notice there is no source dir with version #
make_fake_llc(path_from_root('tests', 'fake', 'bin', 'llc'), 'there IZ a js backend: JavaScript (asm.js, emscripten) backend')
try_delete(SANITY_FILE)
self.check_working(EMCC, 'clang version does not appear to include fastcomp')
VERSION_WARNING = 'Emscripten, llvm and clang build versions do not match, this is dangerous'
# add version number
make_fake_clang(path_from_root('tests', 'fake', 'bin', 'clang'), '%s (emscripten waka : waka)' % expected_llvm_version())
try_delete(SANITY_FILE)
self.check_working(EMCC, VERSION_WARNING)
restore_and_set_up()
self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-s', 'ASM_JS=0'], '''Very old compiler settings (pre-fastcomp) are no longer supported.''')
def test_node(self):
NODE_WARNING = 'node version appears too old'
NODE_WARNING_2 = 'cannot check node version'
restore_and_set_up()
# Clang should report the version number we expect, and emcc should not warn
assert shared.check_node_version()
output = self.check_working(EMCC)
self.assertNotIn(NODE_WARNING, output)
# Fake a different node version
restore_and_set_up()
f = open(CONFIG_FILE, 'a')
f.write('NODE_JS = "' + path_from_root('tests', 'fake', 'nodejs') + '"')
f.close()
with env_modify({'EM_IGNORE_SANITY': '1'}):
for version, succeed in [('v0.8.0', False),
('v4.1.0', False),
('v4.1.1', True),
('v4.2.3-pre', True),
('cheez', False)]:
print(version, succeed)
f = open(path_from_root('tests', 'fake', 'nodejs'), 'w')
f.write('#!/bin/sh\n')
f.write('''if [ $1 = "--version" ]; then
echo "%s"
else
%s $@
fi
''' % (version, NODE_JS))
f.close()
os.chmod(path_from_root('tests', 'fake', 'nodejs'), stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
if not succeed:
if version[0] == 'v':
self.check_working(EMCC, NODE_WARNING)
else:
self.check_working(EMCC, NODE_WARNING_2)
else:
output = self.check_working(EMCC)
assert NODE_WARNING not in output, output
def test_emcc(self):
SANITY_FAIL_MESSAGE = 'sanity check failed to run'
# emcc should check sanity if no ${EM_CONFIG}_sanity
restore_and_set_up()
time.sleep(1)
assert not os.path.exists(SANITY_FILE) # restore is just the settings, not the sanity
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
assert os.path.exists(SANITY_FILE) # EMCC should have checked sanity successfully
assert mtime(SANITY_FILE) > mtime(CONFIG_FILE)
assert generate_sanity() == open(SANITY_FILE).read()
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc run again should not sanity check, because the sanity file is newer
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# correct sanity contents mean we need not check
open(SANITY_FILE, 'w').write(generate_sanity())
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
# incorrect sanity contents mean we *must* check
open(SANITY_FILE, 'w').write('wakawaka')
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
# but with EMCC_DEBUG=1 we should check
with env_modify({'EMCC_DEBUG': '1'}):
output = self.check_working(EMCC)
try_delete(CANONICAL_TEMP_DIR)
self.assertContained(SANITY_MESSAGE, output)
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
# also with -v, with or without inputs
output = self.check_working([EMCC, '-v'], SANITY_MESSAGE)
output = self.check_working([EMCC, '-v'] + MINIMAL_HELLO_WORLD + [], SANITY_MESSAGE)
# Make sure the test runner didn't do anything to the setup
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc should also check sanity if the file is outdated
time.sleep(0.1)
restore_and_set_up()
assert mtime(SANITY_FILE) < mtime(CONFIG_FILE)
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc should be configurable directly from EM_CONFIG without any config file
restore_and_set_up()
config = open(CONFIG_FILE, 'r').read()
open('main.cpp', 'w').write('''
#include <stdio.h>
int main() {
printf("hello from emcc with no config file\\n");
return 0;
}
''')
wipe()
with env_modify({'EM_CONFIG': config}):
run_process([PYTHON, EMCC, 'main.cpp', '-o', 'a.out.js'])
self.assertContained('hello from emcc with no config file', run_js('a.out.js'))
def test_emcc_caching(self):
INCLUDING_MESSAGE = 'including X'
BUILDING_MESSAGE = 'building X for cache'
ERASING_MESSAGE = 'clearing cache'
EMCC_CACHE = Cache.dirname
restore_and_set_up()
Cache.erase()
assert not os.path.exists(EMCC_CACHE)
with env_modify({'EMCC_DEBUG': '1'}):
# Building a file that *does* need something *should* trigger cache
# generation, but only the first time
for filename, libname in [('hello_libcxx.cpp', 'libc++')]:
for i in range(3):
print(filename, libname, i)
self.clear()
output = self.do([EMCC, '-O' + str(i), '-s', '--llvm-lto', '0', path_from_root('tests', filename), '--save-bc', 'a.bc', '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
# print '\n\n\n', output
assert INCLUDING_MESSAGE.replace('X', libname) in output
if libname == 'libc':
assert INCLUDING_MESSAGE.replace('X', 'libc++') not in output # we don't need libc++ in this code
else:
assert INCLUDING_MESSAGE.replace('X', 'libc') in output # libc++ always forces inclusion of libc
assert (BUILDING_MESSAGE.replace('X', libname) in output) == (i == 0), 'Must only build the first time'
self.assertContained('hello, world!', run_js('a.out.js'))
assert os.path.exists(EMCC_CACHE)
full_libname = libname + '.bc' if libname != 'libc++' else libname + '.a'
assert os.path.exists(os.path.join(EMCC_CACHE, full_libname))
try_delete(CANONICAL_TEMP_DIR)
restore_and_set_up()
def ensure_cache():
self.do([PYTHON, EMCC, '-O2', path_from_root('tests', 'hello_world.c')])
# Manual cache clearing
ensure_cache()
self.assertTrue(os.path.exists(EMCC_CACHE))
self.assertTrue(os.path.exists(Cache.root_dirname))
output = self.do([PYTHON, EMCC, '--clear-cache'])
self.assertIn(ERASING_MESSAGE, output)
self.assertFalse(os.path.exists(EMCC_CACHE))
self.assertFalse(os.path.exists(Cache.root_dirname))
self.assertIn(SANITY_MESSAGE, output)
# Changing LLVM_ROOT, even without altering .emscripten, clears the cache
ensure_cache()
make_fake_clang(path_from_root('tests', 'fake', 'bin', 'clang'), expected_llvm_version())
with env_modify({'LLVM': path_from_root('tests', 'fake', 'bin')}):
self.assertTrue(os.path.exists(EMCC_CACHE))
output = self.do([PYTHON, EMCC])
self.assertIn(ERASING_MESSAGE, output)
self.assertFalse(os.path.exists(EMCC_CACHE))
def test_nostdincxx(self):
restore_and_set_up()
Cache.erase()
for compiler in [EMCC]:
print(compiler)
run_process([PYTHON, EMCC] + MINIMAL_HELLO_WORLD + ['-v']) # run once to ensure binaryen port is all ready
proc = run_process([PYTHON, EMCC] + MINIMAL_HELLO_WORLD + ['-v'], stdout=PIPE, stderr=PIPE)
out = proc.stdout
err = proc.stderr
proc2 = run_process([PYTHON, EMCC] + MINIMAL_HELLO_WORLD + ['-v', '-nostdinc++'], stdout=PIPE, stderr=PIPE)
out2 = proc2.stdout
err2 = proc2.stderr
self.assertIdentical(out, out2)
def focus(e):
assert 'search starts here:' in e, e
assert e.count('End of search list.') == 1, e
return e[e.index('search starts here:'):e.index('End of search list.') + 20]
err = focus(err)
err2 = focus(err2)
assert err == err2, err + '\n\n\n\n' + err2
def test_emconfig(self):
restore_and_set_up()
fd, custom_config_filename = tempfile.mkstemp(prefix='.emscripten_config_')
orig_config = open(CONFIG_FILE, 'r').read()
# Move the ~/.emscripten to a custom location.
with os.fdopen(fd, "w") as f:
f.write(orig_config)
# Make a syntax error in the original config file so that attempting to access it would fail.
open(CONFIG_FILE, 'w').write('asdfasdfasdfasdf\n\'\'\'' + orig_config)
temp_dir = tempfile.mkdtemp(prefix='emscripten_temp_')
with chdir(temp_dir):
self.do([PYTHON, EMCC, '--em-config', custom_config_filename] + MINIMAL_HELLO_WORLD + ['-O2'])
result = run_js('a.out.js')
self.assertContained('hello, world!', result)
# Clean up created temp files.
os.remove(custom_config_filename)
if Settings.WASM_BACKEND:
os.remove(custom_config_filename + "_sanity_wasm")
else:
os.remove(custom_config_filename + "_sanity")
shutil.rmtree(temp_dir)
def test_emcc_ports(self):
restore_and_set_up()
# listing ports
out = self.do([PYTHON, EMCC, '--show-ports'])
assert 'Available ports:' in out, out
assert 'SDL2' in out, out
assert 'SDL2_image' in out, out
assert 'SDL2_net' in out, out
# using ports
RETRIEVING_MESSAGE = 'retrieving port'
BUILDING_MESSAGE = 'generating port'
from tools import system_libs
PORTS_DIR = system_libs.Ports.get_dir()
for compiler in [EMCC]:
print(compiler)
for i in [0, 1]:
self.do([PYTHON, EMCC, '--clear-cache'])
print(i)
if i == 0:
try_delete(PORTS_DIR)
else:
self.do([PYTHON, compiler, '--clear-ports'])
assert not os.path.exists(PORTS_DIR)
# Building a file that doesn't need ports should not trigger anything
# (avoid wasm to avoid the binaryen port)
output = self.do([compiler, path_from_root('tests', 'hello_world_sdl.cpp'), '-s', 'WASM=0'])
print('no', output)
assert RETRIEVING_MESSAGE not in output, output
assert BUILDING_MESSAGE not in output
assert not os.path.exists(PORTS_DIR)
# Building a file that need a port does trigger stuff
output = self.do([compiler, path_from_root('tests', 'hello_world_sdl.cpp'), '-s', 'WASM=0', '-s', 'USE_SDL=2'])
print('yes', output)
assert RETRIEVING_MESSAGE in output, output
assert BUILDING_MESSAGE in output, output
assert os.path.exists(PORTS_DIR)
def second_use():
# Using it again avoids retrieve and build
output = self.do([compiler, path_from_root('tests', 'hello_world_sdl.cpp'), '-s', 'WASM=0', '-s', 'USE_SDL=2'])
assert RETRIEVING_MESSAGE not in output, output
assert BUILDING_MESSAGE not in output, output
second_use()
# if the version isn't sufficient, we retrieve and rebuild
subdir = os.listdir(os.path.join(PORTS_DIR, 'sdl2'))[0]
os.rename(os.path.join(PORTS_DIR, 'sdl2', subdir), os.path.join(PORTS_DIR, 'sdl2', 'old-subdir'))
import zipfile
z = zipfile.ZipFile(os.path.join(PORTS_DIR, 'sdl2' + '.zip'), 'w')
if not os.path.exists('old-sub'):
os.mkdir('old-sub')
open(os.path.join('old-sub', 'a.txt'), 'w').write('waka')
open(os.path.join('old-sub', 'b.txt'), 'w').write('waka')
z.write(os.path.join('old-sub', 'a.txt'))
z.write(os.path.join('old-sub', 'b.txt'))
z.close()
output = self.do([compiler, path_from_root('tests', 'hello_world_sdl.cpp'), '-s', 'WASM=0', '-s', 'USE_SDL=2'])
assert RETRIEVING_MESSAGE in output, output
assert BUILDING_MESSAGE in output, output
assert os.path.exists(PORTS_DIR)
second_use()
def test_native_optimizer(self):
restore_and_set_up()
def build():
return self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-O2', '-s', 'WASM=0'], 'running js post-opts')
def test():
self.assertContained('hello, world!', run_js('a.out.js'))
with env_modify({'EMCC_DEBUG': '1'}):
# basic usage or lack of usage
for native in [None, 0, 1]:
print('phase 1, part', native)
Cache.erase()
try:
if native is not None:
os.environ['EMCC_NATIVE_OPTIMIZER'] = str(native)
output = build()
assert ('js optimizer using native' in output) == (not not (native or native is None)), output
test()
if native or native is None: # None means use the default, which is to use the native optimizer
assert 'building native optimizer' in output, output
# compile again, no rebuild of optimizer
output = build()
assert 'building native optimizer' not in output
assert 'js optimizer using native' in output
test()
finally:
if native is not None:
del os.environ['EMCC_NATIVE_OPTIMIZER']
# force a build failure, see we fall back to non-native
for native in [1, 'g']:
with env_modify({'EMCC_NATIVE_OPTIMIZER': str(native)}):
print('phase 2, part', native)
Cache.erase()
try:
# break it
f = path_from_root('tools', 'optimizer', 'optimizer-main.cpp')
src = open(f).read()
bad = src.replace('main', '!waka waka<')
assert bad != src
open(f, 'w').write(bad)
# first try
output = build()
assert 'failed to build native optimizer' in output, output
if native == 1:
assert 'to see compiler errors, build with EMCC_NATIVE_OPTIMIZER=g' in output
assert 'waka waka' not in output
else:
assert 'output from attempt' in output, output
assert 'waka waka' in output, output
assert 'js optimizer using native' not in output
test() # still works, without native optimizer
# second try, see previous failure
output = build()
assert 'failed to build native optimizer' not in output
assert 'seeing that optimizer could not be built' in output
test() # still works, without native optimizer
# clear cache, try again
Cache.erase()
output = build()
assert 'failed to build native optimizer' in output
test() # still works, without native optimizer
finally:
open(f, 'w').write(src)
Cache.erase()
# now it should work again
output = build()
assert 'js optimizer using native' in output
test() # still works
try_delete(CANONICAL_TEMP_DIR)
def test_d8_path(self):
""" Test that running JS commands works for node, d8, and jsc and is not path dependent """
# Fake some JS engines
restore_and_set_up()
sample_script = path_from_root('tests', 'print_args.js')
# Note that the path contains 'd8'.
test_path = path_from_root('tests', 'fake', 'abcd8765')
if not os.path.exists(test_path):
os.makedirs(test_path)
with env_modify({'EM_IGNORE_SANITY': '1'}):
jsengines = [('d8', V8_ENGINE),
('d8_g', V8_ENGINE),
('js', SPIDERMONKEY_ENGINE),
('node', NODE_JS),
('nodejs', NODE_JS)]
for filename, engine in jsengines:
if type(engine) is list:
engine = engine[0]
if engine == '':
print('WARNING: Not testing engine %s, not configured.' % (filename))
continue
print(filename, engine)
test_engine_path = os.path.join(test_path, filename)
f = open(test_engine_path, 'w')
f.write('#!/bin/sh\n')
f.write('%s $@\n' % (engine))
f.close()
os.chmod(test_engine_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
try:
out = jsrun.run_js(sample_script, engine=test_engine_path, args=['--foo'], full_output=True, assert_returncode=0, skip_check=True)
except Exception as e:
if 'd8' in filename:
assert False, 'Your d8 version does not correctly parse command-line arguments, please upgrade or delete from ~/.emscripten config file: %s' % (e)
else:
assert False, 'Error running script command: %s' % (e)
self.assertEqual('0: --foo', out.strip())
def test_wacky_env(self):
restore_and_set_up()
def build():
return self.check_working([EMCC] + MINIMAL_HELLO_WORLD, '')
def test():
self.assertContained('hello, world!', run_js('a.out.js'))
print('normal build')
with env_modify({'EMCC_FORCE_STDLIBS': None}):
Cache.erase()
build()
test()
print('wacky env vars, these should not mess our bootstrapping')
with env_modify({'EMCC_FORCE_STDLIBS': '1'}):
Cache.erase()
build()
test()
def test_vanilla(self):
restore_and_set_up()
Cache.erase()
with env_modify({'EMCC_DEBUG': '1'}):
# see that we test vanilla status, and just once
TESTING = 'testing for asm.js target'
self.check_working(EMCC, TESTING)
for i in range(3):
output = self.check_working(EMCC, 'check tells us to use')
assert TESTING not in output
# if env var tells us, do what it says
with env_modify({'EMCC_WASM_BACKEND': '1'}):
self.check_working(EMCC, 'EMCC_WASM_BACKEND tells us to use wasm backend')
with env_modify({'EMCC_WASM_BACKEND': '0'}):
self.check_working(EMCC, 'EMCC_WASM_BACKEND tells us to use asm.js backend')
def make_fake(report):
with open(CONFIG_FILE, 'a') as f:
f.write('LLVM_ROOT = "' + path_from_root('tests', 'fake', 'bin') + '"\n')
# BINARYEN_ROOT needs to exist in the config, even though this test
# doesn't actually use it.
f.write('BINARYEN_ROOT= "%s"\n' % path_from_root('tests', 'fake', 'bin'))
make_fake_llc(path_from_root('tests', 'fake', 'bin', 'llc'), report)
with open(path_from_root('tests', 'fake', 'bin', 'wasm-ld'), 'w') as f:
f.write('#!/bin/sh\n')
f.write('exit 0\n')
os.chmod(path_from_root('tests', 'fake', 'bin', 'wasm-ld'), stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
with env_modify({'EMCC_DEBUG': '1'}):
make_fake('wasm32-unknown-unknown-elf')
# see that we request the right backend from llvm
with env_modify({'EMCC_WASM_BACKEND': '1'}):
self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], 'wasm32-unknown-unknown-elf')
make_fake('asmjs-unknown-emscripten')
with env_modify({'EMCC_WASM_BACKEND': '0'}):
self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], 'asmjs-unknown-emscripten')
# check the current installed one is ok
restore_and_set_up()
self.check_working(EMCC)
output = self.check_working(EMCC, 'check tells us to use')
if 'wasm backend' in output:
self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], 'wasm32-unknown-unknown-elf')
else:
assert 'asm.js backend' in output
self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], 'asmjs-unknown-emscripten')
# fake llc output
def test_with_fake(report, expected):
make_fake(report)
with env_modify({'EMCC_DEBUG': '1'}):
output = self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], expected)
self.assertContained('config file changed since we checked vanilla', output)
test_with_fake('got js backend! JavaScript (asm.js, emscripten) backend', 'check tells us to use asm.js backend')
test_with_fake('got wasm32 backend! WebAssembly 32-bit', 'check tells us to use wasm backend')
# use LLVM env var to modify LLVM between vanilla checks
assert not os.environ.get('LLVM'), 'we need to modify LLVM env var for this'
f = open(CONFIG_FILE, 'a')
f.write('LLVM_ROOT = os.getenv("LLVM", "' + path_from_root('tests', 'fake1', 'bin') + '")\n')
f.close()
safe_ensure_dirs(path_from_root('tests', 'fake1', 'bin'))
f = open(path_from_root('tests', 'fake1', 'bin', 'llc'), 'w')
f.write('#!/bin/sh\n')
f.write('echo "llc fake1 output\nRegistered Targets:\n%s"' % 'got js backend! JavaScript (asm.js, emscripten) backend')
f.close()
os.chmod(path_from_root('tests', 'fake1', 'bin', 'llc'), stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
safe_ensure_dirs(path_from_root('tests', 'fake2', 'bin'))
f = open(path_from_root('tests', 'fake2', 'bin', 'llc'), 'w')
f.write('#!/bin/sh\n')
f.write('echo "llc fake2 output\nRegistered Targets:\n%s"' % 'got wasm32 backend! WebAssembly 32-bit')
f.close()
os.chmod(path_from_root('tests', 'fake2', 'bin', 'llc'), stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
with env_modify({'EMCC_DEBUG': '1'}):
self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], 'use asm.js backend')
with env_modify({'LLVM': path_from_root('tests', 'fake2', 'bin')}):
self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], 'regenerating vanilla check since other llvm')
try_delete(CANONICAL_TEMP_DIR)
return # TODO: the rest of this
# check separate cache dirs are used
restore_and_set_up()
self.check_working([EMCC], '')
root_cache = os.path.expanduser('~/.emscripten_cache')
if os.path.exists(os.path.join(root_cache, 'asmjs')):
shutil.rmtree(os.path.join(root_cache, 'asmjs'))
if os.path.exists(os.path.join(root_cache, 'wasm')):
shutil.rmtree(os.path.join(root_cache, 'wasm'))
with env_modify({'EMCC_WASM_BACKEND': '1'}):
self.check_working([EMCC] + MINIMAL_HELLO_WORLD, '')
assert os.path.exists(os.path.join(root_cache, 'wasm'))
with env_modify({'EMCC_WASM_BACKEND': '0'}):
self.check_working([EMCC] + MINIMAL_HELLO_WORLD, '')
assert os.path.exists(os.path.join(root_cache, 'asmjs'))
shutil.rmtree(os.path.join(root_cache, 'asmjs'))
self.check_working([EMCC] + MINIMAL_HELLO_WORLD, '')
assert os.path.exists(os.path.join(root_cache, 'asmjs'))
def test_wasm_backend_builds(self):
# we can build a program using the wasm backend, rebuilding binaryen etc. as needed
restore_and_set_up()
def check():
print(self.do([PYTHON, EMCC, '--clear-cache']))
print(self.do([PYTHON, EMCC, '--clear-ports']))
with env_modify({'EMCC_WASM_BACKEND': '1'}):
self.check_working([EMCC, path_from_root('tests', 'hello_world.c')], '')
print('normally')
check()
print('with no BINARYEN_ROOT')
open(CONFIG_FILE, 'a').write('''
BINARYEN_ROOT = ''
''')
print(open(CONFIG_FILE).read())
check()
def test_binaryen(self):
import tools.ports.binaryen as binaryen
tag_file = Cache.get_path('binaryen_tag_' + binaryen.TAG + '.txt')
assert not os.environ.get('BINARYEN') # must not have binaryen env var set
# test in 2 modes - with BINARYEN_ROOT in the config file, set to '', and without it entirely
for binaryen_root_in_config in [1, 0]:
print('binaryen_root_in_config:', binaryen_root_in_config)
def prep():
restore_and_set_up()
print('clearing ports...')
print(self.do([PYTHON, EMCC, '--clear-ports']))
wipe()
self.do([PYTHON, EMCC]) # first run stage
try_delete(tag_file)
# if BINARYEN_ROOT is set, we don't build the port. Check we do build it if not
if binaryen_root_in_config:
config = open(CONFIG_FILE).read()
assert '''BINARYEN_ROOT = os.path.expanduser(os.getenv('BINARYEN', ''))''' in config, config # setup created it to be ''
print('created config:')
print(config)
restore_and_set_up()
config = open(CONFIG_FILE).read()
config = config.replace('BINARYEN_ROOT', '''BINARYEN_ROOT = os.path.expanduser(os.getenv('BINARYEN', '')) # ''')
else:
restore_and_set_up()
config = open(CONFIG_FILE).read()
config = config.replace('BINARYEN_ROOT', '#')
print('modified config:')
print(config)
open(CONFIG_FILE, 'w').write(config)
print('build using embuilder')
prep()
run_process([PYTHON, EMBUILDER, 'build', 'binaryen'])
assert os.path.exists(tag_file)
run_process([PYTHON, EMCC] + MINIMAL_HELLO_WORLD + ['-s', 'BINARYEN=1'])
self.assertContained('hello, world!', run_js('a.out.js'))
print('see we show an error for emmake (we cannot build natively under emmake)')
prep()
try_delete('a.out.js')
out = self.do([PYTHON, path_from_root('emmake.py'), EMCC] + MINIMAL_HELLO_WORLD + ['-s', 'BINARYEN=1'])
assert not os.path.exists(tag_file)
assert not os.path.exists('a.out.js')
self.assertContained('For example, for binaryen, do "python embuilder.py build binaryen"', out)
if not binaryen_root_in_config:
print('build on demand')
for side_module in (False, True):
print(side_module)
prep()
assert not os.path.exists(tag_file)
try_delete('a.out.js')
try_delete('a.out.wasm')
cmd = [PYTHON, EMCC]
if not side_module:
cmd += MINIMAL_HELLO_WORLD
else:
# EM_ASM doesn't work in a wasm side module, build a normal program
cmd += [path_from_root('tests', 'hello_world.c'), '-s', 'SIDE_MODULE=1']
cmd += ['-s', 'BINARYEN=1']
run_process(cmd)
assert os.path.exists(tag_file)
assert os.path.exists('a.out.wasm')
if not side_module:
assert os.path.exists('a.out.js')
self.assertContained('hello, world!', run_js('a.out.js'))
|
the-stack_106_31955 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import random
import time
import math
import distutils.util
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad, Dict
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
from paddlenlp.transformers import ElectraForSequenceClassification, ElectraTokenizer
from paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
METRIC_CLASSES = {
"cola": Mcc,
"sst-2": Accuracy,
"mrpc": AccuracyAndF1,
"sts-b": PearsonAndSpearman,
"qqp": AccuracyAndF1,
"mnli": Accuracy,
"qnli": Accuracy,
"rte": Accuracy,
}
MODEL_CLASSES = {
"bert": (BertForSequenceClassification, BertTokenizer),
"ernie": (ErnieForSequenceClassification, ErnieTokenizer),
}
def parse_args():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " +
", ".join(METRIC_CLASSES.keys()), )
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(MODEL_CLASSES.keys()), )
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(
sum([
list(classes[-1].pretrained_init_configuration.keys())
for classes in MODEL_CLASSES.values()
], [])), )
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.", )
parser.add_argument(
"--learning_rate",
default=1e-4,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument(
"--num_train_epochs",
default=3,
type=int,
help="Total number of training epochs to perform.", )
parser.add_argument(
"--logging_steps",
type=int,
default=100,
help="Log every X updates steps.")
parser.add_argument(
"--save_steps",
type=int,
default=100,
help="Save checkpoint every X updates steps.")
parser.add_argument(
"--batch_size",
default=32,
type=int,
help="Batch size per GPU/CPU for training.", )
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.")
parser.add_argument(
"--warmup_steps",
default=0,
type=int,
help="Linear warmup over warmup_steps. If > 0: Override warmup_proportion"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Linear warmup proportion over total steps.")
parser.add_argument(
"--adam_epsilon",
default=1e-6,
type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--seed", default=42, type=int, help="random seed for initialization")
parser.add_argument(
"--device",
default="gpu",
type=str,
choices=["cpu", "gpu", "xpu"],
help="The device to select to train the model, is must be cpu/gpu/xpu.")
parser.add_argument(
"--use_amp",
type=distutils.util.strtobool,
default=False,
help="Enable mixed precision training.")
parser.add_argument(
"--scale_loss",
type=float,
default=2**15,
help="The value of scale_loss for fp16.")
args = parser.parse_args()
return args
def set_seed(args):
# Use the same data seed(for data shuffle) for all procs to guarantee data
# consistency after sharding.
random.seed(args.seed)
np.random.seed(args.seed)
    # Different op seeds (for dropout) for different procs may be better, e.g.:
# `paddle.seed(args.seed + paddle.distributed.get_rank())`
paddle.seed(args.seed)
@paddle.no_grad()
def evaluate(model, loss_fct, metric, data_loader):
model.eval()
metric.reset()
for batch in data_loader:
input_ids, segment_ids, labels = batch
logits = model(input_ids, segment_ids)
loss = loss_fct(logits, labels)
correct = metric.compute(logits, labels)
metric.update(correct)
res = metric.accumulate()
if isinstance(metric, AccuracyAndF1):
print(
"eval loss: %f, acc: %s, precision: %s, recall: %s, f1: %s, acc and f1: %s, "
% (
loss.numpy(),
res[0],
res[1],
res[2],
res[3],
res[4], ),
end='')
elif isinstance(metric, Mcc):
print("eval loss: %f, mcc: %s, " % (loss.numpy(), res[0]), end='')
elif isinstance(metric, PearsonAndSpearman):
print(
"eval loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s, "
% (loss.numpy(), res[0], res[1], res[2]),
end='')
else:
print("eval loss: %f, acc: %s, " % (loss.numpy(), res), end='')
model.train()
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
"""convert a glue example into necessary features"""
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
label = example['labels']
label = np.array([label], dtype=label_dtype)
# Convert raw text to feature
    # Single-sentence tasks leave only the 'sentence' key (plus 'labels' when
    # not in test mode), so int(is_test) + len(example) equals 2 in that case.
    if (int(is_test) + len(example)) == 2:
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
else:
example = tokenizer(
example['sentence1'],
text_pair=example['sentence2'],
max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
def do_train(args):
paddle.set_device(args.device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
set_seed(args)
args.task_name = args.task_name.lower()
metric_class = METRIC_CLASSES[args.task_name]
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
train_ds = load_dataset('glue', args.task_name, splits="train")
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=args.max_seq_length)
train_ds = train_ds.map(trans_func, lazy=True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=args.batch_size, shuffle=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # segment
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
train_data_loader = DataLoader(
dataset=train_ds,
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
if args.task_name == "mnli":
dev_ds_matched, dev_ds_mismatched = load_dataset(
'glue', args.task_name, splits=["dev_matched", "dev_mismatched"])
dev_ds_matched = dev_ds_matched.map(trans_func, lazy=True)
dev_ds_mismatched = dev_ds_mismatched.map(trans_func, lazy=True)
dev_batch_sampler_matched = paddle.io.BatchSampler(
dev_ds_matched, batch_size=args.batch_size, shuffle=False)
dev_data_loader_matched = DataLoader(
dataset=dev_ds_matched,
batch_sampler=dev_batch_sampler_matched,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
dev_batch_sampler_mismatched = paddle.io.BatchSampler(
dev_ds_mismatched, batch_size=args.batch_size, shuffle=False)
dev_data_loader_mismatched = DataLoader(
dataset=dev_ds_mismatched,
batch_sampler=dev_batch_sampler_mismatched,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
else:
dev_ds = load_dataset('glue', args.task_name, splits='dev')
dev_ds = dev_ds.map(trans_func, lazy=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=args.batch_size, shuffle=False)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
    num_classes = 1 if train_ds.label_list is None else len(train_ds.label_list)
model = model_class.from_pretrained(
args.model_name_or_path, num_classes=num_classes)
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
num_training_steps = args.max_steps if args.max_steps > 0 else (
len(train_data_loader) * args.num_train_epochs)
warmup = args.warmup_steps if args.warmup_steps > 0 else args.warmup_proportion
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
warmup)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
beta1=0.9,
beta2=0.999,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
loss_fct = paddle.nn.loss.CrossEntropyLoss(
) if train_ds.label_list else paddle.nn.loss.MSELoss()
metric = metric_class()
if args.use_amp:
scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)
global_step = 0
tic_train = time.time()
for epoch in range(args.num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
input_ids, segment_ids, labels = batch
with paddle.amp.auto_cast(
args.use_amp,
custom_white_list=["layer_norm", "softmax", "gelu"]):
logits = model(input_ids, segment_ids)
loss = loss_fct(logits, labels)
if args.use_amp:
scaler.scale(loss).backward()
scaler.minimize(optimizer, loss)
else:
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
if global_step % args.logging_steps == 0:
print(
"global step %d/%d, epoch: %d, batch: %d, rank_id: %s, loss: %f, lr: %.10f, speed: %.4f step/s"
% (global_step, num_training_steps, epoch, step,
paddle.distributed.get_rank(), loss, optimizer.get_lr(),
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
if global_step % args.save_steps == 0 or global_step == num_training_steps:
tic_eval = time.time()
if args.task_name == "mnli":
evaluate(model, loss_fct, metric, dev_data_loader_matched)
evaluate(model, loss_fct, metric,
dev_data_loader_mismatched)
print("eval done total : %s s" % (time.time() - tic_eval))
else:
evaluate(model, loss_fct, metric, dev_data_loader)
print("eval done total : %s s" % (time.time() - tic_eval))
if paddle.distributed.get_rank() == 0:
output_dir = os.path.join(args.output_dir,
"%s_ft_model_%d.pdparams" %
(args.task_name, global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Need better way to get inner model of DataParallel
model_to_save = model._layers if isinstance(
model, paddle.DataParallel) else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
if global_step >= num_training_steps:
return
def print_arguments(args):
"""print arguments"""
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).items()):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
if __name__ == "__main__":
args = parse_args()
print_arguments(args)
do_train(args)
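# Illustrative usage (not part of the original script; the file name and the
# checkpoint below are assumptions, every flag is defined in parse_args above):
#
#   python run_glue.py --task_name sst-2 --model_type bert \
#       --model_name_or_path bert-base-uncased --output_dir ./tmp/sst-2 \
#       --batch_size 32 --learning_rate 2e-5 --num_train_epochs 3 --device gpu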
|
the-stack_106_31956 | """Network layer for MRP."""
from abc import abstractmethod
import asyncio
import logging
from pyatv import exceptions
from pyatv.core.net import tcp_keepalive
from pyatv.interface import StateProducer
from pyatv.protocols.mrp import protobuf
from pyatv.support import chacha20, log_binary, log_protobuf
from pyatv.support.variant import read_variant, write_variant
_LOGGER = logging.getLogger(__name__)
class AbstractMrpConnection(asyncio.Protocol, StateProducer):
"""Abstract base class for an MRP connection."""
@abstractmethod
async def connect(self) -> None:
"""Connect to device."""
@property
@abstractmethod
def connected(self) -> bool:
"""If a connection is open or not."""
@abstractmethod
def close(self) -> None:
"""Close connection to device."""
@abstractmethod
def send(self, message: protobuf.ProtocolMessage) -> None:
"""Send protobuf message to device."""
class MrpConnection(
AbstractMrpConnection
): # pylint: disable=too-many-instance-attributes # noqa
"""Network layer that encryptes/decryptes and (de)serializes messages."""
def __init__(self, host, port, loop, atv=None):
"""Initialize a new MrpConnection."""
super().__init__()
self.host = str(host)
self.port = port
self.atv = atv
self.loop = loop
self._log_str = ""
self._buffer = b""
self._chacha = None
self._transport = None
def connection_made(self, transport):
"""Device connection was made."""
_LOGGER.debug("Connection made to device")
self._transport = transport
sock = transport.get_extra_info("socket")
try:
tcp_keepalive(sock)
except exceptions.NotSupportedError as ex:
_LOGGER.warning("Keep-alive not supported: %s", str(ex))
dstaddr, dstport = sock.getpeername()
srcaddr, srcport = sock.getsockname()
self._log_str = f"{srcaddr}:{srcport}<->{dstaddr}:{dstport} "
_LOGGER.debug("%s Connection established", self._log_str)
def connection_lost(self, exc):
"""Device connection was dropped."""
_LOGGER.debug("%s Disconnected from device: %s", self._log_str, exc)
self._transport = None
self.listener.stop() # pylint: disable=no-member
if self.atv:
if exc is None:
self.atv.listener.connection_closed()
else:
self.atv.listener.connection_lost(exc)
def eof_received(self):
"""Device sent EOF (no more data)."""
_LOGGER.debug("%s Received EOF from server", self._log_str)
if self._transport.can_write_eof():
self._transport.write_eof()
self._transport.close()
def enable_encryption(self, output_key, input_key):
"""Enable encryption with the specified keys."""
self._chacha = chacha20.Chacha20Cipher(output_key, input_key)
@property
def connected(self) -> bool:
"""If a connection is open or not."""
return self._transport is not None
async def connect(self) -> None:
"""Connect to device."""
await self.loop.create_connection(lambda: self, self.host, self.port)
def close(self) -> None:
"""Close connection to device."""
_LOGGER.debug("%s Closing connection", self._log_str)
if self._transport:
self._transport.close()
self._transport = None
self._chacha = None
def send(self, message: protobuf.ProtocolMessage) -> None:
"""Send protobuf message to device."""
serialized = message.SerializeToString()
log_binary(_LOGGER, self._log_str + ">> Send", Data=serialized)
if self._chacha:
serialized = self._chacha.encrypt(serialized)
log_binary(_LOGGER, self._log_str + ">> Send", Encrypted=serialized)
data = write_variant(len(serialized)) + serialized
self._transport.write(data)
log_protobuf(_LOGGER, self._log_str + ">> Send: Protobuf", message)
def send_raw(self, data):
"""Send message to device."""
log_binary(_LOGGER, self._log_str + ">> Send raw", Data=data)
if self._chacha:
data = self._chacha.encrypt(data)
log_binary(_LOGGER, self._log_str + ">> Send raw", Encrypted=data)
data = write_variant(len(data)) + data
self._transport.write(data)
def data_received(self, data):
"""Message was received from device."""
# A message might be split over several reads, so we store a buffer and
# try to decode messages from that buffer
self._buffer += data
log_binary(_LOGGER, self._log_str + "<< Receive", Data=data)
while self._buffer:
# The variant tells us how much data must follow
length, raw = read_variant(self._buffer)
if len(raw) < length:
_LOGGER.debug(
"%s Require %d bytes but only %d in buffer",
self._log_str,
length,
len(raw),
)
break
data = raw[:length] # Incoming message (might be encrypted)
self._buffer = raw[length:] # Buffer, might contain more messages
try:
self._handle_message(data)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("%s Failed to handle message", self._log_str)
def _handle_message(self, data):
if self._chacha:
data = self._chacha.decrypt(data)
log_binary(_LOGGER, self._log_str + "<< Receive", Decrypted=data)
parsed = protobuf.ProtocolMessage()
parsed.ParseFromString(data)
log_protobuf(_LOGGER, self._log_str + "<< Receive: Protobuf", parsed)
self.listener.message_received(parsed, data) # pylint: disable=no-member
def __str__(self):
"""Return string representation of connection object."""
return f"MRP:{self.host}"
|
the-stack_106_31958 | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('CategorizedMessages', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
# TODO: Below is an example - modify to extract data for your own visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
## Statistics per genre
df_genre = df.drop('id', axis=1).groupby(['genre']).sum()#.reset_index()
df_genre = df_genre.transpose().reset_index()
df_genre.columns = ['genre', 'direct', 'news', 'social']
# create visuals
# TODO: Below is an example - modify to create your own visuals
graphs = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
},
{
'data': [
# Bar(
# x=label_names,
# y=label_values,
# marker = dict(color='green')
# )
Bar(
name="direct",
x=df_genre["genre"],
y=df_genre["direct"]
#offsetgroup=0,
),
Bar(
name="news",
x=df_genre["genre"],
y=df_genre["news"]
#offsetgroup=1,
),
Bar(
name="social",
x=df_genre["genre"],
y=df_genre["social"]
#offsetgroup=2,
)
],
'layout': {
'title': "Chart Categories per Genre",
'yaxis': {
'title':"Number of Messages"
},
'xaxis': {
'title': "Categories"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main() |
the-stack_106_31959 | import threading
import shared
import time
import sys
import os
import pickle
import tr  # translation helpers (tr._translate)
from helper_sql import *
from helper_threading import *
from debug import logger
"""
The singleCleaner class is a timer-driven thread that cleans data structures
to free memory, resends messages when a remote node doesn't respond, and
sends pong messages to keep connections alive if the network isn't busy.
It cleans these data structures in memory:
inventory (moves data to the on-disk sql database)
inventorySets (clears then reloads data out of sql database)
It cleans these tables on the disk:
inventory (clears expired objects)
pubkeys (clears pubkeys older than 4 weeks which we have not used personally)
knownNodes (clears addresses which have not been online for over 28 days)
It resends messages when there has been no response:
resends getpubkey messages in 5 days (then 10 days, then 20 days, etc...)
resends msg messages in 5 days (then 10 days, then 20 days, etc...)
"""
class singleCleaner(threading.Thread, StoppableThread):
def __init__(self):
threading.Thread.__init__(self, name="singleCleaner")
self.initStop()
def run(self):
timeWeLastClearedInventoryAndPubkeysTables = 0
try:
shared.maximumLengthOfTimeToBotherResendingMessages = (float(shared.config.get('bitmessagesettings', 'stopresendingafterxdays')) * 24 * 60 * 60) + (float(shared.config.get('bitmessagesettings', 'stopresendingafterxmonths')) * (60 * 60 * 24 *365)/12)
except:
# Either the user hasn't set stopresendingafterxdays and stopresendingafterxmonths yet or the options are missing from the config file.
shared.maximumLengthOfTimeToBotherResendingMessages = float('inf')
while shared.shutdown == 0:
shared.UISignalQueue.put((
'updateStatusBar', 'Doing housekeeping (Flushing inventory in memory to disk...)'))
shared.inventory.flush()
shared.UISignalQueue.put(('updateStatusBar', ''))
shared.broadcastToSendDataQueues((
0, 'pong', 'no data')) # commands the sendData threads to send out a pong message if they haven't sent anything else in the last five minutes. The socket timeout-time is 10 minutes.
# If we are running as a daemon then we are going to fill up the UI
# queue which will never be handled by a UI. We should clear it to
# save memory.
if shared.safeConfigGetBoolean('bitmessagesettings', 'daemon'):
shared.UISignalQueue.queue.clear()
if timeWeLastClearedInventoryAndPubkeysTables < int(time.time()) - 7380:
timeWeLastClearedInventoryAndPubkeysTables = int(time.time())
shared.inventory.clean()
# pubkeys
sqlExecute(
'''DELETE FROM pubkeys WHERE time<? AND usedpersonally='no' ''',
int(time.time()) - shared.lengthOfTimeToHoldOnToAllPubkeys)
# Let us resend getpubkey objects if we have not yet heard a pubkey, and also msg objects if we have not yet heard an acknowledgement
queryreturn = sqlQuery(
'''select toaddress, ackdata, status FROM sent WHERE ((status='awaitingpubkey' OR status='msgsent') AND folder='sent' AND sleeptill<? AND senttime>?) ''',
int(time.time()),
int(time.time()) - shared.maximumLengthOfTimeToBotherResendingMessages)
for row in queryreturn:
if len(row) < 2:
logger.error('Something went wrong in the singleCleaner thread: a query did not return the requested fields. ' + repr(row))
self.stop.wait(3)
break
toAddress, ackData, status = row
if status == 'awaitingpubkey':
resendPubkeyRequest(toAddress)
elif status == 'msgsent':
resendMsg(ackData)
# cleanup old nodes
now = int(time.time())
toDelete = []
shared.knownNodesLock.acquire()
for stream in shared.knownNodes:
for node in shared.knownNodes[stream].keys():
if now - shared.knownNodes[stream][node] > 2419200: # 28 days
                        shared.needToWriteKnownNodesToDisk = True
del shared.knownNodes[stream][node]
shared.knownNodesLock.release()
            # Let us write out the knownNodes to disk if there is anything new to write out.
if shared.needToWriteKnownNodesToDisk:
shared.knownNodesLock.acquire()
output = open(shared.appdata + 'knownnodes.dat', 'wb')
try:
pickle.dump(shared.knownNodes, output)
output.close()
except Exception as err:
if "Errno 28" in str(err):
logger.fatal('(while receiveDataThread shared.needToWriteKnownNodesToDisk) Alert: Your disk or data storage volume is full. ')
shared.UISignalQueue.put(('alert', (tr._translate("MainWindow", "Disk full"), tr._translate("MainWindow", 'Alert: Your disk or data storage volume is full. Bitmessage will now exit.'), True)))
if shared.daemon:
os._exit(0)
shared.knownNodesLock.release()
shared.needToWriteKnownNodesToDisk = False
self.stop.wait(300)
def resendPubkeyRequest(address):
logger.debug('It has been a long time and we haven\'t heard a response to our getpubkey request. Sending again.')
try:
del shared.neededPubkeys[
address] # We need to take this entry out of the shared.neededPubkeys structure because the shared.workerQueue checks to see whether the entry is already present and will not do the POW and send the message because it assumes that it has already done it recently.
except:
pass
shared.UISignalQueue.put((
'updateStatusBar', 'Doing work necessary to again attempt to request a public key...'))
sqlExecute(
'''UPDATE sent SET status='msgqueued' WHERE toaddress=?''',
address)
shared.workerQueue.put(('sendmessage', ''))
def resendMsg(ackdata):
logger.debug('It has been a long time and we haven\'t heard an acknowledgement to our msg. Sending again.')
sqlExecute(
'''UPDATE sent SET status='msgqueued' WHERE ackdata=?''',
ackdata)
shared.workerQueue.put(('sendmessage', ''))
shared.UISignalQueue.put((
'updateStatusBar', 'Doing work necessary to again attempt to deliver a message...'))
|
the-stack_106_31961 | # https://adventofcode.com/2018/day/10
import re
from time import sleep
point = re.compile(r'^position=<\s*(-?\d+),\s*(-?\d+)> velocity=<\s*(-?\d+),\s*(-?\d+)>$')
with open('day10.txt') as file:
points = [tuple(map(int, point.match(line).groups())) for line in file]
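# The message becomes readable when the points are most tightly clustered, so
# search for the time step whose bounding-box area is minimal.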
def bounding_box(t, plot=False):
current = [(p[0] + t * p[2], p[1] + t * p[3]) for p in points]
minx = min(p[0] for p in current)
maxx = max(p[0] for p in current)
miny = min(p[1] for p in current)
maxy = max(p[1] for p in current)
height = maxy-miny+1
width = maxx-minx+1
if plot:
for y in range(height):
for x in range(width):
if any(p[0]-minx == x and p[1]-miny == y for p in current):
print('#', end='')
else:
print('.', end='')
print()
return height*width
candidate = min(range(20000), key=bounding_box)
bounding_box(candidate, True)
print(candidate) |
the-stack_106_31962 | import random
from random import randrange
import time
import colorama
from colorama import Fore, Style
Alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def conv(num, to = 32, froM = 16):
if isinstance(num, str):
n = int(num, froM)
else:
n = int(num)
if n < to:
return Alphabet[n]
else:
return conv(n // to, to) + Alphabet[n % to]
def comp(a, b):
if len(a) > len(b):
return 1
elif len(a) < len(b):
return 0
else:
for i in range(len(b)):
if a[i] > b[i]:
return 1
if a[i] < b[i]:
return 0
return 1
def LongAdd(a, b, c = '', carry = 0):
n = len(a)
m = len(b)
if n > m:
i = n - m
b = lshift(b, i)
if n < m:
i = m - n
a = lshift(a, i)
n = m
for i in range(n - 1, -1, -1):
if isinstance(a[i], str):
n_A = int(a[i], 32)
else:
n_A = int(a[i])
if isinstance(b[i], str):
n_B = int(b[i], 32)
else:
n_B = int(b[i])
temp = n_A + n_B + carry
c = Alphabet[temp % 32] + c
carry = temp // 32
return Alphabet[carry] + c
def LongSub(a, b, c = '', borrow = 0):
n = len(a)
m = len(b)
if n > m:
i = n - m
b = lshift(b, i)
if n < m:
i = m - n
a = lshift(a, i)
n = m
for i in range(n-1, -1,-1):
if isinstance(a[i], str):
n_A = int(a[i], 32)
else:
n_A = int(a[i])
if isinstance(b[i], str):
n_B = int(b[i], 32)
else:
n_B = int(b[i])
temp = n_A - n_B - borrow
if temp >= 0:
c = Alphabet[temp % 32] + c
borrow = 0
else:
c = Alphabet[(32 + temp) % 32] + c
borrow = 1
return Alphabet[borrow] + c
def LongMulOneDigit (a, b, c = ''):
carry = 0
n = len(a)
if isinstance(b, str):
n_b = int(b, 32)
else:
n_b = int(b)
for i in range(n-1, -1,-1):
if isinstance(a[i], str):
n_A = int(a[i], 32)
else:
n_A = int(a[i])
temp = n_A * n_b + carry
c = Alphabet[temp % 32] + c
carry = temp // 32
if carry == 0:
return c
else:
c = Alphabet[carry] + c
return c
def shift(temp, i):
for j in range(i):
temp = temp + '0'
return temp
def lshift(tmp, k):
for i in range(k):
tmp = '0' + tmp
return tmp
def rlshift(tmp):
k = 0
if tmp[0] == '0':
for i in range(len(tmp)):
if tmp[i] == '0' and tmp[i + 1] == '0':
k += 1
else:
tmp = tmp[k+1::]
break
return tmp
def rshiftBin(tmp, k):
if tmp is None:
return tmp
else:
for i in range(0, k):
            del tmp[-1]  # drop the least-significant binary digit (shift right by one)
return tmp
def LongMul(a, b, c = ''):
n = len(a)
m = len(b)
if n > m:
i = n - m
b = lshift(b, i)
if n < m:
i = m - n
a = lshift(a, i)
n = m
for i in range(n-1, -1,-1):
tmp = LongMulOneDigit(a, b[i], '')
k = abs(i - n + 1)
tmp = shift(tmp, k)
if i == n - 1:
c = tmp
else:
if len(c) < len(tmp):
c = '0' + c
c = LongAdd( c, tmp, '')
return c
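# power1() and power2() below are two variants of binary (square-and-multiply)
# exponentiation over the string big-number representation used here: power1
# scans the exponent bits right-to-left, power2 left-to-right.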
def power1(a, b):
res = '1'
b.reverse()
for i in b:
if i == 1:
res = rlshift(LongMul(res, a))
a = rlshift(LongMul(a, a))
return res
def power2(a, b):
res = '1'
for i in range(len(b)):
if b[i] == 1:
res = rlshift(LongMul(res, a))
if i != len(b) - 1:
res = rlshift(LongMul(res, res))
return res
def convf(a):
b = []
for i in range(0, len(a), 8):
b.append(int(a[i:i+8], 16))
return b
def rconvf(a):
b = []
k = ''
c = ''
if len(a) == 0:
return 0
else:
for i in a:
k = str(hex(i))
k = k[2::].upper()
b.append(k)
for i in b:
c = c + i
return c
def lcmp(a, b):
if len(a) > len(b):
return 1
elif len(a) < len(b):
return -1
else:
for i in range(len(b)):
if a[i] > b[i]:
return 1
if a[i] < b[i]:
return -1
return 0
def BinConv(num, beta = 16):
b = []
num = conv(num, 2, beta)
for i in num:
b.append(int(i))
d = b.copy()
return d
def sdth(n, i):
N = n.copy()
for i in range(i):
N.append(0)
return N
def ins(n, i):
N = n.copy()
if len(N) > i:
N.reverse()
N[i] = 1
N.reverse()
else:
N.append(1)
N = sdth(N, i)
return N
def subBin(a, b):
res = []
b.reverse()
a.reverse()
borrow = 0
for i in range(len(a)):
if len(b) > i:
num = b[i]
temp = a[i] - num - borrow
if temp >= 0:
res.append(temp)
borrow = 0
else:
res.append(temp + 2)
borrow = 1
num = 0
res.reverse()
while(len(res) != 0):
if res[0] == 0:
del res[0]
else:
break
b.reverse()
a.reverse()
return res
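# div(): schoolbook binary long division -- repeatedly align the divisor under
# the leading bits of the remainder, subtract, and set the matching quotient
# bit until the remainder is smaller than the divisor.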
def div(a, b):
k = len(b)
r = a
q = []
while lcmp(r, b) == 1 or lcmp(r, b) == 0:
t = len(r)
c = sdth(b, t-k)
if lcmp(r,c) == -1:
t = t - 1
c = sdth(b, t-k)
r = subBin(r, c)
q = ins(q, t-k)
return q, r
def rBinConv(num, beta):
Num = ''
if num == []:
num.append(0)
for i in num:
Num = Num + str(i)
num = conv(Num, beta, 2)
return num
def diV(num1, num2):
q, r = div(BinConv(num1), BinConv(num2))
q = rBinConv(q, 16)
r = conv(LongSub(conv(num1), LongMul(conv(num2), conv(q))), 16, 32)
return [q, r]
def rev(n):
for i in n:
n.reverse()
def tests(A, B, C):
test1(A, B, C)
print('--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
test2(A)
def test1(A, B, C):
print("( A + B ) * C =", rlshift(conv(LongMul(rlshift(LongAdd(conv(A), conv(B))), conv(C)), 16, 32)))
print("C * ( A + B ) =", rlshift(conv(LongMul(conv(C), rlshift(LongAdd(conv(A), conv(B)))), 16, 32)))
print("A * C + B * C =", rlshift(conv(LongAdd(rlshift(LongMul(conv(C), conv(B))), rlshift(LongMul(conv(A), conv(C)))), 16, 32)))
def test2(A):
n = random.randrange(101)
print("n =", n)
print("n * A =", rlshift(conv(LongMul(conv(A), conv(conv(str(n), 16, 10))), 16, 32)))
d = conv(A)
for i in range(n-1):
d = LongAdd(conv(A), d)
print("A + A + ... + A = ", rlshift(conv(d,16,32)))
def poW(A, B):
print('---------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
pow1_time = time.time()
print("A ^ B = " + conv(str(power1(conv(A), BinConv(B))), 16, 32) + Fore.BLUE +"\n Время выполнения power1(): %s seconds" % (time.time() - pow1_time) + Style.RESET_ALL)
pow2_time = time.time()
print("A ^ B = " + conv(str(power2(conv(A), BinConv(B))), 16, 32) + Fore.BLUE + "\n Время выполнения power2(): %s seconds" % (time.time() - pow2_time)+ Style.RESET_ALL)
print('---------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
# Lab 2
def gcd(a, b):
while lcmp(BinConv(a), BinConv(b)) == 1 or lcmp(BinConv(a), BinConv(b)) == -1:
if lcmp(BinConv(a), BinConv(b)) == 1:
a = LongSub(conv(a), conv(b))
a = conv(a, 16, 32)
else:
b = LongSub(conv(b), conv(a))
b = conv(b, 16, 32)
return a
def addBin(a, b):
if len(b) > len(a):
x = a
a = b
b = x
res = []
a.reverse()
b.reverse()
carry = 0
for i in range(len(a)):
tmp = 0
if len(b) > i:
tmp = b[i]
temp = a[i] + tmp + carry
res.append(temp % 2)
carry = temp // 2
if carry != 0:
res.append(carry)
res.reverse()
return res
def mulOneDigitBin(bigNum, a):
bigNum.reverse()
carry = 0
res = []
for i in range(len(bigNum)):
temp = carry + a * bigNum[i]
res.append(temp % 2)
carry = temp // 2
if carry != 0:
res.append(carry)
res.reverse()
bigNum.reverse()
return res
def mulBin(a, b):
if len(b) > len(a):
x = a
a = b
b = x
res = []
b.reverse()
for i in range(len(b)):
tmp = mulOneDigitBin(a, b[i])
tmp = sdth(tmp, i)
res = addBin(tmp, res)
return res
def lcm(a, b):
if lcmp(BinConv(a), BinConv(b)) == -1:
d = a
a = b
b = d
gcD = gcd(a, b)
a = diV(a, gcD)[0]
res = LongMul(conv(b), conv(a))
return conv(res, 16, 32)
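# Barrett reduction (used by the *Mod helpers below): with a precomputed
# mu = floor(b^(2k) / n), where b = 2 and k = len(n) in bits, x mod n is
# r = x - floor(floor(x / b^(k-1)) * mu / b^(k+1)) * n, followed by at most a
# few extra subtractions of n. barret() applies this only when
# len(x) == 2 * len(n) and falls back to div() otherwise.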
def barret(x, n, mu):
if len(x) == 2 * len(n):
q = rshiftBin(x, len(n) - 1)
q = mulBin(q, mu)
q = rshiftBin(q, len(n) + 1)
r = subBin(x, mulBin(q, n))
while comp(r, n) == 1:
r = subBin(r, n)
if r == []:
return [0]
return r
else:
return div(x,n)[1]
def addMod(a = 'D852D3A2099FA0EF4', b ='047A091AC46AFE31', n = '047A091AC46CBE30'):
summ = conv(LongAdd(conv(a), conv(b)), 10, 32)
mu = BinConv(conv(str(2**(2*len(summ))), 16, 10))
summ = BinConv(summ, 10)
mu = div(mu, BinConv(n))[0]
n = BinConv(n)
Barret = rBinConv(barret(summ, n, mu), 16)
return Barret
def subMod(a = 'D852D3A2099FA0EF4', b ='047A091AC46AFE31', n = '047A091AC46CBE30'):
sub = conv(LongSub(conv(a), conv(b)), 10, 32)
mu = BinConv(conv(str(2**(2*len(sub))), 16, 10))
sub = BinConv(sub, 10)
mu = div(mu, BinConv(n))[0]
n = BinConv(n)
Barret = rBinConv(barret(sub, n, mu), 16)
return Barret
def mulMod(a = '47A091AC', b ='D852D3A2', n = '047A091AC46CBE30'):
mul = conv(LongMul(conv(a), conv(b)), 10, 32)
mu = BinConv(conv(str(2**(2*len(mul))), 16, 10))
mul = BinConv(mul, 10)
mu = div(mu, BinConv(n))[0]
n = BinConv(n)
Barret = rBinConv(barret(mul, n, mu), 16)
return Barret
def powMod():
a = '1EAEDD395588036066915AF60F3F84502967BD8617DC'
b = '1253FBED85830A10694A33E1C0DF38E62C8F6B2575B1'
n = '247A'
a = BinConv(a)
b = BinConv(b)
n = BinConv(n)
return gorner(a, b, n)
def test11():
a = "1EAEDD395588036066915AF60F3F84502967BD8617DC"
b = "1253FBED85830A10694A33E1C0DF38E62C8F6B2575B1"
c = "1253FBEF85830A10694A33E1C0DF38E62C8F6B2575B1"
n = "247A"
add = conv(LongAdd(conv(a), conv(b)), 16, 32)
m1 = mulMod(add, c, n)
m2 = mulMod(c, add , n)
num1 = mulBin(BinConv(a), BinConv(c))
num2 = mulBin(BinConv(b), BinConv(c))
resAdd = addMod(rBinConv(num1, 16), rBinConv(num2, 16), n)
if m1 == m2 or m1 == resAdd:
print("Yes")
else:
print("Nu ne polyshilos")
def test22():
a = "1EAEDD39558803"
n = random.randrange(101)
print("n =", n)
mod = "A"
res1 = conv(a)
for i in range(n-1):
res1 = rlshift(LongAdd(conv(a), res1))
mu = BinConv(conv(str(2**(2*len(res1))), 16, 10))
mu = div(mu, BinConv(str(n)))[0]
res1 = rBinConv(barret(BinConv(res1, 32), BinConv(mod), mu), 16)
#res1 = rlshift(conv(res1, 16, 32))
#res1 = diV(res1, mod)[1]
res2 = mulMod(a, conv(str(n), 16, 10), mod)
if res1 == res2:
print("Yes")
print(res1)
else:
print("Nu ne polyshilos")
def ArithmeticMod():
s = time.time()
add = addMod()
print(add, Fore.BLUE + "\nTime: " + str(time.time() - s) + " seconds" + Style.RESET_ALL )
s = time.time()
sub = subMod()
print(sub, Fore.BLUE + "\nTime: " + str(time.time() - s) + " seconds" + Style.RESET_ALL )
s = time.time()
mul = mulMod()
print(mul, Fore.BLUE + "\nTime: " + str(time.time() - s) + " seconds"+ Style.RESET_ALL )
s = time.time()
p = rBinConv(powMod(), 16)
print(p, Fore.BLUE + "\nTime: " + str(time.time() - s) + " seconds" + Style.RESET_ALL)
def stb(s):
tmp = []
for i in range(len(s)):
tmp.append(int(s[i]))
return tmp
def bts(b):
binary = ''
for i in b:
binary = binary + str(i)
    if binary == '' or binary is None:
return '0'
else:
return binary
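# gorner(): modular exponentiation a^b mod n via right-to-left
# square-and-multiply, reducing every intermediate product with barret()
# (mu = floor(2^len(n) / n) is precomputed once).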
def gorner(a, b, n):
c = [1]
mu = BinConv(conv(str(2**(len(n))), 16, 10))
mu = div(mu, n)[0]
b.reverse()
for i in b:
if i == 1:
c = barret(mulBin(c.copy(), a.copy()), n, mu)
a = barret(mulBin(a.copy(), a.copy()), n, mu)
return c |
the-stack_106_31963 | r"""
Projective plane conics over a field
AUTHORS:
- Marco Streng (2010-07-20)
- Nick Alexander (2008-01-08)
"""
#*****************************************************************************
# Copyright (C) 2008 Nick Alexander <[email protected]>
# Copyright (C) 2009/2010 Marco Streng <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.rings.all import PolynomialRing
from sage.rings.complex_mpfr import is_ComplexField
from sage.rings.real_mpfr import is_RealField
from sage.modules.free_module_element import vector
from sage.structure.sequence import Sequence
from sage.structure.element import is_Vector
from sage.schemes.projective.projective_space import ProjectiveSpace
from sage.matrix.constructor import Matrix
from sage.structure.element import is_Matrix
from sage.schemes.curves.projective_curve import ProjectivePlaneCurve
from sage.categories.fields import Fields
_Fields = Fields()
class ProjectiveConic_field(ProjectivePlaneCurve):
r"""
Create a projective plane conic curve over a field.
See ``Conic`` for full documentation.
EXAMPLES::
sage: K = FractionField(PolynomialRing(QQ, 't'))
sage: P.<X, Y, Z> = K[]
sage: Conic(X^2 + Y^2 - Z^2)
Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Rational Field defined by X^2 + Y^2 - Z^2
TESTS::
sage: K = FractionField(PolynomialRing(QQ, 't'))
sage: Conic([K(1), 1, -1])._test_pickling()
"""
def __init__(self, A, f):
r"""
See ``Conic`` for full documentation.
EXAMPLES:
::
sage: c = Conic([1, 1, 1]); c
Projective Conic Curve over Rational Field defined by x^2 + y^2 + z^2
"""
ProjectivePlaneCurve.__init__(self, A, f)
self._coefficients = [f[(2,0,0)], f[(1,1,0)], f[(1,0,1)],
f[(0,2,0)], f[(0,1,1)], f[(0,0,2)]]
self._parametrization = None
self._diagonal_matrix = None
self._rational_point = None
def _repr_type(self):
r"""
Returns ``'Projective Conic'``, which is the first part of the
plain text representation of this object as output by
the function ``_repr_`` of the class ``Curve_generic``.
EXAMPLES::
sage: c = Conic([1, 1, 1]); c
Projective Conic Curve over Rational Field defined by x^2 + y^2 + z^2
sage: c._repr_()
'Projective Conic Curve over Rational Field defined by x^2 + y^2 + z^2'
sage: c._repr_type()
'Projective Conic'
"""
return "Projective Conic"
def base_extend(self, S):
r"""
Returns the conic over ``S`` given by the same equation as ``self``.
EXAMPLES::
sage: c = Conic([1, 1, 1]); c
Projective Conic Curve over Rational Field defined by x^2 + y^2 + z^2
sage: c.has_rational_point()
False
sage: d = c.base_extend(QuadraticField(-1, 'i')); d
Projective Conic Curve over Number Field in i with defining polynomial x^2 + 1 with i = 1*I defined by x^2 + y^2 + z^2
sage: d.rational_point(algorithm = 'rnfisnorm')
(i : 1 : 0)
"""
if S in _Fields:
B = self.base_ring()
if B == S:
return self
if not S.has_coerce_map_from(B):
raise ValueError("No natural map from the base ring of self " \
"(= %s) to S (= %s)" % (self, S))
from .constructor import Conic
con = Conic([S(c) for c in self.coefficients()], \
self.variable_names())
if self._rational_point is not None:
pt = [S(c) for c in Sequence(self._rational_point)]
if not pt == [0,0,0]:
# The following line stores the point in the cache
# if (and only if) there is no point in the cache.
pt = con.point(pt)
return con
return ProjectivePlaneCurve.base_extend(self, S)
def cache_point(self, p):
r"""
Replace the point in the cache of ``self`` by ``p`` for use
by ``self.rational_point()`` and ``self.parametrization()``.
EXAMPLES::
sage: c = Conic([1, -1, 1])
sage: c.point([15, 17, 8])
(15/8 : 17/8 : 1)
sage: c.rational_point()
(15/8 : 17/8 : 1)
sage: c.cache_point(c.rational_point(read_cache = False))
sage: c.rational_point()
(-1 : 1 : 0)
"""
if isinstance(p, (tuple, list)):
p = self.point(p)
self._rational_point = p
def coefficients(self):
r"""
        Gives the `6` coefficients of the conic ``self``
in lexicographic order.
EXAMPLES::
sage: Conic(QQ, [1,2,3,4,5,6]).coefficients()
[1, 2, 3, 4, 5, 6]
sage: P.<x,y,z> = GF(13)[]
sage: a = Conic(x^2+5*x*y+y^2+z^2).coefficients(); a
[1, 5, 0, 1, 0, 1]
sage: Conic(a)
Projective Conic Curve over Finite Field of size 13 defined by x^2 + 5*x*y + y^2 + z^2
"""
return self._coefficients
def derivative_matrix(self):
r"""
Gives the derivative of the defining polynomial of
the conic ``self``, which is a linear map,
as a `3 \times 3` matrix.
EXAMPLES:
In characteristic different from `2`, the
derivative matrix is twice the symmetric matrix:
::
sage: c = Conic(QQ, [1,1,1,1,1,0])
sage: c.symmetric_matrix()
[ 1 1/2 1/2]
[1/2 1 1/2]
[1/2 1/2 0]
sage: c.derivative_matrix()
[2 1 1]
[1 2 1]
[1 1 0]
An example in characteristic `2`:
::
sage: P.<t> = GF(2)[]
sage: c = Conic([t, 1, t^2, 1, 1, 0]); c
Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 2 (using GF2X) defined by t*x^2 + x*y + y^2 + t^2*x*z + y*z
sage: c.is_smooth()
True
sage: c.derivative_matrix()
[ 0 1 t^2]
[ 1 0 1]
[t^2 1 0]
"""
a, b, c, d, e, f = self.coefficients()
return Matrix([[ 2*a , b , c ],
[ b , 2*d , e ],
[ c , e , 2*f ]])
def determinant(self):
r"""
Returns the determinant of the symmetric matrix that defines
the conic ``self``.
This is defined only if the base field has characteristic
different from `2`.
EXAMPLES:
::
sage: C = Conic([1,2,3,4,5,6])
sage: C.determinant()
41/4
sage: C.symmetric_matrix().determinant()
41/4
Determinants are only defined in characteristic different from `2`::
sage: C = Conic(GF(2), [1, 1, 1, 1, 1, 0])
sage: C.is_smooth()
True
sage: C.determinant()
Traceback (most recent call last):
...
ValueError: The conic self (= Projective Conic Curve over Finite Field of size 2 defined by x^2 + x*y + y^2 + x*z + y*z) has no symmetric matrix because the base field has characteristic 2
"""
return self.symmetric_matrix().determinant()
def diagonal_matrix(self):
r"""
Returns a diagonal matrix `D` and a matrix `T` such that `T^t A T = D`
holds, where `(x, y, z) A (x, y, z)^t` is the defining polynomial
of the conic ``self``.
EXAMPLES:
::
sage: c = Conic(QQ, [1,2,3,4,5,6])
sage: d, t = c.diagonal_matrix(); d, t
(
[ 1 0 0] [ 1 -1 -7/6]
[ 0 3 0] [ 0 1 -1/3]
[ 0 0 41/12], [ 0 0 1]
)
sage: t.transpose()*c.symmetric_matrix()*t
[ 1 0 0]
[ 0 3 0]
[ 0 0 41/12]
Diagonal matrices are only defined in characteristic different
from `2`:
::
sage: c = Conic(GF(4, 'a'), [0, 1, 1, 1, 1, 1])
sage: c.is_smooth()
True
sage: c.diagonal_matrix()
Traceback (most recent call last):
...
ValueError: The conic self (= Projective Conic Curve over Finite Field in a of size 2^2 defined by x*y + y^2 + x*z + y*z + z^2) has no symmetric matrix because the base field has characteristic 2
"""
A = self.symmetric_matrix()
B = self.base_ring()
basis = [vector(B,{2:0,i:1}) for i in range(3)]
for i in range(3):
zerovalue = (basis[i]*A*basis[i].column()== 0)
if zerovalue:
for j in range(i+1,3):
if basis[j]*A*basis[j].column() != 0:
b = basis[i]
basis[i] = basis[j]
basis[j] = b
zerovalue = False
if zerovalue:
for j in range(i+1,3):
if basis[i]*A*basis[j].column() != 0:
basis[i] = basis[i]+basis[j]
zerovalue = False
if not zerovalue:
l = (basis[i]*A*basis[i].column())
for j in range(i+1,3):
basis[j] = basis[j] - \
(basis[i]*A*basis[j].column())/l * basis[i]
T = Matrix(basis).transpose()
return T.transpose()*A*T, T
def diagonalization(self, names=None):
r"""
Returns a diagonal conic `C`, an isomorphism of schemes `M: C` -> ``self``
and the inverse `N` of `M`.
EXAMPLES::
sage: Conic(GF(5), [1,0,1,1,0,1]).diagonalization()
(Projective Conic Curve over Finite Field of size 5 defined by x^2 + y^2 + 2*z^2,
Scheme morphism:
From: Projective Conic Curve over Finite Field of size 5 defined by x^2 + y^2 + 2*z^2
To: Projective Conic Curve over Finite Field of size 5 defined by x^2 + y^2 + x*z + z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x + 2*z : y : z),
Scheme morphism:
From: Projective Conic Curve over Finite Field of size 5 defined by x^2 + y^2 + x*z + z^2
To: Projective Conic Curve over Finite Field of size 5 defined by x^2 + y^2 + 2*z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x - 2*z : y : z))
The diagonalization is only defined in characteristic different
from 2:
::
sage: Conic(GF(2), [1,1,1,1,1,0]).diagonalization()
Traceback (most recent call last):
...
ValueError: The conic self (= Projective Conic Curve over Finite Field of size 2 defined by x^2 + x*y + y^2 + x*z + y*z) has no symmetric matrix because the base field has characteristic 2
An example over a global function field:
::
sage: K = FractionField(PolynomialRing(GF(7), 't'))
sage: (t,) = K.gens()
sage: C = Conic(K, [t/2,0, 1, 2, 0, 3])
sage: C.diagonalization()
(Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + ((3*t + 3)/t)*z^2,
Scheme morphism:
From: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + ((3*t + 3)/t)*z^2
To: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + x*z + 3*z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x + 6/t*z : y : z),
Scheme morphism:
From: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + x*z + 3*z^2
To: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 7 defined by 4*t*x^2 + 2*y^2 + ((3*t + 3)/t)*z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x + 1/t*z : y : z))
"""
if names is None:
names = self.defining_polynomial().parent().variable_names()
from .constructor import Conic
D, T = self.diagonal_matrix()
con = Conic(D, names = names)
return con, con.hom(T, self), self.hom(T.inverse(), con)
def gens(self):
r"""
Returns the generators of the coordinate ring of ``self``.
EXAMPLES:
::
sage: P.<x,y,z> = QQ[]
sage: c = Conic(x^2+y^2+z^2)
sage: c.gens()
(xbar, ybar, zbar)
sage: c.defining_polynomial()(c.gens())
0
The function ``gens()`` is required for the following construction:
::
sage: C.<a,b,c> = Conic(GF(3), [1, 1, 1])
sage: C
Projective Conic Curve over Finite Field of size 3 defined by a^2 + b^2 + c^2
"""
return self.coordinate_ring().gens()
def has_rational_point(self, point = False,
algorithm = 'default', read_cache = True):
r"""
Returns True if and only if the conic ``self``
has a point over its base field `B`.
If ``point`` is True, then returns a second output, which is
a rational point if one exists.
Points are cached whenever they are found. Cached information
is used if and only if ``read_cache`` is True.
ALGORITHM:
The parameter ``algorithm`` specifies the algorithm
to be used:
- ``'default'`` -- If the base field is real or complex,
use an elementary native Sage implementation.
- ``'magma'`` (requires Magma to be installed) --
delegates the task to the Magma computer algebra
system.
EXAMPLES::
sage: Conic(RR, [1, 1, 1]).has_rational_point()
False
sage: Conic(CC, [1, 1, 1]).has_rational_point()
True
sage: Conic(RR, [1, 2, -3]).has_rational_point(point = True)
(True, (1.73205080756888 : 0.000000000000000 : 1.00000000000000))
Conics over polynomial rings can be solved internally::
sage: R.<t> = QQ[]
sage: C = Conic([-2,t^2+1,t^2-1])
sage: C.has_rational_point()
True
And they can also be solved with Magma::
sage: C.has_rational_point(algorithm='magma') # optional - magma
True
sage: C.has_rational_point(algorithm='magma', point=True) # optional - magma
(True, (-t : 1 : 1))
sage: D = Conic([t,1,t^2])
sage: D.has_rational_point(algorithm='magma') # optional - magma
False
TESTS:
One of the following fields comes with an embedding into the complex
numbers, one does not. Check that they are both handled correctly by
the Magma interface. ::
sage: K.<i> = QuadraticField(-1)
sage: K.coerce_embedding()
Generic morphism:
From: Number Field in i with defining polynomial x^2 + 1 with i = 1*I
To: Complex Lazy Field
Defn: i -> 1*I
sage: Conic(K, [1,1,1]).rational_point(algorithm='magma') # optional - magma
(-i : 1 : 0)
sage: x = QQ['x'].gen()
sage: L.<i> = NumberField(x^2+1, embedding=None)
sage: Conic(L, [1,1,1]).rational_point(algorithm='magma') # optional - magma
(-i : 1 : 0)
sage: L == K
False
"""
if read_cache:
if self._rational_point is not None:
if point:
return True, self._rational_point
else:
return True
B = self.base_ring()
if algorithm == 'magma':
from sage.interfaces.magma import magma
M = magma(self)
b = M.HasRationalPoint().sage()
if not point:
return b
if not b:
return False, None
M_pt = M.HasRationalPoint(nvals=2)[1]
# Various attempts will be made to convert `pt` to
# a Sage object. The end result will always be checked
# by self.point().
pt = [M_pt[1], M_pt[2], M_pt[3]]
# The first attempt is to use sequences. This is efficient and
# succeeds in cases where the Magma interface fails to convert
# number field elements, because embeddings between number fields
# may be lost on conversion to and from Magma.
# This should deal with all absolute number fields.
try:
return True, self.point([B(c.Eltseq().sage()) for c in pt])
except TypeError:
pass
# The second attempt tries to split Magma elements into
# numerators and denominators first. This is necessary
# for the field of rational functions, because (at the moment of
# writing) fraction field elements are not converted automatically
# from Magma to Sage.
try:
return True, self.point( \
[B(c.Numerator().sage()/c.Denominator().sage()) for c in pt])
except (TypeError, NameError):
pass
# Finally, let the Magma interface handle conversion.
try:
return True, self.point([B(c.sage()) for c in pt])
except (TypeError, NameError):
pass
raise NotImplementedError("No correct conversion implemented for converting the Magma point %s on %s to a correct Sage point on self (=%s)" % (M_pt, M, self))
if algorithm != 'default':
raise ValueError("Unknown algorithm: %s" % algorithm)
if is_ComplexField(B):
if point:
[_,_,_,d,e,f] = self._coefficients
if d == 0:
return True, self.point([0,1,0])
return True, self.point([0, ((e**2-4*d*f).sqrt()-e)/(2*d), 1],
check = False)
return True
if is_RealField(B):
D, T = self.diagonal_matrix()
[a, b, c] = [D[0,0], D[1,1], D[2,2]]
if a == 0:
ret = True, self.point(T*vector([1,0,0]), check = False)
elif a*c <= 0:
ret = True, self.point(T*vector([(-c/a).sqrt(),0,1]),
check = False)
elif b == 0:
ret = True, self.point(T*vector([0,1,0]), check = False)
elif b*c <= 0:
                ret = True, self.point(T*vector([0,(-c/b).sqrt(),1]),
check = False)
else:
ret = False, None
if point:
return ret
return ret[0]
raise NotImplementedError("has_rational_point not implemented for " \
"conics over base field %s" % B)
def has_singular_point(self, point = False):
r"""
Return True if and only if the conic ``self`` has a rational
singular point.
If ``point`` is True, then also return a rational singular
point (or ``None`` if no such point exists).
EXAMPLES:
::
sage: c = Conic(QQ, [1,0,1]); c
Projective Conic Curve over Rational Field defined by x^2 + z^2
sage: c.has_singular_point(point = True)
(True, (0 : 1 : 0))
sage: P.<x,y,z> = GF(7)[]
sage: e = Conic((x+y+z)*(x-y+2*z)); e
Projective Conic Curve over Finite Field of size 7 defined by x^2 - y^2 + 3*x*z + y*z + 2*z^2
sage: e.has_singular_point(point = True)
(True, (2 : 4 : 1))
sage: Conic([1, 1, -1]).has_singular_point()
False
sage: Conic([1, 1, -1]).has_singular_point(point = True)
(False, None)
``has_singular_point`` is not implemented over all fields
of characteristic `2`. It is implemented over finite fields.
::
sage: F.<a> = FiniteField(8)
sage: Conic([a, a+1, 1]).has_singular_point(point = True)
(True, (a + 1 : 0 : 1))
sage: P.<t> = GF(2)[]
sage: C = Conic(P, [t,t,1]); C
Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 2 (using GF2X) defined by t*x^2 + t*y^2 + z^2
sage: C.has_singular_point(point = False)
Traceback (most recent call last):
...
NotImplementedError: Sorry, find singular point on conics not implemented over all fields of characteristic 2.
"""
if not point:
ret = self.has_singular_point(point = True)
return ret[0]
B = self.base_ring()
if B.characteristic() == 2:
[a,b,c,d,e,f] = self.coefficients()
if b == 0 and c == 0 and e == 0:
for i in range(3):
if [a, d, f][i] == 0:
return True, self.point(vector(B, {2:0, i:1}))
if hasattr(a/f, 'is_square') and hasattr(a/f, 'sqrt'):
if (a/f).is_square():
return True, self.point([1,0,(a/f).sqrt()])
if (d/f).is_square():
return True, self.point([0,1,(d/f).sqrt()])
raise NotImplementedError("Sorry, find singular point on conics not implemented over all fields of characteristic 2.")
pt = [e, c, b]
if self.defining_polynomial()(pt) == 0:
return True, self.point(pt)
return False, None
D = self.symmetric_matrix()
if D.determinant() == 0:
return True, self.point(Sequence(D.right_kernel().gen()))
return False, None
def hom(self, x, Y=None):
r"""
Return the scheme morphism from ``self`` to ``Y`` defined by ``x``.
Here ``x`` can be a matrix or a sequence of polynomials.
If ``Y`` is omitted, then a natural image is found if possible.
EXAMPLES:
Here are a few Morphisms given by matrices. In the first
example, ``Y`` is omitted, in the second example, ``Y`` is specified.
::
sage: c = Conic([-1, 1, 1])
sage: h = c.hom(Matrix([[1,1,0],[0,1,0],[0,0,1]])); h
Scheme morphism:
From: Projective Conic Curve over Rational Field defined by -x^2 + y^2 + z^2
To: Projective Conic Curve over Rational Field defined by -x^2 + 2*x*y + z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x + y : y : z)
sage: h([-1, 1, 0])
(0 : 1 : 0)
sage: c = Conic([-1, 1, 1])
sage: d = Conic([4, 1, -1])
sage: c.hom(Matrix([[0, 0, 1/2], [0, 1, 0], [1, 0, 0]]), d)
Scheme morphism:
From: Projective Conic Curve over Rational Field defined by -x^2 + y^2 + z^2
To: Projective Conic Curve over Rational Field defined by 4*x^2 + y^2 - z^2
Defn: Defined on coordinates by sending (x : y : z) to
(1/2*z : y : x)
``ValueError`` is raised if the wrong codomain ``Y`` is specified:
::
sage: c = Conic([-1, 1, 1])
sage: c.hom(Matrix([[0, 0, 1/2], [0, 1, 0], [1, 0, 0]]), c)
Traceback (most recent call last):
...
ValueError: The matrix x (= [ 0 0 1/2]
[ 0 1 0]
[ 1 0 0]) does not define a map from self (= Projective Conic Curve over Rational Field defined by -x^2 + y^2 + z^2) to Y (= Projective Conic Curve over Rational Field defined by -x^2 + y^2 + z^2)
The identity map between two representations of the same conic:
::
sage: C = Conic([1,2,3,4,5,6])
sage: D = Conic([2,4,6,8,10,12])
sage: C.hom(identity_matrix(3), D)
Scheme morphism:
From: Projective Conic Curve over Rational Field defined by x^2 + 2*x*y + 4*y^2 + 3*x*z + 5*y*z + 6*z^2
To: Projective Conic Curve over Rational Field defined by 2*x^2 + 4*x*y + 8*y^2 + 6*x*z + 10*y*z + 12*z^2
Defn: Defined on coordinates by sending (x : y : z) to
(x : y : z)
An example not over the rational numbers:
::
sage: P.<t> = QQ[]
sage: C = Conic([1,0,0,t,0,1/t])
sage: D = Conic([1/t^2, 0, -2/t^2, t, 0, (t + 1)/t^2])
sage: T = Matrix([[t,0,1],[0,1,0],[0,0,1]])
sage: C.hom(T, D)
Scheme morphism:
From: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Rational Field defined by x^2 + t*y^2 + 1/t*z^2
To: Projective Conic Curve over Fraction Field of Univariate Polynomial Ring in t over Rational Field defined by 1/t^2*x^2 + t*y^2 + (-2/t^2)*x*z + ((t + 1)/t^2)*z^2
Defn: Defined on coordinates by sending (x : y : z) to
(t*x + z : y : z)
"""
if is_Matrix(x):
from .constructor import Conic
y = x.inverse()
A = y.transpose()*self.matrix()*y
im = Conic(A)
if Y is None:
Y = im
elif not Y == im:
raise ValueError("The matrix x (= %s) does not define a " \
"map from self (= %s) to Y (= %s)" % \
(x, self, Y))
x = Sequence(x*vector(self.ambient_space().gens()))
return self.Hom(Y)(x, check = False)
return ProjectivePlaneCurve.hom(self, x, Y)
def is_diagonal(self):
r"""
Return True if and only if the conic has the form
`a*x^2 + b*y^2 + c*z^2`.
EXAMPLES:
::
sage: c=Conic([1,1,0,1,0,1]); c
Projective Conic Curve over Rational Field defined by x^2 + x*y + y^2 + z^2
sage: d,t = c.diagonal_matrix()
sage: c.is_diagonal()
False
sage: c.diagonalization()[0].is_diagonal()
True
"""
return all(self.coefficients()[i] == 0 for i in [1, 2, 4])
def is_smooth(self):
r"""
Returns True if and only if ``self`` is smooth.
EXAMPLES:
::
sage: Conic([1,-1,0]).is_smooth()
False
sage: Conic(GF(2),[1,1,1,1,1,0]).is_smooth()
True
"""
if self.base_ring().characteristic() == 2:
[a,b,c,d,e,f] = self.coefficients()
if b == 0 and c == 0 and e == 0:
return False
return self.defining_polynomial()([e, c, b]) != 0
return self.determinant() != 0
def _magma_init_(self, magma):
"""
Internal function. Returns a string to initialize this
conic in the Magma subsystem.
EXAMPLES::
sage: C = Conic(QQ, [1,2,3])
sage: C._magma_init_(magma) # optional - magma
'Conic([_sage_ref...|1/1,2/1,3/1,0/1,0/1,0/1])'
sage: C = Conic(GF(41), [-1,2,5]) # optional - magma
sage: C._magma_init_(magma) # optional - magma
'Conic([_sage_ref...|GF(41)!40,GF(41)!2,GF(41)!5,GF(41)!0,GF(41)!0,GF(41)!0])'
sage: F.<a> = GF(25)
sage: C = Conic([3,0,1,4,a,2])
sage: C
Projective Conic Curve over Finite Field in a of size 5^2 defined by -2*x^2 - y^2 + x*z + (a)*y*z + 2*z^2
sage: magma(C) # optional - magma
Conic over GF(5^2) defined by
3*X^2 + 4*Y^2 + X*Z + a*Y*Z + 2*Z^2
sage: magma(Conic([1/2,2/3,-4/5,6/7,8/9,-10/11])) # optional - magma
Conic over Rational Field defined by
1/2*X^2 + 2/3*X*Y + 6/7*Y^2 - 4/5*X*Z + 8/9*Y*Z - 10/11*Z^2
sage: R.<x> = Frac(QQ['x'])
sage: magma(Conic([x,1+x,1-x])) # optional - magma
Conic over Univariate rational function field over Rational Field defined by
x*X^2 + (x + 1)*Y^2 + (-x + 1)*Z^2
sage: P.<x> = QQ[]
sage: K.<b> = NumberField(x^3+x+1)
sage: magma(Conic([b,1,2])) # optional - magma
Conic over Number Field with defining polynomial x^3 + x + 1 over the Rational Field defined by
b*X^2 + Y^2 + 2*Z^2
"""
kmn = magma(self.base_ring())._ref()
coeffs = self.coefficients()
magma_coeffs = [coeffs[i]._magma_init_(magma) for i in [0, 3, 5, 1, 4, 2]]
return 'Conic([%s|%s])' % (kmn,','.join(magma_coeffs))
def matrix(self):
r"""
Returns a matrix `M` such that `(x, y, z) M (x, y, z)^t`
is the defining equation of ``self``.
The matrix `M` is upper triangular if the base field has
characteristic `2` and symmetric otherwise.
EXAMPLES::
sage: R.<x, y, z> = QQ[]
sage: C = Conic(x^2 + x*y + y^2 + z^2)
sage: C.matrix()
[ 1 1/2 0]
[1/2 1 0]
[ 0 0 1]
sage: R.<x, y, z> = GF(2)[]
sage: C = Conic(x^2 + x*y + y^2 + x*z + z^2)
sage: C.matrix()
[1 1 1]
[0 1 0]
[0 0 1]
"""
if self.base_ring().characteristic() == 2:
return self.upper_triangular_matrix()
return self.symmetric_matrix()
_matrix_ = matrix
def parametrization(self, point=None, morphism=True):
r"""
Return a parametrization `f` of ``self`` together with the
inverse of `f`.
.. warning::
            The second map is currently broken: it is neither the inverse of
            the first map nor well-defined.
If ``point`` is specified, then that point is used
for the parametrization. Otherwise, use ``self.rational_point()``
to find a point.
If ``morphism`` is True, then `f` is returned in the form
of a Scheme morphism. Otherwise, it is a tuple of polynomials
that gives the parametrization.
EXAMPLES:
An example over a finite field ::
sage: c = Conic(GF(2), [1,1,1,1,1,0])
sage: f, g = c.parametrization(); f, g
(Scheme morphism:
From: Projective Space of dimension 1 over Finite Field of size 2
To: Projective Conic Curve over Finite Field of size 2 defined by x^2 + x*y
+ y^2 + x*z + y*z
Defn: Defined on coordinates by sending (x : y) to ...,
Scheme morphism:
From: Projective Conic Curve over Finite Field of size 2 defined by x^2 + x*y
+ y^2 + x*z + y*z
To: Projective Space of dimension 1 over Finite Field of size 2
Defn: Defined on coordinates by sending (x : y : z) to ...)
sage: set(f(p) for p in f.domain())
{(0 : 0 : 1), (0 : 1 : 1), (1 : 0 : 1)}
sage: (g*f).is_one() # known bug (see :trac:`31892`)
True
An example with ``morphism = False`` ::
sage: R.<x,y,z> = QQ[]
sage: C = Curve(7*x^2 + 2*y*z + z^2)
sage: (p, i) = C.parametrization(morphism = False); (p, i)
([-2*x*y, x^2 + 7*y^2, -2*x^2], [-1/2*x, 1/7*y + 1/14*z])
sage: C.defining_polynomial()(p)
0
sage: i[0](p) / i[1](p)
x/y
A ``ValueError`` is raised if ``self`` has no rational point ::
sage: C = Conic(x^2 + y^2 + 7*z^2)
sage: C.parametrization()
Traceback (most recent call last):
...
ValueError: Conic Projective Conic Curve over Rational Field defined by x^2 + y^2 + 7*z^2 has no rational points over Rational Field!
A ``ValueError`` is raised if ``self`` is not smooth ::
sage: C = Conic(x^2 + y^2)
sage: C.parametrization()
Traceback (most recent call last):
...
ValueError: The conic self (=Projective Conic Curve over Rational Field defined by x^2 + y^2) is not smooth, hence does not have a parametrization.
"""
        if self._parametrization is not None and not point:
par = self._parametrization
else:
if not self.is_smooth():
raise ValueError("The conic self (=%s) is not smooth, hence does not have a parametrization." % self)
if point is None:
point = self.rational_point()
point = Sequence(point)
B = self.base_ring()
Q = PolynomialRing(B, 'x,y')
[x, y] = Q.gens()
gens = self.ambient_space().gens()
P = PolynomialRing(B, 4, ['X', 'Y', 'T0', 'T1'])
[X, Y, T0, T1] = P.gens()
c3 = [j for j in range(2,-1,-1) if point[j] != 0][0]
c1 = [j for j in range(3) if j != c3][0]
c2 = [j for j in range(3) if j != c3 and j != c1][0]
L = [0,0,0]
L[c1] = Y*T1*point[c1] + Y*T0
L[c2] = Y*T1*point[c2] + X*T0
L[c3] = Y*T1*point[c3]
bezout = P(self.defining_polynomial()(L) / T0)
t = [bezout([x,y,0,-1]),bezout([x,y,1,0])]
par = (tuple([Q(p([x,y,t[0],t[1]])/y) for p in L]),
tuple([gens[m]*point[c3]-gens[c3]*point[m]
for m in [c2,c1]]))
if self._parametrization is None:
self._parametrization = par
if not morphism:
return par
P1 = ProjectiveSpace(self.base_ring(), 1, 'x,y')
return P1.hom(par[0],self), self.Hom(P1)(par[1], check = False)
def point(self, v, check=True):
r"""
Constructs a point on ``self`` corresponding to the input ``v``.
If ``check`` is True, then checks if ``v`` defines a valid
point on ``self``.
If no rational point on ``self`` is known yet, then also caches the point
for use by ``self.rational_point()`` and ``self.parametrization()``.
EXAMPLES::
sage: c = Conic([1, -1, 1])
sage: c.point([15, 17, 8])
(15/8 : 17/8 : 1)
sage: c.rational_point()
(15/8 : 17/8 : 1)
sage: d = Conic([1, -1, 1])
sage: d.rational_point()
(-1 : 1 : 0)
"""
if is_Vector(v):
v = Sequence(v)
p = ProjectivePlaneCurve.point(self, v, check=check)
if self._rational_point is None:
self._rational_point = p
return p
def random_rational_point(self, *args1, **args2):
r"""
Return a random rational point of the conic ``self``.
ALGORITHM:
1. Compute a parametrization `f` of ``self`` using
``self.parametrization()``.
        2. Compute a random point `(x:y)` on the projective
line.
3. Output `f(x:y)`.
The coordinates x and y are computed using
``B.random_element``, where ``B`` is the base field of
``self`` and additional arguments to ``random_rational_point``
are passed to ``random_element``.
If the base field is a finite field, then the
output is uniformly distributed over the points of self.
EXAMPLES::
sage: c = Conic(GF(2), [1,1,1,1,1,0])
sage: [c.random_rational_point() for i in range(10)] # output is random
[(1 : 0 : 1), (1 : 0 : 1), (1 : 0 : 1), (0 : 1 : 1), (1 : 0 : 1), (0 : 0 : 1), (1 : 0 : 1), (1 : 0 : 1), (0 : 0 : 1), (1 : 0 : 1)]
sage: d = Conic(QQ, [1, 1, -1])
sage: d.random_rational_point(den_bound = 1, num_bound = 5) # output is random
(-24/25 : 7/25 : 1)
sage: Conic(QQ, [1, 1, 1]).random_rational_point()
Traceback (most recent call last):
...
ValueError: Conic Projective Conic Curve over Rational Field defined by x^2 + y^2 + z^2 has no rational points over Rational Field!
"""
if not self.is_smooth():
raise NotImplementedError("Sorry, random points not implemented " \
"for non-smooth conics")
par = self.parametrization()
x = 0
y = 0
B = self.base_ring()
while x == 0 and y == 0:
x = B.random_element(*args1, **args2)
y = B.random_element(*args1, **args2)
return par[0]([x,y])
def rational_point(self, algorithm = 'default', read_cache = True):
r"""
Return a point on ``self`` defined over the base field.
Raises ``ValueError`` if no rational point exists.
See ``self.has_rational_point`` for the algorithm used
and for the use of the parameters ``algorithm`` and ``read_cache``.
EXAMPLES:
Examples over `\QQ` ::
sage: R.<x,y,z> = QQ[]
sage: C = Conic(7*x^2 + 2*y*z + z^2)
sage: C.rational_point()
(0 : 1 : 0)
sage: C = Conic(x^2 + 2*y^2 + z^2)
sage: C.rational_point()
Traceback (most recent call last):
...
ValueError: Conic Projective Conic Curve over Rational Field defined by x^2 + 2*y^2 + z^2 has no rational points over Rational Field!
sage: C = Conic(x^2 + y^2 + 7*z^2)
sage: C.rational_point(algorithm = 'rnfisnorm')
Traceback (most recent call last):
...
ValueError: Conic Projective Conic Curve over Rational Field defined by x^2 + y^2 + 7*z^2 has no rational points over Rational Field!
Examples over number fields ::
sage: P.<x> = QQ[]
sage: L.<b> = NumberField(x^3-5)
sage: C = Conic(L, [3, 2, -b])
sage: p = C.rational_point(algorithm = 'rnfisnorm')
sage: p # output is random
(1/3*b^2 - 4/3*b + 4/3 : b^2 - 2 : 1)
sage: C.defining_polynomial()(list(p))
0
sage: K.<i> = QuadraticField(-1)
sage: D = Conic(K, [3, 2, 5])
sage: D.rational_point(algorithm = 'rnfisnorm') # output is random
(-3 : 4*i : 1)
sage: L.<s> = QuadraticField(2)
sage: Conic(QQ, [1, 1, -3]).has_rational_point()
False
sage: E = Conic(L, [1, 1, -3])
sage: E.rational_point() # output is random
(-1 : -s : 1)
Currently Magma is better at solving conics over number fields than
Sage, so it helps to use the algorithm 'magma' if Magma is installed::
sage: q = C.rational_point(algorithm = 'magma', read_cache=False) # optional - magma
sage: q # output is random, optional - magma
(1/5*b^2 : 1/5*b^2 : 1)
sage: C.defining_polynomial()(list(p)) # optional - magma
0
sage: len(str(p)) > 1.5*len(str(q)) # optional - magma
True
sage: D.rational_point(algorithm = 'magma', read_cache=False) # random, optional - magma
(1 : 2*i : 1)
sage: E.rational_point(algorithm='magma', read_cache=False) # random, optional - magma
(-s : 1 : 1)
sage: F = Conic([L.gen(), 30, -20])
sage: q = F.rational_point(algorithm='magma') # optional - magma
sage: q # output is random, optional - magma
(-10/7*s + 40/7 : 5/7*s - 6/7 : 1)
sage: p = F.rational_point(read_cache=False)
sage: p # output is random
(788210*s - 1114700 : -171135*s + 242022 : 1)
sage: len(str(p)) > len(str(q)) # optional - magma
True
sage: Conic([L.gen(), 30, -21]).has_rational_point(algorithm='magma') # optional - magma
False
Examples over finite fields ::
sage: F.<a> = FiniteField(7^20)
sage: C = Conic([1, a, -5]); C
Projective Conic Curve over Finite Field in a of size 7^20 defined by x^2 + (a)*y^2 + 2*z^2
sage: C.rational_point() # output is random
(4*a^19 + 5*a^18 + 4*a^17 + a^16 + 6*a^15 + 3*a^13 + 6*a^11 + a^9 + 3*a^8 + 2*a^7 + 4*a^6 + 3*a^5 + 3*a^4 + a^3 + a + 6 : 5*a^18 + a^17 + a^16 + 6*a^15 + 4*a^14 + a^13 + 5*a^12 + 5*a^10 + 2*a^9 + 6*a^8 + 6*a^7 + 6*a^6 + 2*a^4 + 3 : 1)
Examples over `\RR` and `\CC` ::
sage: Conic(CC, [1, 2, 3]).rational_point()
(0 : 1.22474487139159*I : 1)
sage: Conic(RR, [1, 1, 1]).rational_point()
Traceback (most recent call last):
...
ValueError: Conic Projective Conic Curve over Real Field with 53 bits of precision defined by x^2 + y^2 + z^2 has no rational points over Real Field with 53 bits of precision!
"""
bl,pt = self.has_rational_point(point = True, algorithm = algorithm,
read_cache = read_cache)
if bl:
return pt
raise ValueError("Conic %s has no rational points over %s!" % \
(self, self.ambient_space().base_ring()))
def singular_point(self):
r"""
Returns a singular rational point of ``self``
EXAMPLES:
::
sage: Conic(GF(2), [1,1,1,1,1,1]).singular_point()
(1 : 1 : 1)
``ValueError`` is raised if the conic has no rational singular point
::
sage: Conic(QQ, [1,1,1,1,1,1]).singular_point()
Traceback (most recent call last):
...
ValueError: The conic self (= Projective Conic Curve over Rational Field defined by x^2 + x*y + y^2 + x*z + y*z + z^2) has no rational singular point
"""
b = self.has_singular_point(point = True)
if not b[0]:
raise ValueError("The conic self (= %s) has no rational " \
"singular point" % self)
return b[1]
def symmetric_matrix(self):
r"""
The symmetric matrix `M` such that `(x y z) M (x y z)^t`
is the defining equation of ``self``.
EXAMPLES::
sage: R.<x, y, z> = QQ[]
sage: C = Conic(x^2 + x*y/2 + y^2 + z^2)
sage: C.symmetric_matrix()
[ 1 1/4 0]
[1/4 1 0]
[ 0 0 1]
sage: C = Conic(x^2 + 2*x*y + y^2 + 3*x*z + z^2)
sage: v = vector([x, y, z])
sage: v * C.symmetric_matrix() * v
x^2 + 2*x*y + y^2 + 3*x*z + z^2
"""
a, b, c, d, e, f = self.coefficients()
if self.base_ring().characteristic() == 2:
if b == 0 and c == 0 and e == 0:
return Matrix([[a,0,0],[0,d,0],[0,0,f]])
raise ValueError("The conic self (= %s) has no symmetric matrix " \
"because the base field has characteristic 2" % \
self)
return Matrix([[ a , b/2, c/2 ],
[ b/2, d , e/2 ],
[ c/2, e/2, f ]])
def upper_triangular_matrix(self):
r"""
The upper-triangular matrix `M` such that `(x y z) M (x y z)^t`
is the defining equation of ``self``.
EXAMPLES::
sage: R.<x, y, z> = QQ[]
sage: C = Conic(x^2 + x*y + y^2 + z^2)
sage: C.upper_triangular_matrix()
[1 1 0]
[0 1 0]
[0 0 1]
sage: C = Conic(x^2 + 2*x*y + y^2 + 3*x*z + z^2)
sage: v = vector([x, y, z])
sage: v * C.upper_triangular_matrix() * v
x^2 + 2*x*y + y^2 + 3*x*z + z^2
"""
from sage.matrix.constructor import matrix
[a,b,c,d,e,f] = self.coefficients()
return matrix([[ a, b, c ],
[ 0, d, e ],
[ 0, 0, f ]])
def variable_names(self):
r"""
Returns the variable names of the defining polynomial
of ``self``.
EXAMPLES:
::
sage: c=Conic([1,1,0,1,0,1], 'x,y,z')
sage: c.variable_names()
('x', 'y', 'z')
sage: c.variable_name()
'x'
The function ``variable_names()`` is required
for the following construction:
::
sage: C.<p,q,r> = Conic(QQ, [1, 1, 1])
sage: C
Projective Conic Curve over Rational Field defined by p^2 + q^2 + r^2
"""
return self.defining_polynomial().parent().variable_names()
|
the-stack_106_31964 | """This module provides a way to work with and enumerate implementation configurations."""
from dataclasses import dataclass
from enum import Enum
from itertools import product
from typing import (
get_type_hints,
Union,
get_origin,
get_args,
Generator,
FrozenSet,
Any,
)
from public import public
from .coordinates import CoordinateModel
from .formula import Formula
from .model import CurveModel
from .mult import ScalarMultiplier
@public
class EnumDefine(Enum):
def __str__(self):
return self.value
def __repr__(self):
return self.value
@classmethod
def names(cls):
return [e.name for e in cls]
@classmethod
def values(cls):
return [e.value for e in cls]
@public
class Multiplication(EnumDefine):
"""Base multiplication algorithm to use."""
TOOM_COOK = "MUL_TOOM_COOK"
KARATSUBA = "MUL_KARATSUBA"
COMBA = "MUL_COMBA"
BASE = "MUL_BASE"
@public
class Squaring(EnumDefine):
"""Base squaring algorithm to use."""
TOOM_COOK = "SQR_TOOM_COOK"
KARATSUBA = "SQR_KARATSUBA"
COMBA = "SQR_COMBA"
BASE = "SQR_BASE"
@public
class Reduction(EnumDefine):
"""Modular reduction method used."""
BARRETT = "RED_BARRETT"
MONTGOMERY = "RED_MONTGOMERY"
BASE = "RED_BASE"
@public
class Inversion(EnumDefine):
"""Inversion algorithm used."""
GCD = "INV_GCD"
EULER = "INV_EULER"
@public
class HashType(EnumDefine):
"""Hash algorithm used in ECDH and ECDSA."""
NONE = "HASH_NONE"
SHA1 = "HASH_SHA1"
SHA224 = "HASH_SHA224"
SHA256 = "HASH_SHA256"
SHA384 = "HASH_SHA384"
SHA512 = "HASH_SHA512"
@public
class RandomMod(EnumDefine):
"""Method of sampling a uniform integer modulo order."""
SAMPLE = "MOD_RAND_SAMPLE"
REDUCE = "MOD_RAND_REDUCE"
@public
@dataclass(frozen=True)
class Configuration:
"""An ECC implementation configuration."""
model: CurveModel
coords: CoordinateModel
formulas: FrozenSet[Formula]
scalarmult: ScalarMultiplier
hash_type: HashType
mod_rand: RandomMod
mult: Multiplication
sqr: Squaring
red: Reduction
inv: Inversion
@public
def all_configurations(**kwargs) -> Generator[Configuration, Configuration, None]:
"""
Get all implementation configurations that match the given `kwargs`.
The keys in :paramref:`~.all_configurations.kwargs` should be some of the attributes in the :py:class:`Configuration`,
    and the values limit the returned configurations to configurations matching them.
.. note::
The ``formulas`` attribute is unsupported and formulas should be provided using the ``scalarmult``
attribute, which is either a subclass of the :py:class:`~.mult.ScalarMultiplier` class or an instance
of it or a dictionary giving arguments to a constructor of some :py:class:`~.mult.ScalarMultiplier`
subclass.
.. warning::
The returned number of configurations might be quite large and take up significant
memory space.
:param kwargs: The configuration parameters to match.
:return: A generator of the configurations.
"""
def is_optional(arg_type):
return (
get_origin(arg_type) == Union
and len(get_args(arg_type)) == 2
and get_args(arg_type)[1] is type(None) # noqa
)
def leaf_subclasses(cls):
subs = cls.__subclasses__()
result = []
for subclass in subs:
if subclass.__subclasses__():
result.extend(leaf_subclasses(subclass))
else:
result.append(subclass)
return result
def independents(kwargs):
options = {
"hash_type": HashType,
"mod_rand": RandomMod,
"mult": Multiplication,
"sqr": Squaring,
"red": Reduction,
"inv": Inversion,
}
keys = list(filter(lambda key: key not in kwargs, options.keys()))
values = [options[key] for key in keys]
fixed_args = {key: kwargs[key] for key in kwargs if key in options}
for value_choice in product(*values):
yield dict(zip(keys, value_choice), **fixed_args)
def multipliers(mult_classes, coords_formulas, fixed_args=None):
for mult_cls in mult_classes:
if (
fixed_args is not None
and "cls" in fixed_args
and mult_cls != fixed_args["cls"]
):
continue
arg_options = {}
for name, required_type in get_type_hints(mult_cls.__init__).items():
if fixed_args is not None and name in fixed_args:
arg_options[name] = [fixed_args[name]]
continue
if is_optional(required_type):
opt_type = get_args(required_type)[0]
if issubclass(opt_type, Formula):
options = [
formula
for formula in coords_formulas
if isinstance(formula, opt_type)
] + [None]
else:
options = [None] # TODO: anything here?
elif get_origin(required_type) is None and issubclass(
required_type, Formula
):
options = [
formula
for formula in coords_formulas
if isinstance(formula, required_type)
]
elif get_origin(required_type) is None and issubclass(
required_type, bool
):
options = [True, False]
elif (
get_origin(required_type) is None
and issubclass(required_type, int)
and name == "width"
):
options = [3, 5]
else:
options = []
arg_options[name] = options
keys = arg_options.keys()
values = arg_options.values()
for combination in product(*values):
try:
mult = mult_cls(**dict(zip(keys, combination)))
except Exception:
continue
yield mult
for model_cls in leaf_subclasses(CurveModel):
model = model_cls()
if "model" in kwargs and model != kwargs["model"]:
continue
for coords in model.coordinates.values():
if "coords" in kwargs and coords != kwargs["coords"]:
continue
coords_formulas = coords.formulas.values()
mult_classes = leaf_subclasses(ScalarMultiplier)
if "scalarmult" in kwargs:
if isinstance(kwargs["scalarmult"], ScalarMultiplier):
mults = [kwargs["scalarmult"]]
if not set(kwargs["scalarmult"].formulas.values()).issubset(
coords_formulas
):
continue
elif isinstance(kwargs["scalarmult"], type) and issubclass(
kwargs["scalarmult"], ScalarMultiplier
):
mult_classes = list(
filter(
lambda mult: issubclass(mult, kwargs["scalarmult"]),
mult_classes,
)
)
mults = multipliers(mult_classes, coords_formulas)
else:
mults = multipliers(
mult_classes, coords_formulas, kwargs["scalarmult"]
)
else:
mults = multipliers(mult_classes, coords_formulas)
for mult in mults:
formulas = frozenset(mult.formulas.values())
for independent_args in independents(kwargs):
yield Configuration(
model, coords, formulas, mult, **independent_args
)
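# Illustrative usage sketch (not part of the original module). The model and
# multiplier class names below are assumptions about the surrounding package
# and may differ; the point is only to show how keyword arguments narrow the
# generated configurations.
#
#   from .model import ShortWeierstrassModel    # assumed import path
#   from .mult import LTRMultiplier             # assumed import path
#
#   for cfg in all_configurations(model=ShortWeierstrassModel(),
#                                  scalarmult=LTRMultiplier,
#                                  hash_type=HashType.SHA256,
#                                  red=Reduction.MONTGOMERY):
#       print(cfg.coords, cfg.scalarmult)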
|
the-stack_106_31965 | #!/usr/bin/env python3
"""Figure 6.5, page 135"""
import random
import multiprocessing as mp
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from tqdm.contrib.concurrent import process_map
ACTIONS_A = 2
ACTIONS_B = 10
INITIAL_Q = {'terminal': np.zeros(2),
'a': np.zeros(ACTIONS_A),
'b': np.zeros(ACTIONS_B)}
TRANSITIONS = {'a': ['b', 'terminal'], 'b': ['terminal'] * 10}
def random_argmax(arr):
"""Standart numpy argmax returns the first maximal element, so if there are several maximal elements
in array the result will be biased"""
# returns an array of bools, True = max element
arr_bool_max = arr == arr.max()
# Indies of max elements
indices_of_max = np.flatnonzero(arr_bool_max)
# Random index
random_maximal_element = np.random.choice(indices_of_max)
return random_maximal_element
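# Quick illustration (comment only, not executed): with ties, np.argmax always
# returns the first maximal index, while random_argmax samples uniformly among
# all of them, e.g.
#   np.argmax(np.array([1, 3, 3]))       # always 1
#   random_argmax(np.array([1, 3, 3]))   # 1 or 2, chosen at random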
def epsilon_greedy_policy(action_values, eps=0.1):
"""Epsilon greedy policy with random tie breaking in argmax. Returns index of the chosen action"""
if random.random() > eps:
action = random_argmax(action_values)
else:
action = random.randint(0, len(action_values) - 1)
return action
def q_learning(a, episodes=300, alpha=0.1):
"""Q-learning algorithm for a number of episodes
    :param a: placeholder parameter so that this function can be used with ``map`` in multiprocessing
:type a: None
:return: How many times LEFT action from A was chosen in each episode
:rtype: numpy.array
"""
q = deepcopy(INITIAL_Q)
left_actions_from_a = np.zeros(episodes, dtype=int)
for episode in range(episodes):
state = 'a'
while state != 'terminal':
action = epsilon_greedy_policy(q[state])
if state == 'a':
reward = 0
if action == 0:
left_actions_from_a[episode] += 1
else:
reward = np.random.normal(-0.1, 1)
next_state = TRANSITIONS[state][action]
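            # Standard Q-learning target with an implicit discount factor of 1
            # (episodes here are at most two steps long):
            #   Q(s, a) <- Q(s, a) + alpha * (r + max_a' Q(s', a') - Q(s, a))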
q[state][action] += alpha * (reward + max(q[next_state]) - q[state][action])
state = next_state
return left_actions_from_a
def double_q_learning(a, episodes=300, alpha=0.1, eps=0.1):
"""Double Q-learning algorithm for a number of episodes, page 136
    :param a: placeholder parameter so that this function can be used with ``map`` in multiprocessing
:type a: None
:return: How many times LEFT action from A was chosen in each episode
:rtype: numpy.array
"""
q1 = deepcopy(INITIAL_Q)
q2 = deepcopy(INITIAL_Q)
left_actions_from_a = np.zeros(episodes, dtype=int)
for episode in range(episodes):
state = 'a'
while state != 'terminal':
q12_state = q1[state] + q2[state]
action = epsilon_greedy_policy(q12_state)
if state == 'a':
reward = 0
if action == 0:
left_actions_from_a[episode] += 1
else:
reward = np.random.normal(-0.1, 1)
next_state = TRANSITIONS[state][action]
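            # Double Q-learning: one estimator selects the greedy action, the
            # other evaluates it, and a fair coin decides which one is updated.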
if random.choice([True, False]):
next_action = random_argmax(q1[next_state])
q1[state][action] += alpha * (reward + q2[next_state][next_action] - q1[state][action])
else:
next_action = random_argmax(q2[next_state])
q2[state][action] += alpha * (reward + q1[next_state][next_action] - q2[state][action])
state = next_state
return left_actions_from_a
def fig6_5():
    # Force the 'spawn' start method (already the default on Windows 10 and macOS);
    # it gives lower dispersion when the script is run from Linux.
mp.set_start_method('spawn')
runs = 10_000
workers = mp.cpu_count()
print('Q-learning')
q = np.array(process_map(q_learning, range(runs), max_workers=workers, chunksize=1)).mean(axis=0)
print('Double Q-learning')
double_q = np.array(process_map(double_q_learning, range(runs), max_workers=workers, chunksize=1)).mean(axis=0)
plt.plot(q * 100, label='Q-learning')
plt.plot(double_q * 100, label='Double Q-learning')
plt.legend()
plt.xlabel('Episodes')
plt.ylabel('% left actions from A')
plt.show()
if __name__ == '__main__':
fig6_5()
|
the-stack_106_31966 | try: #import default dependencies
import sys
import subprocess
import os
except Exception as p:
print(p)
import discord
from discord.ext import commands
class clone(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def clone(self, ctx, clonned: discord.User):
'''
        Clone your account based on someone else's
syntax: clone @useridk#1337
'''
try:
await ctx.message.delete()
except Exception as w:
print(w)
try:
avatar = await clonned.avatar_url_as(format='png', size=2048).read()
except Exception as m:
print(m)
try:
await self.bot.user.edit(password=self.bot.password, username=clonned.name, avatar=avatar)
await ctx.send("Enjoy your new identity :wink:", delete_after=7)
except Exception as j:
if "You are changing your username or Discord Tag too fast. Try again later." in str(j):
await ctx.send("`Error:` You are changing your username too fast", delete_after=7)
else:
print(str(j))
await ctx.send("An error occured, check console for more details")
@clone.error
async def clone_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Missing required argument: `User to clone`", delete_after=5)
else:
print(error)
def setup(bot):
bot.add_cog(clone(bot))
|
the-stack_106_31967 | #!/usr/bin/env python3
import subprocess
import os
import sys
import json
sys.path.append("../")
sys.path.append("../../system/lib/")
sys.path.append("../array/")
import json_parser
import pos
import cli
import api
import pos_constant
import CREATE_VOL_BASIC_1
ARRAYNAME = CREATE_VOL_BASIC_1.ARRAYNAME
def clear_result():
if os.path.exists( __file__ + ".result"):
os.remove( __file__ + ".result")
def check_result(detail):
data = json.loads(detail)
description = data['Response']['result']['status']['description']
if "no any" in description:
return "pass"
return "fail"
def set_result(detail):
result = check_result(detail)
code = json_parser.get_response_code(detail)
with open(__file__ + ".result", "w") as result_file:
result_file.write(result + " (" + str(code) + ")" + "\n" + detail)
def execute():
clear_result()
CREATE_VOL_BASIC_1.execute()
cli.unmount_array(ARRAYNAME)
out = cli.list_volume(ARRAYNAME)
return out
if __name__ == "__main__":
if len(sys.argv) >= 2:
pos.set_addr(sys.argv[1])
out = execute()
set_result(out)
pos.flush_and_kill_pos() |
the-stack_106_31971 | """Utilities functions file"""
import json
import re
def load_json(filename: str):
"""Loads a json file"""
with open(filename, encoding="utf-8", mode="r") as file:
data = json.load(file)
return data
def save_json(data: json, filename: str, should_be_sorted=True):
"""Saves a json file"""
with open(filename, encoding="utf-8", mode="w") as file:
json.dump(data, file, indent=4, sort_keys=should_be_sorted, separators=(',', ': '))
def convert_seconds_to_str(sec: float):
"""Returns a str representing a number of seconds"""
msg = ""
sec = round(sec)
years = sec // 31536000
if years != 0:
msg += str(int(years)) + "y "
sec -= years * 31536000
days = sec // 86400
if days != 0:
msg += str(int(days)) + "d "
sec -= days * 86400
hours = sec // 3600
if hours != 0:
msg += str(int(hours)) + "h "
sec -= hours * 3600
minutes = sec // 60
sec -= minutes * 60
if minutes != 0:
msg += str(int(minutes)) + "m "
if sec != 0:
msg += str(int(sec)) + "s "
return msg[:-1]
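# Illustrative behaviour (comment only, values follow from the code above):
#   convert_seconds_to_str(3661)   -> "1h 1m 1s"
#   convert_seconds_to_str(90061)  -> "1d 1h 1m 1s"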
SECONDS_VALUES = [31536000, 86400, 3600, 60, 1]
def convert_str_to_seconds(duration: str):
"""Converts a duration (format %Yy %dd %Hh %Mm %Ss) to the total
number of corresponding seconds.
If the format of duration isn't correct, returns -1."""
regex = re.compile("(?=.*[ydhms])( *[0-9]+y *)?( *[0-9]+d *)?( *[0-9]+h *)?( *[0-9]+m *)?( *[0-9]+s *)?") #pylint: disable=line-too-long
if not regex.fullmatch(duration):
return -1
total_seconds = 0
matches = regex.findall(duration)
for i in range(5):
match = matches[0][i]
if match != "":
end_of_match = match.find(" ") - 1 if match.find(" ") != -1 else len(match) - 1
value = int(match[:end_of_match])
total_seconds += SECONDS_VALUES[i] * value
return total_seconds
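# Illustrative behaviour (comment only): the accepted format mirrors the one
# produced by convert_seconds_to_str above.
#   convert_str_to_seconds("1h 30m")      -> 5400
#   convert_str_to_seconds("2d 1s")       -> 172801
#   convert_str_to_seconds("not a time")  -> -1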
def convert_int_to_str(number: int, char: str = "'"):
"""Converts an ugly int into a beautiful and sweet str
Parameters:
        number: The number which is gonna be converted.
char: The characters which are gonna be inserted between every 3 digits.
Example: 2364735247 --> 2'364'735'247"""
number = str(number)
for index in range(len(number) - 3, 0, -3):
number = number[:index] + char + number[index:]
return number
|
the-stack_106_31972 | import numpy as np
from scipy.spatial import distance
def _gt_weights(W):
"""Computes the weights V for a Guttman transform V X = B(X) Z."""
V = -W
V[np.diag_indices(V.shape[0])] = W.sum(axis=1) - W.diagonal()
return V
def _gt_mapping(D, W, Z):
"""Computes the mapping B(X) for a Guttman transform V X = B(X) Z."""
# Compute the Euclidean distances between all pairs of points
Dz = distance.cdist(Z, Z)
# Fill the diagonal of Dz, because *we don't want a division by zero*
np.fill_diagonal(Dz, 1e-5)
B = - W * D / Dz
np.fill_diagonal(B, 0.0)
B[np.diag_indices(B.shape[0])] = -np.sum(B, axis=1)
return B
def _guttman_transform(D, W, Zu, Xa, V):
"""Applies the Guttman transform with anchors.
See Di Franco, Carmelo, et al. "Multidimensional scaling localization with anchors."
Autonomous Robot Systems and Competitions (ICARSC), 2017 IEEE International Conference on. IEEE, 2017.
"""
n, m = Zu.shape[0], Xa.shape[0]
Z = np.vstack([Zu, Xa])
V11, V12 = V[0:n, 0:n], V[0:n, n:]
B = _gt_mapping(D, W, Z)
B11, B12 = B[0:n, 0:n], B[0:n, n:]
#return np.linalg.solve(
# V11,
# np.dot(B11, Zu) + np.dot(B12, Za) - np.dot(V12, Xa)
# )
return np.dot(
np.linalg.pinv(V11),
np.dot(B11, Zu) + np.dot(B12, Xa) - np.dot(V12, Xa)
)
def _stress(D, W, X):
"""Computes the value of the weighted stress function of the MDS."""
Dx = distance.cdist(X, X)
S = W * (D - Dx)**2
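    # Sum only the strict upper triangle so each pair (i, j) is counted once:
    #   stress = sum_{i<j} w_ij * (d_ij - ||x_i - x_j||)^2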
return np.triu(S, 1).sum()
def _smacof_single(dissimilarities, weights, init=None, anchors=None, n_components=2, maxitr=300, eps=1e-6, random_state=None):
# Pre-compute the weights of the Guttman transform
V = _gt_weights(weights)
if random_state is None:
random_state = np.random.RandomState()
# Initial positions are random by default
if init is None:
init = random_state.randn(dissimilarities.shape[0]-anchors.shape[0], n_components)
X = init
Sprev = _stress(dissimilarities, weights, np.vstack([X, anchors])) # Stress at previous iteration
for itr in range(maxitr):
X = _guttman_transform(dissimilarities, weights, X, anchors, V)
S = _stress(dissimilarities, weights, np.vstack([X, anchors]))
if np.abs(S - Sprev) < eps:
break
Sprev = S
return X, Sprev
def smacof(dissimilarities, weights=None, init=None, anchors=None, n_components=2, n_init=8, maxitr=300, eps=1e-6, random_state=None):
"""Executes the SMACOF with anchors algorithm to find a Euclidean embedding of dissimilarities between n samples in a d-dimensional space.
Parameters
----------
dissimilarities : n-by-n matrix
The distances/dissimilarities between each pair sample, as a two-dimensional square matrix.
weights : None or n-by-n matrix
The weight of each distance. The greater the weight on a distance, the harder SMACOF will try to respect this distance in its solutions. If None, a matrix of ones is assumed.
init : None or n-by-d matrix
A starting position for the algorithm. If None, `n_init` different random positions will be tried, and the best fitting solution will be kept.
anchors : None or m-by-d matrix
The positions of the m anchors. If None, it is assumed that there are no anchors.
n_components : int
The size (i.e. dimensions) of the embedding space.
n_init : int
The number of initial random positions to try.
maxitr : int
The maximum number of iterations to run.
eps : float
The threshold on the stress change between iterations below which convergence is attained.
random_state : None, int or np.RandomState
The state for the random numbers generator.
Returns
-------
n-by-d array :
The positions of the n samples in the d-dimensional Euclidean space.
"""
# Default weights are 1's
if weights is None:
weights = np.ones(dissimilarities.shape)
if anchors is None:
anchors = np.zeros((0, n_components))
if random_state is None:
random_state = np.random.RandomState()
elif type(random_state) == int:
random_state = np.random.RandomState(random_state)
# Pre-compute the weights of the Guttman transform
V = _gt_weights(weights)
# Only run SMACOF once if an initial position is passed
if init is not None:
n_init = 1
Xbest = None
Sbest = np.inf
for itr in range(n_init):
X, S = _smacof_single(dissimilarities, weights, init, anchors, n_components, maxitr, eps, random_state)
if S < Sbest:
Xbest, Sbest = X, S
return Xbest
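# Minimal usage sketch (not part of the original module): localize a single
# unknown point from its pairwise distances to three anchors at assumed
# positions. The distance matrix must list unknown points first, anchors last.
if __name__ == "__main__":
    anchors = np.array([[0.0, 0.0], [4.0, 0.0], [0.0, 3.0]])
    unknown = np.array([[1.0, 1.0]])
    positions = np.vstack([unknown, anchors])
    D = distance.cdist(positions, positions)
    estimate = smacof(D, anchors=anchors, n_components=2, random_state=0)
    print("estimated position:", estimate[0])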
|
the-stack_106_31973 | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
    Also create histograms (mini and full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
    Also create histograms (mini and full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name)
def describe_constant_1d(series):
"""Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
"""Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
"""Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
    n_infinite = count - series.count()  # number of infinite observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
    n_infinite = count - series.count()  # number of infinite observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
"""Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
def describe(df, dataframe_name='', statement='', bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
    Used as is, it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
        `check_correlation` must be true for this check to run.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
    * The section dedicated to checking the correlations should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
}
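# Illustrative usage sketch (comment only; the data below is made up):
#
#   import pandas as pd
#   df = pd.DataFrame({"age": [23, 31, 31, 45], "city": ["a", "b", "b", "c"]})
#   report = describe(df, bins=5, check_correlation=False, pool_size=1)
#   report["table"]["n"]            # number of rows
#   report["variables"].loc["age"]  # per-variable summary statistics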
|
the-stack_106_31974 | import json
import time
from tools import logger as log
import strategies
def bank_open(**kwargs):
"""
A strategy to open a bank
:param kwargs: strategy, listener, and orders_queue
:return: the input strategy with a report
"""
strategy = kwargs['strategy']
listener = kwargs['listener']
orders_queue = kwargs['orders_queue']
assets = kwargs['assets']
logger = log.get_logger(__name__, strategy['bot'])
global_start, start = time.time(), time.time()
if listener.game_state['storage_open']:
logger.info('Opened bank in {}s'.format(0))
strategy['report'] = {
'success': True,
'details': {'Execution time': 0}
}
log.close_logger(logger)
return strategy
# Open NPC
order = {
'command': 'open_npc',
'parameters': {
'map_id': listener.game_state['map_id'],
'npc_id': -20001,
'action_id': 3
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 10 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if listener.game_state['npc_dialog_open']:
waiting = False
time.sleep(0.05)
execution_time = time.time() - start
if waiting:
logger.warning('Failed to open NPC dialog in {}s'.format(execution_time))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Timeout'}
}
log.close_logger(logger)
return strategy
# Answer question
order = {
'command': 'answer_npc',
'parameters': {
'reply_id': listener.game_state['npc_possible_replies'][0]
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 10 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if 'storage_open' in listener.game_state.keys():
if listener.game_state['storage_open']:
waiting = False
time.sleep(0.05)
    execution_time = time.time() - start
    if waiting:
logger.warning('Failed to answer NPC to open storage in {}s'.format(execution_time))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Failed to answer NPC to open storage in {}s'.format(execution_time)}
}
log.close_logger(logger)
return strategy
execution_time = time.time() - global_start
logger.info('Opened bank in {}s'.format(execution_time))
strategy['report'] = {
'success': True,
'details': {'Execution time': execution_time}
}
log.close_logger(logger)
return strategy |
the-stack_106_31975 | """
example cmdline:
python test/reproduction/so/benchmark_so_litebo_math.py --problem branin --n 200 --init 3 --surrogate gp --optimizer scipy --rep 1 --start_id 0
"""
import os
import sys
import time
import numpy as np
import argparse
import pickle as pkl
sys.path.insert(0, os.getcwd())
from test.reproduction.so.so_benchmark_function import get_problem
from litebo.optimizer.generic_smbo import SMBO
from test.reproduction.test_utils import timeit, seeds
parser = argparse.ArgumentParser()
parser.add_argument('--problem', type=str)
parser.add_argument('--n', type=int, default=100)
parser.add_argument('--init', type=int, default=3)
parser.add_argument('--init_strategy', type=str, default='random_explore_first')
parser.add_argument('--surrogate', type=str, default='gp', choices=['gp', 'prf'])
parser.add_argument('--optimizer', type=str, default='scipy', choices=['scipy', 'local'])
parser.add_argument('--rep', type=int, default=1)
parser.add_argument('--start_id', type=int, default=0)
args = parser.parse_args()
problem_str = args.problem
max_runs = args.n
initial_runs = args.init
init_strategy = args.init_strategy
surrogate_type = args.surrogate
if args.optimizer == 'scipy':
acq_optimizer_type = 'random_scipy'
elif args.optimizer == 'local':
acq_optimizer_type = 'local_random'
else:
raise ValueError('Unknown optimizer %s' % args.optimizer)
rep = args.rep
start_id = args.start_id
mth = 'litebo'
problem = get_problem(problem_str)
cs = problem.get_configspace(optimizer='smac')
time_limit_per_trial = 600
task_id = '%s_%s' % (mth, problem_str)
def evaluate(mth, run_i, seed):
print(mth, run_i, seed, '===== start =====', flush=True)
def objective_function(config):
y = problem.evaluate_config(config)
res = dict()
res['config'] = config
res['objs'] = (y,)
res['constraints'] = None
return res
bo = SMBO(objective_function, cs,
surrogate_type=surrogate_type, # default: gp
acq_optimizer_type=acq_optimizer_type, # default: random_scipy
initial_runs=initial_runs, # default: 3
init_strategy=init_strategy, # default: random_explore_first
max_runs=max_runs,
time_limit_per_trial=time_limit_per_trial, task_id=task_id, random_state=seed)
# bo.run()
config_list = []
perf_list = []
time_list = []
global_start_time = time.time()
for i in range(max_runs):
config, trial_state, objs, trial_info = bo.iterate()
global_time = time.time() - global_start_time
print(seed, i, objs, config, trial_state, trial_info, 'time=', global_time)
config_list.append(config)
perf_list.append(objs[0])
time_list.append(global_time)
return config_list, perf_list, time_list
with timeit('%s all' % (mth,)):
for run_i in range(start_id, start_id + rep):
seed = seeds[run_i]
with timeit('%s %d %d' % (mth, run_i, seed)):
# Evaluate
config_list, perf_list, time_list = evaluate(mth, run_i, seed)
# Save result
print('=' * 20)
print(seed, mth, config_list, perf_list, time_list)
print(seed, mth, 'best perf', np.min(perf_list))
timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
dir_path = 'logs/so_benchmark_%s_%d/%s/' % (problem_str, max_runs, mth)
file = 'benchmark_%s_%04d_%s.pkl' % (mth, seed, timestamp)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(os.path.join(dir_path, file), 'wb') as f:
save_item = (config_list, perf_list, time_list)
pkl.dump(save_item, f)
print(dir_path, file, 'saved!', flush=True)
|
the-stack_106_31978 | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Axon.Component import component
class Translator(component):
Inboxes = {"inbox" : " standard inbox",
"control": "shutdown messages"}
Outboxes = {"outbox": "",
"signal" : ""}
def __init__(self, nick):
super(Translator, self).__init__()
self.nick = nick
def main(self):
while 1:
if not self.anyReady():
self.pause()
yield 1
data = ""
if self.dataReady('privmsg'):
formatted = self.formatPrivmsg(self.recv('privmsg'))
self.send(formatted)
if self.dataReady('channel'):
formatted = self.formatChannelMsg(self.recv('channel'))
self.send(formatted)
if self.dataReady('nonPrivmsg'):
formatted = self.formatMisc(self.recv('channel'))
self.send(formatted)
if self.dataReady('notice'):
formatted = self.formatNotice(self.recv('notice'))
self.send(formatted)
if self.dataReady('ERR'):
formatted = self.formatError(self.recv('ERR'))
self.send(formatted)
if self.dataReady('RPL'):
formatted = self.formatNumReply(self.recv('RPL'))
self.send(formatted)
def formatPrivmsg(self, msg):
temp, sender, recipient, body = msg
        if body.startswith('ACTION'):
            send = "*** %s %s" % (sender, body[body.find('ACTION') + 7:])
else:
send = "%s: %s" % (sender, body)
return send
def formatChannelMsg(self, msg):
return msg
def formatMisc(self, msg):
return msg
def formatNotice(self, msg):
return msg
def formatError(self, msg):
return msg
def formatNumReply(self, msg):
return msg
if __name__ == '__main__':
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Prefab import ComplexIRCClientPrefab
client = Graphline(
prefab = ComplexIRCClientPrefab(host="irc.freenode.net", nick="kamaeliabot", defaultChannel="#kamtest"),
formatter = Translator("kamaeliabot"),
linkages = {("prefab", "outbox") : ("formatter", "privmsg")}
)
Pipeline(ConsoleReader(), client, ConsoleEchoer()).run()
|
the-stack_106_31979 | import math
import sys
import numpy as np
from numba.core.compiler import compile_isolated
import unittest
class TestAutoConstants(unittest.TestCase):
def test_numpy_nan(self):
def pyfunc():
return np.nan
cres = compile_isolated(pyfunc, ())
cfunc = cres.entry_point
self.assertTrue(math.isnan(pyfunc()))
self.assertTrue(math.isnan(cfunc()))
def test_sys_constant(self):
def pyfunc():
return sys.hexversion
cres = compile_isolated(pyfunc, ())
cfunc = cres.entry_point
self.assertEqual(pyfunc(), cfunc())
if __name__ == '__main__':
unittest.main()
|
the-stack_106_31980 | # Copyright (c) 2015 [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import datetime
# Configuration class
class Config:
DEVICE_MAC = '<ADD DEVICE IDENTIFIER HERE>' # ADD DEVICE IDENTIFIER HERE
DEVICE_NAME = 'ArduWeather' # SET YOUR DEVICE NAME HERE
COM_PORT = 'COM4' # SET YOUR PORT HERE
COM_SPEED = 115200 # SET YOUR PORT SPEED HERE
LOGGING_LEVEL = logging.INFO # SET TO "DEBUG" TO SEE MORE LOGS
SEND_INTERVAL = datetime.timedelta(minutes = 5) # SET YOUR SEND INTERVAL HERE
|
the-stack_106_31983 | from collections import namedtuple
from logging import basicConfig, warning, error
from os.path import getmtime, abspath, dirname, join
from datetime import datetime as dt
from sys import argv
from jinja2 import Environment, FileSystemLoader
basicConfig()
Enum = namedtuple('Enum', 'name fields underlying_type')
class Def(namedtuple('Definition', 'name read write address fields enums width doc')):
@property
def needs_constructor(self):
if len(self.fields) > 1:
return True
if not self.fields:
return False
field = self.fields[0]
return field.width <= 32
class Field(namedtuple('Field', 'name width offset enum')):
@property
def type(self):
if self.enum:
return self.enum.name
if self.width == 1:
return 'bool'
for width in [32, 64]:
if self.width <= width:
return f'uint{width}_t'
@property
def mask(self):
return hex(mask(self))
def mask(field_or_def):
return (2 << (field_or_def.width - 1)) - 1
def cut(field_or_def, value):
value >>= getattr(field_or_def, 'offset', 0)
value &= mask(field_or_def)
if field_or_def.width == 1:
return str(bool(value)).lower()
return hex(value)
def prepare_scope():
class ReadOnly:
READ = True
TRIGGER = False
class WriteOnly:
WRITE = True
TRIGGER = False
class ReadWrite(ReadOnly, WriteOnly):
pass
class Trigger:
READ = False
WRITE = False
TRIGGER = True
def addr(address):
class Address:
ADDRESS = address
return Address
def width(w):
class Width:
WIDTH = w
return Width
globals_ = 'ReadOnly WriteOnly ReadWrite Address Trigger'.split()
scope = dict(ReadOnly=ReadOnly, WriteOnly=WriteOnly, ReadWrite=ReadWrite, Trigger=Trigger,
Address=addr, Width=width)
local = {}
return scope, local, globals_
def read_meta(file_name, local):
try:
namespace = local['NAMESPACE']
except KeyError:
warning("no namespace given, defaulting to empty namespace")
namespace = ()
if isinstance(namespace, str):
namespace = [namespace]
if not isinstance(namespace, (tuple, list)):
error("namespace must be a str, list or tuple, defaulting to empty namespace")
namespace = ()
if not all(part.isidentifier() for part in namespace):
error("namespace parts must be valid identifiert, defaulting to empty namespace")
namespace = ()
try:
test_includes = local['TEST_INCLUDE']
except KeyError:
warning("no test include given, compiling the test will probably fail")
test_includes = ''
if not isinstance(test_includes, str):
error("test include must be a str")
exit()
return namespace, dt.fromtimestamp(getmtime(file_name)), test_includes
def is_dunder(string):
return string.startswith('__') and string.endswith('__')
def find_enum(cls):
for key, value in cls.__dict__.items():
if not is_dunder(key):
fields = value.__dict__.copy()
underlying_type = fields.pop('UNDERLYING_TYPE', None)
yield Enum(key, {
name: value for name, value in fields.items()
if not is_dunder(name) and isinstance(value, int)
}, underlying_type)
def cleanup_definitions(local):
for key, value in list(local.items()):
if not isinstance(value, type):
continue
if not hasattr(value, 'ADDRESS'):
error(f"{key} has no 'Address'")
continue
address = getattr(value, 'ADDRESS')
if not hasattr(value, 'READ') and not hasattr(value, 'WRITE'):
error(f"{key} is neither readable nor writeable")
continue
read = getattr(value, 'READ', False)
write = getattr(value, 'WRITE', False)
fields = getattr(value, '__annotations__', {})
if not all(isinstance(width, int) for width in fields.values()):
error(f"{key}: field widths must be integers")
continue
if sum(fields.values()) > 64:
error(f"{key}: total field width may not exceed 64")
continue
enums = list(find_enum(value))
doc = [line.strip() for line in (value.__doc__ or '').strip().splitlines()]
fields_tuples = []
offset = 0
for name, width in fields.items():
cap = name.capitalize()
matching_enum = None
for enum in enums:
if enum.name == cap:
matching_enum = enum
break
fields_tuples.append(Field(name, width, offset, matching_enum))
offset += width
if offset == 0:
offset = getattr(value, 'WIDTH')
yield Def(key, bool(read), bool(write), int(address), fields_tuples, enums, offset, doc)
def read_definition(file_name):
scope, local, globals_ = prepare_scope()
with open(file_name) as fin:
exec(fin.read(), scope, local)
definitions = list(cleanup_definitions(local))
no_duplicate_dict = {}
for d in definitions:
if d.address in no_duplicate_dict:
dup = no_duplicate_dict[d.address]
raise RuntimeError(f"{d.name} and {dup.name} have the same address: {d.address}")
else:
no_duplicate_dict[d.address] = d
meta = read_meta(file_name, local)
return definitions, meta
def main():
if len(argv) != 2:
print("usage: {} DEFINITION_FILE".format(argv[0]))
exit(1)
defs, meta = read_definition(argv[1])
path = dirname(abspath(__file__))
env = Environment(loader=FileSystemLoader(path))
env.filters['hex'] = hex
env.filters['bitand'] = int.__and__
env.filters['cut'] = cut
env.filters['mask'] = mask
ns, datetime, test_include = meta
test_values = [
0,
0xffffffffffffffff,
0xaaaaaaaaaaaaaaaa,
0x5555555555555555,
0xcafedeadbabebeef
]
base = argv[1][:-3]
target = join(dirname(path), 'include', 'extoll', f'{base}_definitions.h')
with open(target, 'w') as fout:
template = env.get_template(f'{base}_definitions.h')
fout.write(template.render(**locals()))
target = join(dirname(path), 'tests', f'{base}_definitions.cpp')
with open(target, 'w') as fout:
template = env.get_template(f'{base}_definitions.cpp')
fout.write(template.render(**locals()))
if __name__ == '__main__':
main()
|
the-stack_106_31984 | import shutil
from pathlib import Path
from simplegallery_bulkcreation import core
def cleanup_after_tests():
path = Path("example/gallery")
shutil.rmtree(path)
return True
def test_read_config_empty():
defaults, galleries = core.read_config("file_which_does_not_exists")
assert defaults == {
"gallery_root": ".",
"title": "A Photo Gallery",
"description": "This is a cool Photo gallery",
"title_photo": "",
"title_photo_offset": "",
}
assert galleries == []
def test_read_config():
defaults, galleries = core.read_config("example/config-example.ini")
assert defaults == {
"gallery_root": "example/gallery",
"title": "My vacations",
"description": "The best days of the year",
"title_photo": "example/pictures/mexico2017/2017-11-01_15-20-23.jpg",
"title_photo_offset": "20",
}
assert galleries == [
{
"name": "Oman 2020",
"pathname": "Oman_2020",
"image_source": "example/pictures/oman2020",
"description": "Some days in the orient",
"background_photo": "2020-02-02_18-40-33.jpg",
"background_photo_offset": 30,
"overview_photo": "2020-02-02_18-40-33.jpg",
"url": "",
},
{
"name": "Greece 2019",
"pathname": "Greece_2019",
"image_source": "example/pictures/greece2019",
"description": "Island hoping in Greece",
"background_photo": "2019-08-29_10-19-43.jpg",
"background_photo_offset": "40",
"overview_photo": "2019-08-29_10-19-43.jpg",
"url": "",
},
]
def test_create_overview_public():
data_path = "src/simplegallery_bulkcreation/data"
defaults, galleries = core.read_config(config_path="example/config-example.ini")
root_dir = defaults["gallery_root"]
core.create_overview_public(root_dir, data_path, defaults, galleries)
assert Path("example/gallery/public/index.html").exists()
assert Path("example/gallery/public/css/main.css").exists()
assert Path("example/gallery/public/css/default-skin.css").exists()
assert (
Path("example/gallery/public/css/main.css").read_text()
== Path("src/simplegallery_bulkcreation/data/public/css/main.css").read_text()
)
assert (
Path("example/gallery/public/css/default-skin.css").read_text()
== Path(
"src/simplegallery_bulkcreation/data/public/css/default-skin.css"
).read_text()
)
cleanup_after_tests()
|
the-stack_106_31985 | #!/usr/bin/env python3
import queue as q
from typing import List
class Node():
def __init__(self, value):
self.visited = False
self.neighbours: List[Node] = []
self.value = value
def __repr__(self):
return str(self.value)
class Graph():
def __init__(self, nodes):
self.nodes: List[Node] = nodes
def reset(self):
for n in self.nodes:
n.visited = False
def bfs(self, n_start: Node) -> List[Node]:
result = []
work_queue = q.Queue()
work_queue.put(n_start)
result.append(n_start)
n_start.visited = True
while not work_queue.empty():
cur = work_queue.get()
for n in cur.neighbours:
if not n.visited:
work_queue.put(n)
n.visited = True
result.append(n)
return result
def dfs(self, n_start: Node) -> List[Node]:
result = []
result.append(n_start)
n_start.visited = True
for n in n_start.neighbours:
if not n.visited:
for r in self.dfs(n):
result.append(r)
return result
    def topo_visit(self, node, stack=None, visited: set = None) -> List[Node]:
        # Use None defaults to avoid sharing mutable default arguments between calls.
        stack = [] if stack is None else stack
        visited = set() if visited is None else visited
        if node not in visited:
            visited.add(node)
            for neighbour in node.neighbours:
                self.topo_visit(neighbour, stack, visited)
            stack.append(node)
        return stack
def topo(self):
stack = []
visited = set()
for node in self.nodes:
self.topo_visit(node, stack, visited)
return stack
a, b, c, d, e, f = Node("A"), Node("B"), Node("C"), Node("D"), Node("E"), Node("F")
h = Node("H")
a.neighbours = [b, c, e]
b.neighbours = [d, a]
c.neighbours = [a, d, h]
d.neighbours = [b, c, f]
e.neighbours = [a]
f.neighbours = [d]
h.neighbours = [c, f]
#g = Graph([a, b, c, d, e, f, h])
#assert(g.bfs(a) == ['A', 'B', 'C', 'E', 'D', 'H', 'F'])
#assert(g.bfs(h) == ['H', 'C', 'F', 'A', 'D', 'B', 'E'])
#print(f"BFS from A:{g.bfs(a)}")
#print(f"BFS from A:{g.dfs(a)}")
a.neighbours = [b, c, e]
b.neighbours = [d]
c.neighbours = [h, d]
d.neighbours = [f]
e.neighbours = []
f.neighbours = []
h.neighbours = [f]
g = Graph([a, b, c, d, e, f, h])
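# topo_visit() appends a node only after all of its descendants, so topo() returns nodes in
# DFS finish order ([F, D, B, H, C, E, A] for the edge lists above); reverse the list for the
# conventional topological order with sources such as A first.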
print(f"Topological sort:{g.topo()}")
|
the-stack_106_31986 | import django_filters
from dal import autocomplete
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.urls import reverse
import operator
from functools import reduce
from .models import AbstractRelation
# TODO __sresch__ : Change this whole module according to the same logic as in apis_core/apis_entities/filters.py
def get_excluded_fields(model):
modelname = model.__name__
base_list = getattr(settings, "APIS_RELATIONS_FILTER_EXCLUDE", [])
rel_model_conf = getattr(settings, "APIS_RELATIONS", {})
if "exclude" in rel_model_conf.keys():
if isinstance(rel_model_conf["exclude"], list):
base_list.extend(rel_model_conf["exclude"])
if modelname in rel_model_conf.keys():
if "exclude" in rel_model_conf[modelname].keys():
if isinstance(rel_model_conf[modelname]["exclude"], list):
base_list.extend(rel_model_conf[modelname]["exclude"])
return set(base_list)
def get_included_fields(model):
modelname = model.__name__
rel_model_conf = getattr(settings, "APIS_RELATIONS", {})
if modelname in rel_model_conf.keys():
return rel_model_conf[modelname].get("include", False)
else:
return False
FIELD_TO_FILTER = {
"ForeignKey": "MultipleChoiceFilter",
"ManyToManyField": "MultipleChoiceFilter",
"TextField": "CharFilter",
"CharField": "CharFilter",
"DateField": "DateFromToRangeFilter",
"BooleanField": "BooleanFilter"
}
def get_field_dicts(model, include_parents=False):
fields = [
{
"f_name": x.name,
"f_v_name": getattr(x, 'verbose_name', None),
"f_help_text": getattr(x, 'helptext', None),
"f_class_name": "{}".format(x.__class__.__name__),
"f_model": getattr(x, 'related_model', None)
} for x in model._meta.get_fields(include_parents=include_parents)
]
return fields
def get_filters(model, exclude=False, include=False, include_parents=False):
filters = []
field_dicts = get_field_dicts(model, include_parents=include_parents)
for x in field_dicts:
filters.append(x['f_name'])
if x['f_model']:
rel_fields = get_field_dicts(x['f_model'], include_parents)
for y in rel_fields:
if 'apis_relations' in "{}".format(y['f_model']):
pass
else:
rel_field_name = "{}__{}".format(x['f_name'], y['f_name'])
filters.append(rel_field_name)
if include:
filters = [x for x in filters if x in include]
elif exclude:
for x in exclude:
if x.startswith("*") and not x.endswith("*"):
filters = [f for f in filters if not f.lower().endswith(x[1:].lower())]
elif x.startswith("*") and x.endswith("*"):
filters = [f for f in filters if not x[1:-1].lower() in f]
elif not x.startswith("*") and x.endswith("*"):
filters = [f for f in filters if not f.lower().startswith(x[:-1].lower())]
else:
filters = [f for f in filters if not x.lower() == f.lower()]
return filters
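# Exclusion patterns are matched case-insensitively (pattern names below are illustrative):
# "*_ptr" drops filters ending in "_ptr", "*meta*" drops filters containing "meta",
# "start_*" drops filters starting with "start_", and a bare name drops only an exact match.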
def get_generic_relation_filter(entity):
class GenericListFilter(django_filters.FilterSet):
#search = django_filters.CharFilter(method='search_filter_method')
def name_label_filter(self, queryset, name, value):
"""
            Filter for including the alternative names in the names search.
            The types of labels included in the query are currently hardcoded in a list.
:param queryset: queryset that the filters are applied on
:param name: name of the attribute to filter on (not used as label types are hardcoded)
:param value: value for the filter
:return: filtered queryset
"""
alternate_names = getattr(settings, "APIS_ALTERNATE_NAMES", ['alternative name'])
res = []
orig_value = value
for n in ['name', 'label__label']:
value = orig_value
f = '{}__'.format(n)
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
else:
f += 'i'
if value.startswith('*') and value.endswith('*'):
f += 'contains'
value = value[1:-1]
elif value.startswith('*'):
f += 'endswith'
value = value[1:]
elif value.endswith('*'):
f += 'startswith'
value = value[:-1]
else:
f += 'exact'
if n == 'label__label':
res.append(Q(**{f: value, 'label__label_type__name__in': alternate_names}))
else:
res.append(Q(**{f: value}))
return queryset.filter(res[0] | res[1]).distinct()
def wildcard_filter(self, queryset, name, value):
f = '{}__'.format(name)
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
else:
f += 'i'
if value.startswith('*') and value.endswith('*'):
f += 'contains'
value = value[1:-1]
elif value.startswith('*'):
f += 'endswith'
value = value[1:]
elif value.endswith('*'):
f += 'startswith'
value = value[:-1]
else:
f += 'exact'
return queryset.filter(**{f: value})
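            # Illustrative behaviour for a filter on field 'name': value '*berg' becomes
            # name__iendswith='berg', while the quoted value '"Berg"' becomes name__exact='Berg'.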
        def search_filter_method(self, queryset, name, value):
            cls = queryset.model.__name__
            sett_filters = getattr(settings, "APIS_RELATIONS", {})
            if cls.lower() in sett_filters.keys():
                filter_attr = sett_filters[cls.lower()].get("search", ["name"])
                query = reduce(operator.or_, [Q(**{attr: value}) for attr in filter_attr])
                return queryset.filter(query)
            # Fall back to the unfiltered queryset when no search configuration is defined.
            return queryset
class Meta:
model = AbstractRelation.get_relation_class_of_name(entity)
fields = get_filters(
model,
exclude=get_excluded_fields(model),
include=get_included_fields(model),
include_parents=True
)
def __init__(self, *args, **kwargs):
attrs = {'data-placeholder': 'Type to get suggestions',
'data-minimum-input-length': getattr(settings, "APIS_MIN_CHAR", 3),
'data-html': True}
super(GenericListFilter, self).__init__(*args, **kwargs)
for x in self.filters.keys():
if type(self.filters[x].field).__name__ == "ModelChoiceField":
current_model_name = str(self.filters[x].queryset.model.__name__).lower()
current_qs = self.filters[x].queryset
if ContentType.objects.filter(app_label='apis_entities', model=current_model_name).count() > 0:
self.filters[x] = django_filters.ModelMultipleChoiceFilter(
field_name=x,
queryset=current_qs,
widget=autocomplete.ModelSelect2Multiple(
url=reverse(
'apis:apis_entities:generic_network_entities_autocomplete',
kwargs={
'entity': current_model_name
}
),
)
)
elif ContentType.objects.filter(app_label='apis_vocabularies', model=current_model_name).count() > 0:
self.filters[x] = django_filters.ModelMultipleChoiceFilter(
field_name=x,
queryset=current_qs,
widget=autocomplete.ModelSelect2Multiple(
url=reverse(
'apis:apis_vocabularies:generic_vocabularies_autocomplete',
kwargs={
'vocab': current_model_name,
'direct': 'normal'
}
),
)
)
if type(self.filters[x].field).__name__ == "DateField":
self.filters[x] = django_filters.DateFromToRangeFilter(
field_name=x,
)
if type(self.filters[x].field).__name__ == "CharField":
self.filters[x] = django_filters.CharFilter(
lookup_expr='icontains',
field_name=x,
)
if type(self.filters[x].field).__name__ == "ModelMultipleChoiceField":
current_model_name = str(self.filters[x].queryset.model.__name__).lower()
current_qs = self.filters[x].queryset
self.filters[x] = django_filters.ModelMultipleChoiceFilter(
field_name=x,
queryset=current_qs,
widget=autocomplete.ModelSelect2Multiple(
url=reverse(
'apis:apis_entities:generic_network_entities_autocomplete',
kwargs={
'entity': current_model_name
}
),
)
)
return GenericListFilter
|
the-stack_106_31987 | #!/usr/bin/python
#
import json
import os
import sqlite3
from datetime import datetime
import requests
from tornado import web, ioloop, httpserver
from .configuration import PlotmanConfig, get_db_path
def PostDat(dp: dict, cfg: PlotmanConfig):
print(dp)
# sending post request and saving response as response object
payload = json.dumps(dp)
r = requests.post(url=f'{cfg.apis.target}:{cfg.apis.port}/report', data=payload)
print(r.text)
"""
from prometheus_client.core import GaugeMetricFamily
class PlotmanCollector:
def collect(self):
cfg = configuration.get_validated_configs()
jobs = Job.get_running_jobs(cfg.directories.log)
count = len(sorted(jobs, key=job.Job.get_time_wall))
yield GaugeMetricFamily("plotman_jobs_count", "Number of plotting jobs running", value=count)
if __name__ == "__main__":
start_http_server(8001)
REGISTRY.register(PlotmanCollector())
while True:
time.sleep(1)
"""
class MainHandler(web.RequestHandler):
def prepare(self):
if self.request.headers.get("Content-Type", "").startswith("application/json"):
self.json_args = json.loads(self.request.body)
else:
self.json_args = None
def post(self):
self.set_header("Content-Type", "text/plain")
req_body = self.json_args
        # Tornado exposes the client address on the request object rather than the environment.
        remote_addr = self.request.remote_ip or '-'
ts = datetime.now().strftime('%m-%d %H:%M:%S')
        con = sqlite3.connect(get_db_path())
cur = con.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS systemchia (
tid text PRIMARY KEY,
block text NOT NULL,
ip text NOT NULL
);''')
# Insert a row of data
cur.execute(f"INSERT INTO systemchia VALUES ('{ts}','{req_body}','{remote_addr}')")
con.commit()
con.close()
print(req_body)
def apiOpen(cfg: PlotmanConfig):
print(f"api port {cfg.apis.port} is now listening")
application = web.Application([(r"/report", MainHandler)])
http_server = httpserver.HTTPServer(application)
http_server.listen(cfg.apis.port)
ioloop.IOLoop.current().start()
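# Example report submission (host and port are assumed; the handler expects a JSON body):
#   curl -X POST http://localhost:8200/report \
#        -H 'Content-Type: application/json' -d '{"plots": 3}'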
|
the-stack_106_31988 | import pytest
import numpy as np
from aos.solver import Solver, SensitivitySolver
def test_abstract_solver():
with pytest.raises(TypeError):
Solver()
def test_sensitivity_solver_nominal():
solver = SensitivitySolver()
y0 = np.zeros(len(solver.y0) + 1)
# hack because Noll (1976) indexing starts from j=1
y0[1:] = solver.y0
xest = solver.solve(y0)
ref = np.zeros(20)
np.testing.assert_allclose(xest.array, ref)
|
the-stack_106_31989 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The RelaxedBernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import logistic
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
class RelaxedBernoulli(distribution.Distribution):
"""RelaxedBernoulli distribution with temperature and logits parameters.
The RelaxedBernoulli is a distribution over the unit interval (0,1), which
continuously approximates a Bernoulli. The degree of approximation is
controlled by a temperature: as the temperature goes to 0 the
RelaxedBernoulli becomes discrete with a distribution described by the
`logits` or `probs` parameters, as the temperature goes to infinity the
RelaxedBernoulli becomes the constant distribution that is identically 0.5.
The RelaxedBernoulli distribution is a reparameterized continuous
distribution that is the binary special case of the RelaxedOneHotCategorical
distribution (Maddison et al., 2016; Jang et al., 2016). For details on the
binary special case see the appendix of Maddison et al. (2016) where it is
referred to as BinConcrete. If you use this distribution, please cite both
papers.
Some care needs to be taken for loss functions that depend on the
log-probability of RelaxedBernoullis, because computing log-probabilities of
the RelaxedBernoulli can suffer from underflow issues. In many case loss
functions such as these are invariant under invertible transformations of
the random variables. The KL divergence, found in the variational autoencoder
loss, is an example. Because RelaxedBernoullis are sampled by a Logistic
random variable followed by a `tf.sigmoid` op, one solution is to treat
the Logistic as the random variable and `tf.sigmoid` as downstream. The
  KL divergence of two Logistics, which are always followed by a `tf.sigmoid`
op, is equivalent to evaluating KL divergences of RelaxedBernoulli samples.
See Maddison et al., 2016 for more details where this distribution is called
the BinConcrete.
An alternative approach is to evaluate Bernoulli log probability or KL
directly on relaxed samples, as done in Jang et al., 2016. In this case,
guarantees on the loss are usually violated. For instance, using a Bernoulli
KL in a relaxed ELBO is no longer a lower bound on the log marginal
probability of the observation. Thus care and early stopping are important.
#### Examples
Creates three continuous distributions, which approximate 3 Bernoullis with
probabilities (0.1, 0.5, 0.4). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = RelaxedBernoulli(temperature, probs=p)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, whose sigmoid approximate 3 Bernoullis
with logits (-2, 2, 0).
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = Logistic(logits/temperature, 1./temperature)
samples = dist.sample()
sigmoid_samples = tf.sigmoid(samples)
# sigmoid_samples has the same distribution as samples from
# RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very low, samples from
these distributions are almost discrete, usually taking values very close to 0
or 1.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very high, samples from
these distributions are usually close to the (0.5, 0.5, 0.5) vector.
```python
temperature = 100
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
Gumbel-Softmax. 2016.
"""
def __init__(self,
temperature,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name='RelaxedBernoulli'):
"""Construct RelaxedBernoulli distributions.
Args:
temperature: A `Tensor`, representing the temperature of a set of
RelaxedBernoulli distributions. The temperature values should be
positive.
logits: An N-D `Tensor` representing the log-odds
of a positive event. Each entry in the `Tensor` parametrizes
an independent RelaxedBernoulli distribution where the probability of an
event is sigmoid(logits). Only one of `logits` or `probs` should be
passed in.
probs: An N-D `Tensor` representing the probability of a positive event.
Each entry in the `Tensor` parameterizes an independent Bernoulli
distribution. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If both `probs` and `logits` are passed, or if neither.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([logits, probs, temperature], tf.float32)
self._temperature = tensor_util.convert_nonref_to_tensor(
temperature, name='temperature', dtype=dtype)
self._probs = tensor_util.convert_nonref_to_tensor(
probs, name='probs', dtype=dtype)
self._logits = tensor_util.convert_nonref_to_tensor(
logits, name='logits', dtype=dtype)
if logits is None:
logits_parameter = tfp_util.DeferredTensor(
lambda x: tf.math.log(x) - tf.math.log1p(-x), self._probs)
else:
logits_parameter = self._logits
shape = tf.broadcast_static_shape(logits_parameter.shape,
self._temperature.shape)
logistic_scale = tfp_util.DeferredTensor(
tf.math.reciprocal, self._temperature)
logistic_loc = tfp_util.DeferredTensor(
lambda x: x * logistic_scale, logits_parameter, shape=shape)
self._transformed_logistic = (
transformed_distribution.TransformedDistribution(
distribution=logistic.Logistic(
logistic_loc,
logistic_scale,
allow_nan_stats=allow_nan_stats,
name=name + '/Logistic'),
bijector=sigmoid_bijector.Sigmoid()))
super(RelaxedBernoulli, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@staticmethod
def _param_shapes(sample_shape):
return {'logits': tf.convert_to_tensor(sample_shape, dtype=tf.int32)}
@classmethod
def _params_event_ndims(cls):
return dict(temperature=0, logits=0, probs=0)
@property
def temperature(self):
"""Distribution parameter for the location."""
return self._temperature
@property
def logits(self):
"""Input argument `logits`."""
if self._logits is None:
return self._logits_deprecated_behavior()
return self._logits
@property
def probs(self):
"""Input argument `probs`."""
if self._probs is None:
return self._probs_deprecated_behavior()
return self._probs
def logits_parameter(self, name=None):
"""Logits computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'logits_parameter'):
return self._logits_parameter_no_checks()
def _logits_parameter_no_checks(self):
if self._logits is None:
probs = tf.convert_to_tensor(self._probs)
return tf.math.log(probs) - tf.math.log1p(-probs)
return tf.identity(self._logits)
def probs_parameter(self, name=None):
"""Probs computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'probs_parameter'):
return self._probs_parameter_no_checks()
def _probs_parameter_no_checks(self):
if self._logits is None:
return tf.identity(self._probs)
return tf.math.sigmoid(self._logits)
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
def _batch_shape_tensor(self):
return self._transformed_logistic.batch_shape_tensor()
def _batch_shape(self):
return self._transformed_logistic.batch_shape
def _sample_n(self, n, seed=None, **kwargs):
return self._transformed_logistic.sample(n, seed=seed, **kwargs)
def _log_prob(self, y, **kwargs):
return self._transformed_logistic.log_prob(y, **kwargs)
def _prob(self, y, **kwargs):
return self._transformed_logistic.prob(y, **kwargs)
def _log_survival_function(self, y, **kwargs):
return self._transformed_logistic.log_survival_function(y, **kwargs)
def _cdf(self, y, **kwargs):
return self._transformed_logistic.cdf(y, **kwargs)
def _log_cdf(self, y, **kwargs):
return self._transformed_logistic.log_cdf(y, **kwargs)
@deprecation.deprecated(
'2019-10-01',
'The `logits` property will return `None` when the distribution is '
'parameterized with `logits=None`. Use `logits_parameter()` instead.',
warn_once=True)
def _logits_deprecated_behavior(self):
return self.logits_parameter()
@deprecation.deprecated(
'2019-10-01',
'The `probs` property will return `None` when the distribution is '
'parameterized with `probs=None`. Use `probs_parameter()` instead.',
warn_once=True)
def _probs_deprecated_behavior(self):
return self.probs_parameter()
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self._temperature):
msg1 = 'Argument `temperature` must be positive.'
temperature = tf.convert_to_tensor(self._temperature)
assertions.append(assert_util.assert_positive(temperature, message=msg1))
if self._probs is not None:
if is_init != tensor_util.is_ref(self._probs):
probs = tf.convert_to_tensor(self._probs)
one = tf.constant(1., probs.dtype)
assertions.extend([
assert_util.assert_non_negative(
probs, message='Argument `probs` has components less than 0.'),
assert_util.assert_less_equal(
probs, one,
message='Argument `probs` has components greater than 1.')
])
return assertions
|
the-stack_106_31990 | import json
from unittest.mock import MagicMock, patch
import pytest
from django.core.exceptions import PermissionDenied
from order import views
from order.models import Product
class TestBundleDetailView:
def test_get_active_group(self, rf):
"""
Test without any data to a group.
"""
request = rf.get('/')
request.session = MagicMock()
request.session.get.return_value = None
view = views.BundleDetailView()
assert view.get_active_group(request) is None
def test_get_active_group_session(self, rf):
"""
Test with group data in session.
"""
request = rf.get('/')
request.session = MagicMock()
request.session.get.return_value = 1
view = views.BundleDetailView()
with patch('order.models.Group.objects') as group_manager:
group_manager.get.return_value = 'My test group'
assert view.get_active_group(request) == 'My test group'
group_manager.get.assert_called_once_with(pk=1)
def test_get_active_group_GET(self, rf):
"""
Test with group data in Get Argument.
"""
request = rf.get('/?group=1')
request.session = MagicMock()
view = views.BundleDetailView()
with patch('order.views.GroupChooseForm') as mock_form:
group_mock = MagicMock()
group_mock.pk = 99
mock_form.is_valid.return_value = True
mock_form().cleaned_data.get.return_value = group_mock
assert view.get_active_group(request) == group_mock
# TODO: test 99 in session
@patch('order.models.Order.objects')
@patch('order.models.Product.objects')
def test_ajax(self, product_manager, order_manager, rf):
"""
Test to send order data via ajax
"""
order_mock = MagicMock()
order_manager.get_or_create.return_value = (order_mock, None)
product_manager.get.return_value = 'My test product'
request = rf.post('/?group=1', {'product': 1, 'amount': 300})
view = views.BundleDetailView()
view.object = bundle_mock = MagicMock()
view.active_group = group_mock = MagicMock()
bundle_mock.price_for_group.return_value = 666.6666
response = view.ajax(request)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == {'price_for_group': '666.67'}
assert order_mock.amount == 300
order_mock.save.assert_called_with()
order_manager.get_or_create.assert_called_with(
product='My test product', bundle=bundle_mock, group=group_mock)
@patch('order.models.Product.objects')
def test_ajax_no_product(self, product_manager, rf):
"""
        Test to send order data via ajax, unknown product.
"""
product_manager.get.side_effect = Product.DoesNotExist('Product does not exist')
view = views.BundleDetailView()
view.active_group = MagicMock()
request = rf.post('/?group=1', {'product': 1, 'amount': 300})
response = view.ajax(request)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == {'error': 'product 1 not found'}
def test_ajax_wrong_data(self, rf):
"""
Test to send data via ajax, but without data.
"""
view = views.BundleDetailView()
request = rf.post('/', {})
response = view.ajax(request)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == {'error': 'no product data in request'}
def test_get(self, rf):
"""
Test the normal get method.
"""
view = views.BundleDetailView()
view.request = request = rf.get('/')
request.session = MagicMock()
request.session.get.return_value = False
bundle_mock = MagicMock()
bundle_mock.price_for_group.return_value = 333.333
view.get_object = MagicMock(return_value=bundle_mock)
response = view.get(request)
assert response.status_code == 200
def test_post(self, rf):
view = views.BundleDetailView()
view.request = request = rf.post('/', {})
request.session = MagicMock()
request.session.get.return_value = False
bundle_mock = MagicMock()
bundle_mock.price_for_group.return_value = 333.333
view.get_object = MagicMock(return_value=bundle_mock)
response = view.post(request)
assert response.status_code == 200
def test_post_on_closed_bundles(self, rf):
view = views.BundleDetailView()
view.request = request = rf.post('/', {})
request.session = MagicMock()
request.session.get.return_value = False
bundle_mock = MagicMock()
bundle_mock.open = False
view.get_object = MagicMock(return_value=bundle_mock)
with pytest.raises(PermissionDenied):
view.post(request)
class TestBundleOrderView:
def test_get_products(self, rf):
product1, product2, order1, order2, order3 = (MagicMock() for __ in range(5))
order1.product = order2.product = product1
order3.product = product2
order1.amount, order2.amount, order3.amount = (1, 2, 4)
product1.name, product2.name = ('zzz', 'aaa')
product1.multiplier, product2.multiplier = (2, 4)
view = views.BundleOrderView()
view.request = rf.get('/')
view.object = MagicMock()
view.object.orders.all().select_related.return_value = [order1, order2, order3]
products = view.get_products()
assert products == [product2, product1]
assert product1.amount == 3
assert product2.amount == 4
assert product1.order_price == 6
assert product2.order_price == 16
class TestBundleOutputView:
@patch('order.models.Group.objects')
@patch('order.models.Order.objects')
@patch('order.models.Product.objects')
def test_ajax(self, product_manager, order_manager, group_manager, rf):
"""
Test to send order data via ajax
"""
order_mock = MagicMock()
product_manager.get.return_value = 'My test product'
group_manager.get.return_value = 'My test group'
order_manager.get_or_create.return_value = (order_mock, None)
order_manager.filter().aggregate.return_value = {'delivered': 999}
request = rf.post('/', {'product': 1, 'group': 1, 'delivered': 300})
view = views.BundleOutputView()
view.object = MagicMock()
view.object.price_for_group.return_value = 500
view.object.price_for_all.return_value = 1000
response = view.ajax(request)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == {
'price_for_group': '500.00',
'price_for_all': '1000.00',
'product_delivered': 999}
assert order_mock.delivered == '300'
order_mock.save.assert_called_with()
order_manager.get_or_create.assert_called_with(
product='My test product', bundle=view.object, group='My test group')
@patch('order.models.Product.objects')
def test_ajax_no_product(self, product_manager, rf):
"""
        Test to send order data via ajax, unknown product.
"""
product_manager.get.side_effect = Product.DoesNotExist('Product does not exist')
view = views.BundleOutputView()
request = rf.post('/', {'product': 1, 'group': 1, 'delivered': 300})
response = view.ajax(request)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == {'error': 'Group or product not found'}
def test_ajax_wrong_data(self, rf):
"""
Test to send data via ajax, but without data.
"""
view = views.BundleOutputView()
request = rf.post('/', {})
response = view.ajax(request)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == {'error': 'No product or group data in request'}
def test_get_context_data(self, rf):
product = [MagicMock(name='p0'), MagicMock(name='p1'), MagicMock(name='p2')]
order = [MagicMock(name='o0'), MagicMock(name='o1'), MagicMock(name='o2')]
# Group1 orders product0 and 1
# Group2 orders product0
# Noone orders product2
order[0].group = order[1].group = 'Group1'
order[2].group = 'Group2'
order[0].product = order[2].product = product[0]
order[1].product = product[1]
for i in range(3):
order[i].get_delivered.return_value = (i + 1) * 2
order[i].amount = (i + 1) * 2
order[i].product.multiplier = 1
product[i].name = 'product%d' % i
view = views.BundleOutputView()
view.object = MagicMock()
view.object.orders.all().select_related.return_value = order
context = view.get_context_data()
assert context == {
'groups': {'Group1': [{product[1]: order[1],
product[0]: order[0]},
6],
'Group2': [{product[0]: order[2]},
6]},
'object': view.object,
'price_for_all': 12,
'products': [product[0], product[1]],
'view': view}
assert context['products'][0].delivered == 8
assert context['products'][1].delivered == 4
|
the-stack_106_31993 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import ipaddress
from functools import cached_property
from hashlib import sha256
from ansible_collections.arista.avd.plugins.module_utils.utils import AristaAvdError, get, default, template_var, AristaAvdMissingVariableError
class EosDesignsFacts:
def __init__(
self,
hostvars=None,
combine=None,
list_compress=None,
convert_dicts=None,
natural_sort=None,
template_lookup_module=None
):
if hostvars:
self._hostvars = hostvars
if combine:
self._combine = combine
if list_compress:
self._list_compress = list_compress
if convert_dicts:
self._convert_dicts = convert_dicts
if natural_sort:
self._natural_sort = natural_sort
if template_lookup_module:
self._template_lookup_module = template_lookup_module
@classmethod
def keys(cls):
'''
Return the list of "keys"
        Actually the returned list contains the names of attributes not starting with "_" that use the cached_property class.
The "_" check is added to allow support for "internal" cached_properties storing temporary values.
'''
return [key for key in dir(cls) if not key.startswith('_') and isinstance(getattr(cls, key), cached_property)]
@classmethod
def internal_keys(cls):
'''
Return the list of "keys" starting with _
        Actually the returned list contains the names of attributes starting with "_" that use the cached_property class.
The "_" check is added to only include "internal" cached_properties storing temporary values.
'''
return [key for key in dir(cls) if key.startswith('_') and isinstance(getattr(cls, key), cached_property)]
def get(self, key, default_value=None):
'''
Emulate the builtin dict .get method
'''
if key in self.keys():
return getattr(self, key)
return default_value
def reset(self):
'''
Reset the cached attribute values
'''
self.__dict__ = {}
def render(self):
'''
Return a dictionary of all @cached_property values.
If the value is cached, it will automatically get returned from cache
If the value is not cached, it will be resolved by the attribute function first.
Empty values are removed from the returned data.
'''
return {key: getattr(self, key) for key in self.keys() if getattr(self, key) is not None}
'''
------------------------------------------------
Example function to set a fact based on hostvars
------------------------------------------------
@cached_property
def foo(self):
"""
"switch.foo" fact set based on "bar" data model
"""
return get(self._hostvars, "bar.foo", default="zoo")
-----------------------------------------------------------------------------------------------------
Example function to set a fact based on a required key under ex "l3leaf.node_groups.<>.nodes.<>.<key>
Notice the variable _switch_data_combined starts with _ meaning it is an internal variable which will
not be returned as part of the facts. We can load such variables with commonly used data, leveraged
by multiple facts functions.
-----------------------------------------------------------------------------------------------------
@cached_property
def foo(self):
"""
"switch.foo" fact set based on "<node_type_key>.*" data model
Example l3leaf.defaults.foo -> switch.foo
"""
return get(self._switch_data_combined, "foo", required=True)
'''
@cached_property
def type(self):
"""
switch.type fact set based on type variable
"""
return get(self._hostvars, "type", required=True)
@cached_property
def hostname(self):
"""
switch.hostname fact set based on inventory_hostname variable
"""
return get(self._hostvars, "inventory_hostname", required=True)
@cached_property
def node_type_key(self):
"""
switch.node_type_key fact set by finding a matching "type" in "node_type_keys" variable
"""
return self._node_type_key_data['key']
@cached_property
def _node_type_key_data(self):
"""
internal _node_type_key_data containing settings for this node_type.
"""
node_type_keys = get(self._hostvars, "node_type_keys", required=True)
node_type_keys = self._convert_dicts(node_type_keys, 'key')
for node_type in node_type_keys:
if node_type['type'] == self.type:
return node_type
# Not found
        raise AristaAvdMissingVariableError(f"node_type_keys.<>.type=={self.type}")
@cached_property
def connected_endpoints(self):
"""
switch.connected_endpoints set based on
node_type_keys.<node_type_key>.connected_endpoints
"""
return get(self._node_type_key_data, "connected_endpoints", default=False)
@cached_property
def default_evpn_role(self):
"""
switch.default_evpn_role set based on
node_type_keys.<node_type_key>.default_evpn_role
"""
return get(self._node_type_key_data, "default_evpn_role", default="none")
@cached_property
def default_underlay_routing_protocol(self):
"""
switch.default_underlay_routing_protocol set based on
node_type_keys.<node_type_key>.default_underlay_routing_protocol
"""
return get(self._node_type_key_data, "default_underlay_routing_protocol", default="ebgp")
@cached_property
def default_overlay_routing_protocol(self):
"""
switch.default_overlay_routing_protocol set based on
node_type_keys.<node_type_key>.default_overlay_routing_protocol
"""
return get(self._node_type_key_data, "default_overlay_routing_protocol", default="ebgp")
@cached_property
def default_overlay_address_families(self):
"""
switch.default_overlay_address_families set based on
node_type_keys.<node_type_key>.default_overlay_address_families
"""
return get(self._node_type_key_data, "default_overlay_address_families", default=["evpn"])
@cached_property
def default_mpls_overlay_role(self):
"""
switch.default_mpls_overlay_role set based on
node_type_keys.<node_type_key>.default_mpls_overlay_role
"""
return get(self._node_type_key_data, "default_mpls_overlay_role", default="none")
@cached_property
def mpls_lsr(self):
"""
switch.mpls_lsr set based on
node_type_keys.<node_type_key>.mpls_lsr
"""
return get(self._node_type_key_data, "mpls_lsr", default=False)
@cached_property
def mlag_support(self):
"""
switch.mlag_support set based on
node_type_keys.<node_type_key>.mlag_support
"""
return get(self._node_type_key_data, "mlag_support", default=False)
@cached_property
def network_services_l1(self):
"""
switch.network_services_l1 set based on
node_type_keys.<node_type_key>.network_services.l1
"""
return get(self._node_type_key_data, "network_services.l1", default=False)
@cached_property
def network_services_l2(self):
"""
switch.network_services_l2 set based on
node_type_keys.<node_type_key>.network_services.l2
"""
return get(self._node_type_key_data, "network_services.l2", default=False)
@cached_property
def network_services_l3(self):
"""
switch.network_services_l3 set based on
node_type_keys.<node_type_key>.network_services.l3 and
<node_type_key>.<defaults | node_groups.<> | nodes.<> >.evpn_services_l2_only
"""
if self.vtep is True:
# switch.network_services_l3 override based on evpn_services_l2_only
if get(self._switch_data_combined, "evpn_services_l2_only") is True:
return False
return get(self._node_type_key_data, "network_services.l3", default=False)
@cached_property
def underlay_router(self):
"""
switch.underlay_router set based on
node_type_keys.<node_type_key>.underlay_router
"""
return get(self._node_type_key_data, "underlay_router", default=True)
@cached_property
def uplink_type(self):
"""
switch.uplink_type set based on
node_type_keys.<node_type_key>.uplink_type
"""
return get(self._node_type_key_data, "uplink_type", default="p2p")
@cached_property
def vtep(self):
"""
switch.vtep set based on
node_type_keys.<node_type_key>.vtep
"""
return get(self._node_type_key_data, "vtep", default=False)
@cached_property
def mpls_ler(self):
"""
switch.mpls_ler set based on
node_type_keys.<node_type_key>.mpls_ler
"""
return get(self._node_type_key_data, "mpls_ler", default=False)
@cached_property
def ip_addressing(self):
"""
switch.ip_addressing.* set based on
templates.ip_addressing.* combined with (overridden by)
node_type_keys.<node_type_key>.ip_addressing.*
"""
hostvar_templates = get(self._hostvars, "templates.ip_addressing", default={})
node_type_templates = get(self._node_type_key_data, "ip_addressing", default={})
if hostvar_templates or node_type_templates:
return self._combine(hostvar_templates, node_type_templates, recursive=True, list_merge='replace')
else:
return {}
@cached_property
def interface_descriptions(self):
"""
switch.interface_descriptions.* set based on
templates.interface_descriptions.* combined with (overridden by)
node_type_keys.<node_type_key>.interface_descriptions.*
"""
hostvar_templates = get(self._hostvars, "templates.interface_descriptions", default={})
node_type_templates = get(self._node_type_key_data, "interface_descriptions", default={})
if hostvar_templates or node_type_templates:
return self._combine(hostvar_templates, node_type_templates, recursive=True, list_merge='replace')
else:
return {}
@cached_property
def _switch_data(self):
"""
internal _switch_data containing inherited vars from fabric_topology data model
Vars are inherited like:
<node_type_key>.defaults ->
<node_type_key>.node_groups.<group> ->
<node_type_key>.node_groups.<group>.nodes.<node> ->
<node_type_key>.nodes.<node>
Returns
-------
dict
node_group : dict
Configuration set at the node_group level - including the "nodes" dict.
Empty dict if the node is not defined under a node_group.
group : str
Optional - Name of the matching node_group. Not set if the node is not defined under a node_group.
combined : dict
Combined configuration after inheritance from all levels
"""
        switch_data = {}
        node_config = {}
hostname = self.hostname
node_type_config = get(self._hostvars, f"{self.node_type_key}", required=True)
if hostname in node_type_config.get('nodes', {}):
node_config = node_type_config['nodes'][hostname]
switch_data['node_group'] = {}
else:
for node_group in node_type_config.get('node_groups', {}):
if hostname in node_type_config['node_groups'][node_group].get('nodes', {}):
node_config = node_type_config['node_groups'][node_group]['nodes'][hostname]
switch_data['node_group'] = node_type_config['node_groups'][node_group]
switch_data['group'] = node_group
break
if not node_config:
raise AristaAvdMissingVariableError(f"{self.node_type_key}.(node_groups.)nodes.{hostname}")
# Load defaults
defaults_config = node_type_config.get('defaults', {})
# Merge node_group data on top of defaults into combined
switch_data['combined'] = self._combine(defaults_config, switch_data['node_group'], recursive=True, list_merge='replace')
# Merge node data on top of combined
switch_data['combined'] = self._combine(switch_data['combined'], node_config, recursive=True, list_merge='replace')
return switch_data
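    # Minimal sketch of the inheritance above (hypothetical l3leaf inventory):
    #   l3leaf:
    #     defaults:    {platform: vEOS-LAB}
    #     node_groups:
    #       pod1:
    #         bgp_as: "65101"
    #         nodes:
    #           leaf1: {id: 1}
    # For host "leaf1", 'group' is "pod1" and 'combined' is defaults merged with the pod1
    # node_group (including its 'nodes' dict) and then with the leaf1 node settings.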
@cached_property
def _switch_data_combined(self):
"""
internal _switch_data_combined pointing to
self._switch_data['combined'] for easier reference.
"""
return get(self._switch_data, "combined", required=True)
@cached_property
def _switch_data_node_group_nodes(self):
"""
internal _switch_data_node_group_nodes pointing to
self._switch_data['node_group']['nodes'] for easier reference.
"""
return get(self._switch_data, "node_group.nodes", default=[])
@cached_property
def group(self):
"""
switch.group set to "node_group" name or None
"""
return get(self._switch_data, "group")
@cached_property
def id(self):
return get(self._switch_data_combined, "id", required=True)
@cached_property
def mgmt_ip(self):
return get(self._switch_data_combined, "mgmt_ip")
@cached_property
def platform(self):
return get(self._switch_data_combined, "platform")
@cached_property
def max_parallel_uplinks(self):
return get(self._switch_data_combined, "max_parallel_uplinks", default=1)
@cached_property
def uplink_switches(self):
return get(self._switch_data_combined, "uplink_switches")
@cached_property
def uplink_interfaces(self):
return get(self._switch_data_combined, "uplink_interfaces")
@cached_property
def uplink_switch_interfaces(self):
return get(self._switch_data_combined, "uplink_switch_interfaces")
@cached_property
def uplink_interface_speed(self):
return get(self._switch_data_combined, "uplink_interface_speed")
@cached_property
def uplink_bfd(self):
return get(self._switch_data_combined, "uplink_bfd")
@cached_property
def uplink_ptp(self):
return get(self._switch_data_combined, "uplink_ptp")
@cached_property
def uplink_macsec(self):
return get(self._switch_data_combined, "uplink_macsec")
@cached_property
def short_esi(self):
'''
If short_esi is set to "auto" we will use sha256 to create a
unique short_esi value based on various uplink information.
'''
short_esi = get(self._switch_data_combined, "short_esi")
if short_esi == "auto":
esi_seed_1 = ''.join(default(self.uplink_switches, [])[:2])
esi_seed_2 = ''.join(default(self.uplink_switch_interfaces, [])[:2])
esi_seed_3 = ''.join(default(self.uplink_interfaces, [])[:2])
esi_seed_4 = default(self.group, '')
esi_hash = sha256(f"{esi_seed_1}{esi_seed_2}{esi_seed_3}{esi_seed_4}".encode('utf-8')).hexdigest()
short_esi = re.sub(r'([0-9a-f]{4})', r'\1:', esi_hash)[:14]
return short_esi
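    # An auto-generated short_esi is the first 14 characters of the colon-separated digest,
    # i.e. three 4-digit hex groups such as "0303:0202:0101" (illustrative value).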
@cached_property
def rack(self):
return get(self._switch_data_combined, "rack")
@cached_property
def raw_eos_cli(self):
return get(self._switch_data_combined, "raw_eos_cli")
@cached_property
def struct_cfg(self):
return get(self._switch_data_combined, "structured_config")
@cached_property
def max_uplink_switches(self):
"""
max_uplink_switches will default to the length of uplink_switches
"""
return default(
get(self._switch_data_combined, "max_uplink_switches"),
len(get(self._switch_data_combined, "uplink_switches", default=[]))
)
@cached_property
def is_deployed(self):
return get(self._hostvars, "is_deployed", default=True)
@cached_property
def platform_settings(self):
platform_settings = get(self._hostvars, "platform_settings", default=[])
# First look for a matching platform setting specifying our platform
for platform_setting in platform_settings:
if self.platform in platform_setting.get('platforms', []):
return platform_setting
# If not found, then look for a default platform setting
for platform_setting in platform_settings:
if 'default' in platform_setting.get('platforms', []):
return platform_setting
return {}
@cached_property
def mgmt_interface(self):
"""
mgmt_interface is inherited from
Global var mgmt_interface ->
Platform Settings management_interface ->
Fabric Topology data model mgmt_interface
"""
return default(
get(self._switch_data_combined, "mgmt_interface"),
self.platform_settings.get("management_interface"),
get(self._hostvars, "mgmt_interface")
)
@cached_property
def underlay_routing_protocol(self):
underlay_routing_protocol = str(get(self._hostvars, "underlay_routing_protocol", default=self.default_underlay_routing_protocol)).lower()
if underlay_routing_protocol not in ['ebgp', 'isis', 'isis-ldp', 'isis-sr', 'isis-sr-ldp', 'ospf', 'ospf-ldp']:
underlay_routing_protocol = self.default_underlay_routing_protocol
return underlay_routing_protocol
@cached_property
def overlay_routing_protocol(self):
overlay_routing_protocol = str(get(self._hostvars, "overlay_routing_protocol", default=self.default_overlay_routing_protocol)).lower()
if overlay_routing_protocol not in ['ebgp', 'ibgp']:
overlay_routing_protocol = self.default_overlay_routing_protocol
return overlay_routing_protocol
@cached_property
def overlay_address_families(self):
return get(self._switch_data_combined, "overlay_address_families", default=self.default_overlay_address_families)
@cached_property
def link_tracking_groups(self):
if get(self._switch_data_combined, "link_tracking.enabled") is True:
link_tracking_groups = []
default_recovery_delay = get(self.platform_settings, 'reload_delay.mlag', 300)
lt_groups = get(self._switch_data_combined, "link_tracking.groups", default=[])
if len(lt_groups) > 0:
for lt_group in lt_groups:
lt_group['recovery_delay'] = lt_group.get('recovery_delay', default_recovery_delay)
link_tracking_groups.append(lt_group)
else:
link_tracking_groups.append({"name": "LT_GROUP1", "recovery_delay": default_recovery_delay})
return link_tracking_groups
return None
@cached_property
def lacp_port_id(self):
if get(self._switch_data_combined, "lacp_port_id_range.enabled") is True:
node_group_length = max(len(self._switch_data_node_group_nodes), 1)
lacp_port_id = {}
switch_id = self.id
port_range = int(get(self._switch_data_combined, 'lacp_port_id_range.size', default=128))
port_offset = int(get(self._switch_data_combined, 'lacp_port_id_range.offset', default=0))
lacp_port_id['begin'] = 1 + (((switch_id - 1) % node_group_length) * port_range) + port_offset
lacp_port_id['end'] = (((switch_id - 1) % node_group_length + 1) * port_range) + port_offset
return lacp_port_id
return None
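    # Example with assumed values: a two-node group, id 2, default size 128 and offset 0 gives
    # begin = 1 + ((2 - 1) % 2) * 128 = 129 and end = (((2 - 1) % 2) + 1) * 128 = 256.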
@cached_property
def _any_network_services(self):
'''
Returns True if either L1, L2 or L3 network_services are enabled
'''
return (
self.network_services_l1 is True
or self.network_services_l2 is True
or self.network_services_l3 is True
)
@cached_property
def filter_tenants(self):
if self._any_network_services:
return get(self._switch_data_combined, "filter.tenants", default=["all"])
return None
@cached_property
def always_include_vrfs_in_tenants(self):
if self._any_network_services:
return get(self._switch_data_combined, "filter.always_include_vrfs_in_tenants")
return None
@cached_property
def filter_tags(self):
if self._any_network_services:
return get(self._switch_data_combined, "filter.tags", default=["all"])
return None
@cached_property
def virtual_router_mac_address(self):
if self._any_network_services:
return get(self._switch_data_combined, "virtual_router_mac_address")
return None
@cached_property
def _vlans(self):
if self._any_network_services:
vlans = []
            # Copy so the cached filter_tags list is not mutated by the append below
            match_tags = list(self.filter_tags)
if self.group is not None:
match_tags.append(self.group)
network_services_keys = get(self._hostvars, "network_services_keys", default=[])
for network_services_key in self._natural_sort(network_services_keys, "name"):
network_services_key_name = network_services_key.get("name")
if network_services_key_name is None or get(self._hostvars, network_services_key_name) is None:
# Invalid network_services_key.name. Skipping.
continue
tenants = get(self._hostvars, network_services_key_name)
# Support legacy data model by converting nested dict to list of dict
tenants = self._convert_dicts(tenants, 'name')
for tenant in self._natural_sort(tenants, 'name'):
if not set(self.filter_tenants).intersection([tenant['name'], 'all']):
# Not matching tenant filters. Skipping this tenant.
continue
vrfs = tenant.get('vrfs', [])
# Support legacy data model by converting nested dict to list of dict
vrfs = self._convert_dicts(vrfs, 'name')
for vrf in self._natural_sort(vrfs, 'name'):
svis = vrf.get('svis', [])
# Support legacy data model by converting nested dict to list of dict
svis = self._convert_dicts(svis, 'id')
for svi in self._natural_sort(svis, 'id'):
svi_tags = svi.get('tags', ['all'])
if "all" in match_tags or set(svi_tags).intersection(match_tags):
vlans.append(int(svi['id']))
l2vlans = tenant.get('l2vlans', [])
# Support legacy data model by converting nested dict to list of dict
l2vlans = self._convert_dicts(l2vlans, 'id')
for l2vlan in self._natural_sort(l2vlans, 'id'):
l2vlan_tags = l2vlan.get('tags', ['all'])
if "all" in match_tags or set(l2vlan_tags).intersection(match_tags):
vlans.append(int(l2vlan['id']))
return vlans
return []
@cached_property
def vlans(self):
return self._list_compress(self._vlans)
@cached_property
def spanning_tree_mode(self):
if self.network_services_l2 is True:
return get(self._switch_data_combined, "spanning_tree_mode")
return "none"
@cached_property
def spanning_tree_priority(self):
if self.network_services_l2 is True:
return get(self._switch_data_combined, "spanning_tree_priority")
return None
@cached_property
def spanning_tree_root_super(self):
if self.network_services_l2 is True:
return get(self._switch_data_combined, "spanning_tree_root_super")
return None
@cached_property
def igmp_snooping_enabled(self):
if self.network_services_l2 is True:
default_igmp_snooping_enabled = get(self._hostvars, "default_igmp_snooping_enabled")
return get(self._switch_data_combined, "igmp_snooping_enabled", default=default_igmp_snooping_enabled)
return None
@cached_property
def loopback_ipv4_pool(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "loopback_ipv4_pool", required=True)
return None
@cached_property
def loopback_ipv4_offset(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "loopback_ipv4_offset", default=0)
return None
@cached_property
def uplink_ipv4_pool(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "uplink_ipv4_pool")
return None
@cached_property
def _ansible_search_path(self):
return get(self._hostvars, "ansible_search_path", required=True)
@cached_property
def router_id(self):
'''
Run template lookup to render ipv4 address for router_id
Since some templates might contain certain legacy variables (switch_*),
those are mapped from the switch.* model
'''
if self.underlay_router is True:
template_vars = {"ansible_search_path": self._ansible_search_path}
# Copying __dict__ will expose all switch facts cached up until this function is run.
# TODO: We should probably find and document a list of supported context variables instead.
template_vars['switch'] = {key: self.__dict__.get(key) for key in self.keys()}
template_vars['switch_id'] = self.id
template_vars['loopback_ipv4_pool'] = self.loopback_ipv4_pool
template_vars['loopback_ipv4_offset'] = self.loopback_ipv4_offset
template_path = get(self.ip_addressing, "router_id", required=True)
return template_var(template_path, template_vars, self._template_lookup_module)
return None
@cached_property
def evpn_gateway_vxlan_l2(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "evpn_gateway.evpn_l2.enabled", default=False)
return None
@cached_property
def evpn_gateway_vxlan_l3(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "evpn_gateway.evpn_l3.enabled", default=False)
return None
@cached_property
def evpn_gateway_vxlan_l3_inter_domain(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "evpn_gateway.evpn_l3.inter_domain", default=True)
return None
@cached_property
def evpn_gateway_remote_peers(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "evpn_gateway.remote_peers")
return None
@cached_property
def bgp_defaults(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "bgp_defaults", default=[])
return None
@cached_property
def bgp_cluster_id(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "bgp_cluster_id")
return None
@cached_property
def bgp_peer_groups(self):
'''
Get bgp_peer_groups configurations or fallback to defaults
Supporting legacy uppercase keys as well.
'''
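# The returned dict (built below) always contains the groups ipv4_underlay_peers,
# mlag_ipv4_underlay_peer, evpn_overlay_peers, evpn_overlay_core, mpls_overlay_peers and
# rr_overlay_peers, each with a 'name' (defaulted when unset) and an optional 'password'.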
if self.underlay_router is True:
return {
"ipv4_underlay_peers": {
"name": default(
get(self._hostvars, "bgp_peer_groups.ipv4_underlay_peers.name"),
get(self._hostvars, "bgp_peer_groups.IPv4_UNDERLAY_PEERS.name"),
"IPv4-UNDERLAY-PEERS"
),
"password": default(
get(self._hostvars, "bgp_peer_groups.ipv4_underlay_peers.password"),
get(self._hostvars, "bgp_peer_groups.IPv4_UNDERLAY_PEERS.password")
),
},
"mlag_ipv4_underlay_peer": {
"name": default(
get(self._hostvars, "bgp_peer_groups.mlag_ipv4_underlay_peer.name"),
get(self._hostvars, "bgp_peer_groups.MLAG_IPv4_UNDERLAY_PEER.name"),
"MLAG-IPv4-UNDERLAY-PEER"
),
"password": default(
get(self._hostvars, "bgp_peer_groups.mlag_ipv4_underlay_peer.password"),
get(self._hostvars, "bgp_peer_groups.MLAG_IPv4_UNDERLAY_PEER.password")
),
},
"evpn_overlay_peers": {
"name": default(
get(self._hostvars, "bgp_peer_groups.evpn_overlay_peers.name"),
get(self._hostvars, "bgp_peer_groups.EVPN_OVERLAY_PEERS.name"),
"EVPN-OVERLAY-PEERS"
),
"password": default(
get(self._hostvars, "bgp_peer_groups.evpn_overlay_peers.password"),
get(self._hostvars, "bgp_peer_groups.EVPN_OVERLAY_PEERS.password")
),
},
"evpn_overlay_core": {
"name": get(self._hostvars, "bgp_peer_groups.evpn_overlay_core.name", default="EVPN-OVERLAY-CORE"),
"password": get(self._hostvars, "bgp_peer_groups.evpn_overlay_core.password")
},
"mpls_overlay_peers": {
"name": get(self._hostvars, "bgp_peer_groups.mpls_overlay_peers.name", default="MPLS-OVERLAY-PEERS"),
"password": get(self._hostvars, "bgp_peer_groups.mpls_overlay_peers.password")
},
"rr_overlay_peers": {
"name": get(self._hostvars, "bgp_peer_groups.rr_overlay_peers.name", default="RR-OVERLAY-PEERS"),
"password": get(self._hostvars, "bgp_peer_groups.rr_overlay_peers.password")
},
}
return None
@cached_property
def evpn_role(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "evpn_role", default=self.default_evpn_role)
return None
@cached_property
def mpls_overlay_role(self):
if self.underlay_router is True:
return get(self._switch_data_combined, "mpls_overlay_role", default=self.default_mpls_overlay_role)
return None
@cached_property
def bgp_as(self):
'''
Get global bgp_as or fabric_topology bgp_as.
This will fail if none of these are found.
'''
if self.underlay_router is True:
if self.underlay_routing_protocol == 'ebgp' or self.evpn_role != 'none' or self.mpls_overlay_role != 'none':
if get(self._hostvars, "bgp_as") is not None:
return str(get(self._hostvars, "bgp_as"))
else:
return str(get(self._switch_data_combined, "bgp_as", required=True))
# Hack to make mpls PR non-breaking, adds empty bgp to igp topology spines
# TODO: Remove this as part of AVD4.0
elif (
self.underlay_routing_protocol in ['isis', 'ospf']
and self.evpn_role == 'none'
and get(self._hostvars, "bgp_as") is not None
):
return str(get(self._hostvars, "bgp_as"))
return None
@cached_property
def evpn_route_servers(self):
'''
For evpn clients the default value for EVPN Route Servers is the content of the uplink_switches variable set elsewhere.
For all other evpn roles there is no default.
'''
if self.underlay_router is True:
if self.evpn_role == "client":
return get(self._switch_data_combined, "evpn_route_servers", default=self.uplink_switches)
else:
return get(self._switch_data_combined, "evpn_route_servers")
return []
@cached_property
def mpls_route_reflectors(self):
if self.underlay_router is True:
if self.mpls_overlay_role in ["client", "server"]:
return get(self._switch_data_combined, "mpls_route_reflectors")
return None
@cached_property
def isis_net(self):
if self.underlay_router is True:
if self.underlay_routing_protocol in ['isis', 'isis-ldp', 'isis-sr', 'isis-sr-ldp']:
isis_system_id_prefix = get(self._switch_data_combined, "isis_system_id_prefix")
if isis_system_id_prefix is not None:
isis_area_id = get(self._hostvars, "isis_area_id", required=True)
switch_id = self.id
return f"{isis_area_id}.{isis_system_id_prefix}.{switch_id:04d}.00"
return None
@cached_property
def is_type(self):
if self.underlay_router is True:
if self.underlay_routing_protocol in ['isis', 'isis-ldp', 'isis-sr', 'isis-sr-ldp']:
default_is_type = get(self._hostvars, "isis_default_is_type", default="level-2")
is_type = str(get(self._switch_data_combined, "is_type", default=default_is_type)).lower()
if is_type not in ["level-1", "level-2", "level-1-2"]:
is_type = "level-2"
return is_type
return None
@cached_property
def isis_instance_name(self):
if self.underlay_router is True:
if self.underlay_routing_protocol in ['isis', 'isis-ldp', 'isis-sr', 'isis-sr-ldp']:
if self.mpls_lsr is True:
default_isis_instance_name = "CORE"
else:
default_isis_instance_name = "EVPN_UNDERLAY"
return get(self._hostvars, "underlay_isis_instance_name", default=default_isis_instance_name)
return None
@cached_property
def node_sid(self):
if self.underlay_router is True:
if self.underlay_routing_protocol in ['isis-sr', 'isis-sr-ldp']:
node_sid_base = int(get(self._switch_data_combined, "node_sid_base", default=0))
return self.id + node_sid_base
return None
@cached_property
def underlay_ipv6(self):
if self.underlay_router is True:
return get(self._hostvars, "underlay_ipv6")
return None
@cached_property
def loopback_ipv6_pool(self):
if self.underlay_ipv6 is True:
return get(self._switch_data_combined, "loopback_ipv6_pool", required=True)
return None
@cached_property
def loopback_ipv6_offset(self):
if self.underlay_ipv6 is True:
return get(self._switch_data_combined, "loopback_ipv6_offset", default=0)
return None
@cached_property
def ipv6_router_id(self):
'''
Run template lookup to render ipv6 address for router_id
Since some templates might contain certain legacy variables (switch_*),
those are mapped from the switch.* model
'''
if self.underlay_ipv6 is True:
template_vars = {"ansible_search_path": self._ansible_search_path}
# Copying __dict__ will expose all switch facts cached up until this function is run.
# TODO: We should probably find and document a list of supported context variables instead.
template_vars['switch'] = {key: self.__dict__.get(key) for key in self.keys()}
template_vars['switch_id'] = self.id
template_vars['loopback_ipv6_pool'] = self.loopback_ipv6_pool
template_vars['loopback_ipv6_offset'] = self.loopback_ipv6_offset
template_path = get(self.ip_addressing, "ipv6_router_id", required=True)
return template_var(template_path, template_vars, self._template_lookup_module)
return None
@cached_property
def mlag(self):
return (
self.mlag_support is True
and get(self._switch_data_combined, "mlag", default=True) is True
and len(self._switch_data_node_group_nodes) == 2
)
@cached_property
def mlag_ibgp_origin_incomplete(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_ibgp_origin_incomplete", default=True)
return None
@cached_property
def mlag_peer_vlan(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_peer_vlan", default=4094)
return None
@cached_property
def mlag_peer_link_allowed_vlans(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_peer_link_allowed_vlans", default="2-4094")
return None
@cached_property
def mlag_dual_primary_detection(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_dual_primary_detection", default=False)
return None
@cached_property
def mlag_interfaces(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_interfaces")
return None
@cached_property
def mlag_interfaces_speed(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_interfaces_speed")
return None
@cached_property
def mlag_peer_ipv4_pool(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_peer_ipv4_pool")
return None
@cached_property
def mlag_peer_l3_ipv4_pool(self):
if self.mlag is True:
return get(self._switch_data_combined, "mlag_peer_l3_ipv4_pool")
return None
@cached_property
def mlag_role(self):
if self.mlag is True:
index = list(self._switch_data_node_group_nodes.keys()).index(self.hostname)
if index == 0:
return "primary"
elif index == 1:
return "secondary"
return None
@cached_property
def mlag_peer(self):
if self.mlag is True:
if self.mlag_role == "primary":
return list(self._switch_data_node_group_nodes.keys())[1]
if self.mlag_role == "secondary":
return list(self._switch_data_node_group_nodes.keys())[0]
return None
@cached_property
def mlag_l3(self):
if self.mlag is True and self.underlay_router is True:
return True
return None
@cached_property
def mlag_peer_l3_vlan(self):
if self.mlag_l3 is True:
mlag_peer_vlan = self.mlag_peer_vlan
mlag_peer_l3_vlan = get(self._switch_data_combined, "mlag_peer_l3_vlan", default=4093)
if mlag_peer_l3_vlan not in [None, False, mlag_peer_vlan]:
return mlag_peer_l3_vlan
return None
@cached_property
def mlag_port_channel_id(self):
if self.mlag is True:
default_mlag_port_channel_id = ''.join(re.findall(r'\d', self.mlag_interfaces[0]))
return get(self._switch_data_combined, "mlag_port_channel_id", default_mlag_port_channel_id)
return None
@cached_property
def vtep_loopback_ipv4_pool(self):
if self.vtep is True:
return get(self._switch_data_combined, "vtep_loopback_ipv4_pool", required=True)
return None
@cached_property
def vtep_loopback(self):
if self.vtep is True:
return get(self._switch_data_combined, "vtep_loopback", default="Loopback1")
@cached_property
def inband_management_subnet(self):
return get(self._switch_data_combined, "inband_management_subnet")
@cached_property
def inband_management_role(self):
if self.inband_management_subnet is not None and self.uplink_type == 'port-channel':
return "child"
return None
@cached_property
def inband_management_parents(self):
if self.inband_management_role == "child":
return self.uplink_switches
return None
@cached_property
def inband_management_vlan(self):
if self.inband_management_role == "child":
return get(self._switch_data_combined, "inband_management_vlan", default=4092)
return None
@cached_property
def inband_management_ip(self):
if self.inband_management_role == "child":
subnet = ipaddress.ip_network(self.inband_management_subnet, strict=False)
hosts = list(subnet.hosts())
inband_management_ip = str(hosts[2 + self.id])
inband_management_prefix = str(subnet.prefixlen)
return f"{inband_management_ip}/{inband_management_prefix}"
return None
@cached_property
def inband_management_gateway(self):
if self.inband_management_role == "child":
subnet = ipaddress.ip_network(self.inband_management_subnet, strict=False)
hosts = list(subnet.hosts())
return str(hosts[0])
return None
@cached_property
def inband_management_interface(self):
if self.inband_management_role == "child":
return f"Vlan{self.inband_management_vlan}"
return None
@cached_property
def uplinks(self):
'''
List of uplinks with all parameters
These facts are leveraged by templates for this device when rendering uplinks
and by templates for peer devices when rendering downlinks
'''
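# Illustrative shape of one entry in the returned list for a p2p uplink (keys mirror the
# assignments below; values are placeholders, not defaults):
# {'interface': 'Ethernet1', 'peer': 'spine1', 'peer_interface': 'Ethernet3', 'peer_type': 'spine',
#  'peer_is_deployed': True, 'peer_bgp_as': '65001', 'type': 'underlay_p2p',
#  'ip_address': '172.31.255.1/31', 'peer_ip_address': '172.31.255.0/31'}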
uplinks = []
if self.uplink_type == 'p2p':
uplink_interfaces = default(self.uplink_interfaces, [])
uplink_switches = default(self.uplink_switches, [])
uplink_switch_interfaces = default(self.uplink_switch_interfaces, [])
fabric_name = get(self._hostvars, "fabric_name", required=True)
inventory_group = get(self._hostvars, f"groups.{fabric_name}", required=True)
template_vars = {"ansible_search_path": self._ansible_search_path}
# Copying __dict__ will expose all switch facts cached up until this function is run.
# TODO: We should probably find and document a list of supported context variables instead.
template_vars['switch'] = {key: self.__dict__.get(key) for key in self.keys()}
template_vars['switch_id'] = self.id
for uplink_index, uplink_interface in enumerate(uplink_interfaces):
if len(uplink_switches) <= uplink_index or len(uplink_switch_interfaces) <= uplink_index:
# Invalid length of input variables. Skipping
continue
uplink_switch = uplink_switches[uplink_index]
if uplink_switch is None or uplink_switch not in inventory_group:
# Invalid uplink_switch. Skipping.
continue
uplink_switch_facts: EosDesignsFacts = get(self._hostvars,
f"avd_switch_facts..{uplink_switch}..switch",
required=True,
org_key=f"avd_switch_facts.({uplink_switch}).switch",
separator="..")
uplink = {}
uplink['interface'] = uplink_interface
uplink['peer'] = uplink_switch
uplink['peer_interface'] = uplink_switch_interfaces[uplink_index]
uplink['peer_type'] = uplink_switch_facts.type
uplink['peer_is_deployed'] = uplink_switch_facts.is_deployed
uplink['peer_bgp_as'] = uplink_switch_facts.bgp_as
uplink['type'] = 'underlay_p2p'
if self.uplink_interface_speed is not None:
uplink['speed'] = self.uplink_interface_speed
if self.uplink_bfd is True:
uplink['bfd'] = True
if self.uplink_ptp is not None:
uplink['ptp'] = self.uplink_ptp
if self.uplink_macsec is not None:
uplink['mac_security'] = self.uplink_macsec
if get(self._hostvars, "underlay_rfc5549") is True:
uplink['ipv6_enable'] = True
else:
template_vars['uplink_switch_index'] = uplink_index
template_path = get(self.ip_addressing, "p2p_uplinks_ip", required=True)
uplink['ip_address'] = template_var(template_path, template_vars, self._template_lookup_module)
template_path = get(self.ip_addressing, "p2p_uplinks_peer_ip", required=True)
uplink['peer_ip_address'] = template_var(template_path, template_vars, self._template_lookup_module)
if self.link_tracking_groups is not None:
uplink['link_tracking_groups'] = []
for lt_group in self.link_tracking_groups:
uplink['link_tracking_groups'].append({"name": lt_group["name"], "direction": "upstream"})
uplinks.append(uplink)
return uplinks
elif self.uplink_type == 'port-channel':
uplink_interfaces = default(self.uplink_interfaces, [])
uplink_switches = default(self.uplink_switches, [])
uplink_switch_interfaces = default(self.uplink_switch_interfaces, [])
fabric_name = get(self._hostvars, "fabric_name", required=True)
inventory_group = get(self._hostvars, f"groups.{fabric_name}", required=True)
for uplink_index, uplink_interface in enumerate(uplink_interfaces):
if len(uplink_switches) <= uplink_index or len(uplink_switch_interfaces) <= uplink_index:
# Invalid length of input variables. Skipping
continue
uplink_switch = uplink_switches[uplink_index]
if uplink_switch is None or uplink_switch not in inventory_group:
# Invalid uplink_switch. Skipping.
continue
uplink_switch_facts: EosDesignsFacts = get(self._hostvars,
f"avd_switch_facts..{uplink_switch}..switch",
required=True,
org_key=f"avd_switch_facts.({uplink_switch}).switch",
separator="..")
uplink = {}
uplink['interface'] = uplink_interface
uplink['peer'] = uplink_switch
uplink['peer_interface'] = uplink_switch_interfaces[uplink_index]
uplink['peer_type'] = uplink_switch_facts.type
uplink['peer_is_deployed'] = uplink_switch_facts.is_deployed
uplink['type'] = 'underlay_l2'
if self.uplink_interface_speed is not None:
uplink['speed'] = self.uplink_interface_speed
if uplink_switch_facts.mlag is True or self.short_esi is not None:
# Override our description on port-channel to be peer's group name if they are mlag pair or A/A #}
uplink['channel_description'] = uplink_switch_facts.group
if self.mlag is True:
# Override the peer's description on port-channel to be our group name if we are mlag pair #}
uplink['peer_channel_description'] = self.group
if self.mlag_role == 'secondary':
mlag_peer_switch_facts: EosDesignsFacts = get(self._hostvars, f"avd_switch_facts.{self.mlag_peer}.switch", required=True)
uplink['channel_group_id'] = ''.join(re.findall(r'\d', mlag_peer_switch_facts.uplink_interfaces[0]))
uplink['peer_channel_group_id'] = ''.join(re.findall(r'\d', mlag_peer_switch_facts.uplink_switch_interfaces[0]))
else:
uplink['channel_group_id'] = ''.join(re.findall(r'\d', uplink_interfaces[0]))
uplink['peer_channel_group_id'] = ''.join(re.findall(r'\d', uplink_switch_interfaces[0]))
# Remove vlans if upstream switch does not have them #}
switch_vlans = self._vlans
uplink_switch_vlans = uplink_switch_facts._vlans
uplink_vlans = list(set(switch_vlans).intersection(uplink_switch_vlans))
if self.inband_management_vlan is not None:
uplink_vlans.append(int(self.inband_management_vlan))
uplink['vlans'] = self._list_compress(uplink_vlans)
if self.short_esi is not None:
uplink['peer_short_esi'] = self.short_esi
if self.link_tracking_groups is not None:
uplink['link_tracking_groups'] = []
for lt_group in self.link_tracking_groups:
uplink['link_tracking_groups'].append({"name": lt_group["name"], "direction": "upstream"})
uplinks.append(uplink)
return uplinks
@cached_property
def uplink_peers(self):
'''
List of all uplink peers
These are used to generate the "avd_topology_peers" fact covering downlinks for all devices.
'''
if self.uplinks is not None:
return [uplink['peer'] for uplink in self.uplinks]
@cached_property
def _mlag_peer_id(self):
if self.mlag is True:
return get(self._hostvars,
f"avd_switch_facts..{self.mlag_peer}..switch..id",
required=True,
org_key=f"avd_switch_facts.({self.mlag_peer}).switch.id",
separator="..")
@cached_property
def vtep_ip(self):
'''
Run template lookup to render ipv4 address for vtep_ip
Since some templates might contain certain legacy variables (switch_*),
those are mapped from the switch.* model
'''
if self.vtep is True:
template_vars = {"ansible_search_path": self._ansible_search_path}
# Copying __dict__ will expose all switch facts cached up until this function is run.
# TODO: We should probably find and document a list of supported context variables instead.
template_vars['switch'] = {key: self.__dict__.get(key) for key in self.keys()}
template_vars['switch_id'] = self.id
template_vars['switch_vtep_loopback_ipv4_pool'] = self.vtep_loopback_ipv4_pool
template_vars['loopback_ipv4_offset'] = self.loopback_ipv4_offset
if self.mlag is True:
if self.mlag_role == 'primary':
template_vars['mlag_primary_id'] = self.id
template_vars['mlag_secondary_id'] = self._mlag_peer_id
elif self.mlag_role == 'secondary':
template_vars['mlag_secondary_id'] = self.id
template_vars['mlag_primary_id'] = self._mlag_peer_id
template_path = get(self.ip_addressing, "vtep_ip_mlag", required=True)
return template_var(template_path, template_vars, self._template_lookup_module)
else:
template_path = get(self.ip_addressing, "vtep_ip", required=True)
return template_var(template_path, template_vars, self._template_lookup_module)
return None
@cached_property
def mlag_ip(self):
'''
Run template lookup to render ipv4 address for mlag_ip
Since some templates might contain certain legacy variables (switch_*),
those are mapped from the switch.* model
'''
if self.mlag is True:
template_vars = {"ansible_search_path": self._ansible_search_path}
# Copying __dict__ will expose all switch facts cached up until this function is run.
# TODO: We should probably find and document a list of supported context variables instead.
template_vars['switch'] = {key: self.__dict__.get(key) for key in self.keys()}
template_vars['switch_id'] = self.id
template_vars['switch_data'] = {
"combined": {
"mlag_peer_ipv4_pool": self.mlag_peer_ipv4_pool
}
}
if self.mlag_role == 'primary':
template_vars['mlag_primary_id'] = self.id
template_vars['mlag_secondary_id'] = self._mlag_peer_id
template_path = get(self.ip_addressing, "mlag_ip_primary", required=True)
elif self.mlag_role == 'secondary':
template_vars['mlag_secondary_id'] = self.id
template_vars['mlag_primary_id'] = self._mlag_peer_id
template_path = get(self.ip_addressing, "mlag_ip_secondary", required=True)
return template_var(template_path, template_vars, self._template_lookup_module)
return None
@cached_property
def mlag_peer_ip(self):
if self.mlag is True:
return get(self._hostvars,
f"avd_switch_facts..{self.mlag_peer}..switch..mlag_ip",
required=True,
org_key=f"avd_switch_facts.({self.mlag_peer}).switch.mlag_ip",
separator="..")
return None
@cached_property
def mlag_l3_ip(self):
'''
Run template lookup to render ipv4 address for mlag_l3_ip
Since some templates might contain certain legacy variables (switch_*),
those are mapped from the switch.* model
'''
if self.mlag_l3 is True and self.mlag_peer_l3_vlan is not None:
template_vars = {"ansible_search_path": self._ansible_search_path}
# Copying __dict__ will expose all switch facts cached up until this function is run.
# TODO: We should probably find and document a list of supported context variables instead.
template_vars['switch'] = {key: self.__dict__.get(key) for key in self.keys()}
template_vars['switch_id'] = self.id
template_vars['switch_data'] = {
"combined": {
"mlag_peer_l3_ipv4_pool": self.mlag_peer_l3_ipv4_pool
}
}
if self.mlag_role == 'primary':
template_vars['mlag_primary_id'] = self.id
template_vars['mlag_secondary_id'] = self._mlag_peer_id
template_path = get(self.ip_addressing, "mlag_l3_ip_primary", required=True)
elif self.mlag_role == 'secondary':
template_vars['mlag_secondary_id'] = self.id
template_vars['mlag_primary_id'] = self._mlag_peer_id
template_path = get(self.ip_addressing, "mlag_l3_ip_secondary", required=True)
return template_var(template_path, template_vars, self._template_lookup_module)
return None
@cached_property
def mlag_peer_l3_ip(self):
if self.mlag_l3 is True and self.mlag_peer_l3_vlan is not None:
return get(self._hostvars,
f"avd_switch_facts..{self.mlag_peer}..switch..mlag_l3_ip",
required=True,
org_key=f"avd_switch_facts.({self.mlag_peer}).switch.mlag_l3_ip",
separator="..")
return None
@cached_property
def mlag_peer_mgmt_ip(self):
if self.mlag is True:
peer_mgmt_ip = get(self._hostvars,
f"avd_switch_facts..{self.mlag_peer}..switch..mgmt_ip",
org_key=f"avd_switch_facts.({self.mlag_peer}).switch.mgmt_ip",
separator="..")
if peer_mgmt_ip is not None:
return str(ipaddress.ip_interface(peer_mgmt_ip).ip)
return None
@cached_property
def overlay_routing_protocol_address_family(self):
overlay_routing_protocol_address_family = get(self._hostvars, "overlay_routing_protocol_address_family", default="ipv4")
if overlay_routing_protocol_address_family == "ipv6":
if not (
get(self._hostvars, "underlay_ipv6") is True
and get(self._hostvars, "underlay_rfc5549") is True
):
raise AristaAvdError(
"'overlay_routing_protocol_address_family: ipv6' is only supported in"
"combination with 'underlay_ipv6: True' and 'underlay_rfc5549: True'"
)
return overlay_routing_protocol_address_family
|
the-stack_106_31994 | import logging
import sendgrid
import azure.functions as func
import simplejson as json
from os import environ
from sendgrid.helpers.mail import *
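# Expected JSON payload (illustrative, inferred from the parsing below; values are placeholders):
# {
#   "from": "sender@example.com",
#   "to": "recipient@example.com",
#   "subject": "Hello",
#   "template_id": "d-xxxxxxxxxxxxxxxx",
#   "substitutions": {"-name-": "Jane"}
# }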
def main(req: func.HttpRequest) -> func.HttpResponse:
try:
logging.info("SendGrid email triggered.")
logging.debug("Parsing message data from request body")
body = req.get_json()
from_email = Email(body["from"])
to_email = To(body["to"])
subject = Subject(body["subject"])
template_id = TemplateId(body["template_id"])
logging.debug("Getting template value substitutions")
substitutions = []
for substitution_key in body["substitutions"].keys():
message_substitution = Substitution(
key=substitution_key, value=body["substitutions"][substitution_key])
substitutions.append(message_substitution)
logging.info("Message contents parsed from request input.")
sg = sendgrid.SendGridAPIClient(
api_key=environ.get('SENDGRID_API_KEY'))
logging.info("SendGrid client initialized")
mail = Mail(from_email=from_email, to_emails=to_email,
subject=subject, global_substitutions=substitutions)
mail.template_id = template_id
logging.info("Message initialized")
response = sg.client.mail.send.post(request_body=mail.get())
logging.info("Message sent!")
return func.HttpResponse(body=json.dumps(response.body.decode("utf-8") if response.body else None), status_code=response.status_code, mimetype="application/json")
except Exception as email_exception:
logging.error("Error sending email!")
logging.error(email_exception)
|
the-stack_106_31995 | #########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
from setuptools import find_packages
from setuptools import setup
def requirements(f):
with open(f) as fd:
return [
l for l in [r.strip() for r in fd.readlines()] if l and not l.startswith('-') and not l.startswith("#")
]
install_requires = requirements('requirements.txt')
setup(
name='bulk-api-excel-export',
url='https://github.com/CyberGRX/api-connector-bulk/examples/excel-export',
author='CyberGRX Engineering Team',
author_email='[email protected]',
version="1.0.0",
packages=find_packages("."),
install_requires=install_requires,
extras_require={
'license': 'pip-licenses==1.7.1',
},
)
|
the-stack_106_31996 | from django import forms
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_model
from oscar.forms import widgets
Voucher = get_model('voucher', 'Voucher')
VoucherSet = get_model('voucher', 'VoucherSet')
Benefit = get_model('offer', 'Benefit')
Range = get_model('offer', 'Range')
class VoucherForm(forms.Form):
"""
A specialised form for creating a voucher and offer
model.
"""
name = forms.CharField(label=_("Name"))
code = forms.CharField(label=_("Code"))
start_datetime = forms.DateTimeField(
widget=widgets.DateTimePickerInput(),
label=_("Start datetime"))
end_datetime = forms.DateTimeField(
widget=widgets.DateTimePickerInput(),
label=_("End datetime"))
usage = forms.ChoiceField(choices=Voucher.USAGE_CHOICES, label=_("Usage"))
benefit_range = forms.ModelChoiceField(
label=_('Which products get a discount?'),
queryset=Range.objects.all(),
)
benefit_type = forms.ChoiceField(
choices=Benefit.TYPE_CHOICES,
label=_('Discount type'),
)
benefit_value = forms.DecimalField(
label=_('Discount value'))
exclusive = forms.BooleanField(
required=False,
label=_("Exclusive offers cannot be combined on the same items"))
def __init__(self, voucher=None, *args, **kwargs):
self.voucher = voucher
super().__init__(*args, **kwargs)
def clean_name(self):
name = self.cleaned_data['name']
try:
voucher = Voucher.objects.get(name=name)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The name '%s' is already in"
" use") % name)
return name
def clean_code(self):
code = self.cleaned_data['code'].strip().upper()
if not code:
raise forms.ValidationError(_("Please enter a voucher code"))
try:
voucher = Voucher.objects.get(code=code)
except Voucher.DoesNotExist:
pass
else:
if (not self.voucher) or (voucher.id != self.voucher.id):
raise forms.ValidationError(_("The code '%s' is already in"
" use") % code)
return code
def clean(self):
cleaned_data = super().clean()
start_datetime = cleaned_data.get('start_datetime')
end_datetime = cleaned_data.get('end_datetime')
if start_datetime and end_datetime and end_datetime < start_datetime:
raise forms.ValidationError(_("The start date must be before the"
" end date"))
return cleaned_data
class VoucherSearchForm(forms.Form):
name = forms.CharField(required=False, label=_("Name"))
code = forms.CharField(required=False, label=_("Code"))
is_active = forms.BooleanField(required=False, label=_("Is Active?"))
in_set = forms.BooleanField(
required=False, label=_("In Voucherset?"))
def clean_code(self):
return self.cleaned_data['code'].upper()
class VoucherSetForm(forms.ModelForm):
class Meta:
model = VoucherSet
fields = [
'name',
'code_length',
'description',
'start_datetime',
'end_datetime',
'count',
]
widgets = {
'start_datetime': widgets.DateTimePickerInput(),
'end_datetime': widgets.DateTimePickerInput(),
}
benefit_range = forms.ModelChoiceField(
label=_('Which products get a discount?'),
queryset=Range.objects.all(),
)
benefit_type = forms.ChoiceField(
choices=Benefit.TYPE_CHOICES,
label=_('Discount type'),
)
benefit_value = forms.DecimalField(
label=_('Discount value'))
def save(self, commit=True):
instance = super().save(commit)
if commit:
instance.generate_vouchers()
return instance
class VoucherSetSearchForm(forms.Form):
code = forms.CharField(required=False, label=_("Code"))
is_active = forms.BooleanField(required=False, label=_("Is Active?"))
def clean_code(self):
return self.cleaned_data['code'].upper()
|
the-stack_106_31997 | from os import path as osp
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from basicsr.data.transforms import augment
from basicsr.utils import FileClient, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class FFHQDataset(data.Dataset):
"""FFHQ dataset for StyleGAN.
Args:
opt (dict): Config for train datasets. It contains the following keys:
dataroot_gt (str): Data root path for gt.
io_backend (dict): IO backend type and other kwarg.
mean (list | tuple): Image mean.
std (list | tuple): Image std.
use_hflip (bool): Whether to horizontally flip.
"""
def __init__(self, opt):
super(FFHQDataset, self).__init__()
self.opt = opt
# file client (io backend)
self.file_client = None
self.io_backend_opt = opt['io_backend']
self.gt_folder = opt['dataroot_gt']
self.mean = opt['mean']
self.std = opt['std']
if self.io_backend_opt['type'] == 'lmdb':
self.io_backend_opt['db_paths'] = self.gt_folder
if not self.gt_folder.endswith('.lmdb'):
raise ValueError("'dataroot_gt' should end with '.lmdb', "
f'but received {self.gt_folder}')
with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
self.paths = [line.split('.')[0] for line in fin]
else:
# FFHQ has 70000 images in total
self.paths = [
osp.join(self.gt_folder, f'{v:08d}.png') for v in range(70000)
]
def __getitem__(self, index):
if self.file_client is None:
self.file_client = FileClient(
self.io_backend_opt.pop('type'), **self.io_backend_opt)
# load gt image
gt_path = self.paths[index]
img_bytes = self.file_client.get(gt_path)
img_gt = imfrombytes(img_bytes, float32=True)
# random horizontal flip
img_gt = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False)
# BGR to RGB, HWC to CHW, numpy to tensor
img_gt = img2tensor(img_gt, bgr2rgb=True, float32=True)
# normalize
normalize(img_gt, self.mean, self.std, inplace=True)
return {'gt': img_gt, 'gt_path': gt_path}
def __len__(self):
return len(self.paths)
|
the-stack_106_31999 | """
Fully Written by RoseLoverX
"""
from Evie import tbot, CMD_HELP, OWNER_ID
import os, re, csv, json, time, uuid, pytz
from datetime import datetime
from Evie.function import is_admin
from io import BytesIO
import Evie.modules.sql.feds_sql as sql
from telethon import *
from telethon import Button
from telethon.tl import *
from telethon.tl.types import User
from Evie import *
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import MessageMediaDocument, DocumentAttributeFilename
from Evie.events import register
"""
Fully Written by RoseLoverX
"""
from telethon.tl.types import ChatBannedRights
from telethon.tl.functions.channels import EditBannedRequest
BANNED_RIGHTS = ChatBannedRights(
until_date=None,
view_messages=True,
send_messages=True,
send_media=True,
send_stickers=True,
send_gifs=True,
send_games=True,
send_inline=True,
embed_links=True,
)
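# Rights object with every permission revoked; applied via EditBannedRequest it acts as a full ban.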
"""
Fully Written by RoseLoverX
"""
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
user_obj = await tbot.get_entity(previous_message.sender_id)
fname = previous_message.sender.first_name
else:
user = event.pattern_match.group(1)
if user.isnumeric():
user = int(user)
if not user:
return
try:
user_obj = await tbot.get_entity(user)
except (TypeError, ValueError) as err:
await event.reply(str(err))
return None
return user_obj
def is_user_fed_admin(fed_id, user_id):
fed_admins = sql.all_fed_users(fed_id)
if fed_admins is False:
return False
if int(user_id) in fed_admins or int(user_id) == OWNER_ID:
return True
else:
return False
def is_user_fed_owner(fed_id, user_id):
getsql = sql.get_fed_info(fed_id)
if getsql is False:
return False
getfedowner = eval(getsql["fusers"])
if getfedowner is None or getfedowner is False:
return False
getfedowner = getfedowner["owner"]
if str(user_id) == getfedowner or int(user_id) == OWNER_ID:
return True
else:
return False
"""
Fully Written by RoseLoverX
"""
@register(pattern="^/newfed ?(.*)")
async def new(event):
if not event.is_private:
return await event.reply("Create your federation in my PM - not in a group.")
name = event.pattern_match.group(1)
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if fedowner:
for f in fedowner:
text = "{}".format(f["fed"]["fname"])
return await event.reply(f"You already have a federation called `{text}` ; you can't create another. If you would like to rename it, use /renamefed.")
if not name:
return await event.reply("You need to give your federation a name! Federation names can be up to 64 characters long.")
if len(name) > 64:
return await event.reply("Federation names can only be upto 64 charactors long.")
fed_id = str(uuid.uuid4())
fed_name = name
x = sql.new_fed(event.sender_id, fed_name, fed_id)
return await event.reply(f"Created new federation with FedID: `{fed_id}`.\nUse this ID to join the federation! eg:\n`/joinfed {fed_id}`")
@register(pattern="^/delfed")
async def smexy(event):
if not event.is_private:
return await event.reply("Delete your federation in my PM - not in a group.")
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("It doesn't look like you have a federation yet!")
for f in fedowner:
fed_id = "{}".format(f["fed_id"])
name = f["fed"]["fname"]
await tbot.send_message(
event.chat_id,
"Are you sure you want to delete your federation? This action cannot be undone - you will lose your entire ban list, and '{}' will be permanently gone.".format(name),
buttons=[
[Button.inline("Delete Federation", data="rmfed_{}".format(fed_id))],
[Button.inline("Cancel", data="nada")],
],
)
@tbot.on(events.CallbackQuery(pattern=r"rmfed(\_(.*))"))
async def delete_fed(event):
tata = event.pattern_match.group(1)
data = tata.decode()
fed_id = data.split("_", 1)[1]
delete = sql.del_fed(fed_id)
await event.edit("You have deleted your federation! All chats linked to it are now federation-less.")
@tbot.on(events.CallbackQuery(pattern=r"nada"))
async def delete_fed(event):
await event.edit("Federation deletion canceled")
@register(pattern="^/renamefed ?(.*)")
async def cgname(event):
if not event.is_private:
return await event.reply("You can only rename your fed in PM.")
user_id = event.sender_id
newname = event.pattern_match.group(1)
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("It doesn't look like you have a federation yet!")
if not newname:
return await event.reply("You need to give your federation a new name! Federation names can be up to 64 characters long.")
for f in fedowner:
fed_id = f["fed_id"]
name = f["fed"]["fname"]
sql.rename_fed(fed_id, user_id, newname)
return await event.reply(f"Tada! I've renamed your federation from '{name}' to '{newname}'. [FedID: `{fed_id}`].")
@register(pattern="^/chatfed")
async def cf(event):
chat = event.chat_id
if event.is_private:
return
if not await is_admin(event, event.sender_id):
return await event.reply("You need to be an admin to do this.")
fed_id = sql.get_fed_id(chat)
if not fed_id:
return await event.reply("This chat isn't part of any feds yet!")
info = sql.get_fed_info(fed_id)
name = info["fname"]
await event.reply(f"Chat {event.chat.title} is part of the following federation: {name} [ID: `{fed_id}`]")
@register(pattern="^/joinfed ?(.*)")
async def jf(event):
if not event.is_group:
return
if not await is_admin(event, event.sender_id):
await event.reply("You need to be an admin to do this.")
return
permissions = await tbot.get_permissions(event.chat_id, event.sender_id)
if not permissions.is_creator:
return await event.reply(f"You need to be the chat owner of {event.chat.title} to do this.")
args = event.pattern_match.group(1)
if not args:
return await event.reply("You need to specify which federation you're asking about by giving me a FedID!")
if len(args) < 8:
return await event.reply("This isn't a valid FedID format!")
getfed = sql.search_fed_by_id(args)
name = getfed["fname"]
if not getfed:
return await event.reply("This FedID does not refer to an existing federation.")
fed_id = sql.get_fed_id(event.chat_id)
if fed_id:
sql.chat_leave_fed(event.chat_id)
x = sql.chat_join_fed(args, event.chat.title, event.chat_id)
return await event.reply(f'Successfully joined the "{name}" federation! All new federation bans will now also remove the members from this chat.')
@register(pattern="^/leavefed")
async def lf(event):
if not event.is_group:
return
if not await is_admin(event, event.sender_id):
await event.reply("You need to be an admin to do this.")
return
permissions = await tbot.get_permissions(event.chat_id, event.sender_id)
if not permissions.is_creator:
return await event.reply(f"You need to be the chat owner of {event.chat.title} to do this.")
chat = event.chat_id
fed_id = sql.get_fed_id(chat)
if not fed_id:
return await event.reply("This chat isn't currently in any federations!")
fed_info = sql.get_fed_info(fed_id)
name = fed_info["fname"]
sql.chat_leave_fed(chat)
return await event.reply(f'Chat {event.chat.title} has left the "{name}" federation.')
@register(pattern="^/fpromote ?(.*)")
async def p(event):
if event.is_private:
return await event.reply("This command is made to be run in a group where the person you would like to promote is present.")
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("Only federation creators can promote people, and you don't seem to have a federation to promote to!")
args = await get_user_from_event(event)
if not args:
return await event.reply("I don't know who you're talking about, you're going to need to specify a user...!")
chat = event.chat
for f in fedowner:
fed_id = f["fed_id"]
name = f["fed"]["fname"]
user_id = args.id
fban, fbanreason, fbantime = sql.get_fban_user(fed_id, int(args.id))
replied_user = await tbot(GetFullUserRequest(user_id))
fname = replied_user.user.first_name
if fban:
if fbanreason != '':
return await event.reply(f"User {fname} is fbanned in {name}. You should unfban them before promoting.\n\nReason:\n{fbanreason}")
else:
return await event.reply(f"User {fname} is fbanned in {name}. You should unfban them before promoting.")
getuser = sql.search_user_in_fed(fed_id, user_id)
if getuser:
return await event.reply(f"[{fname}](tg://user?id={args.id}) is already an admin in {name}!")
try:
mk = f"{user_id}|{name[:5]}|{fed_id}"
km = f"{user_id}|{event.sender_id}"
await tbot.send_message(
event.chat_id,
f"Please get [{fname}](tg://user?id={args.id}) to confirm that they would like to be fed admin for {name}",
buttons=[
Button.inline("Confirm", data="fkfed_{}".format(mk)),
Button.inline("Cancel", data="smex_{}".format(km)),
],
)
except Exception as e:
print(e)
"""
Fully Written by RoseLoverX
"""
@tbot.on(events.CallbackQuery(pattern=r"fkfed(\_(.*))"))
async def smex_fed(event):
tata = event.pattern_match.group(1)
data = tata.decode()
input = data.split("_", 1)[1]
user, owner, fed_id= input.split("|")
user = user.strip()
name = owner.strip()
fed_id = fed_id.strip()
rt = await tbot(GetFullUserRequest(int(user)))
fname = rt.user.first_name
if not event.sender_id == int(user):
return await event.answer("You are not the user being fpromoted")
res = sql.user_join_fed(fed_id, int(user))
if res:
return await event.edit(f"User [{fname}](tg://user?id={user}) is now an admin of {name} [{fed_id}]")
"""
Fully Written by RoseLoverX
"""
@tbot.on(events.CallbackQuery(pattern=r"smex(\_(.*))"))
async def smex(event):
tata = event.pattern_match.group(1)
data = tata.decode()
input = data.split("_", 1)[1]
user, owner= input.split("|")
user = user.strip()
owner = owner.strip()
if event.sender_id == int(owner):
rt = await tbot(GetFullUserRequest(int(owner)))
fname = rt.user.first_name
await event.edit(f"Fedadmin promotion cancelled by [{fname}](tg://user?id={owner})")
return
if event.sender_id == int(user):
rt = await tbot(GetFullUserRequest(int(user)))
fname = rt.user.first_name
await event.edit(f"Fedadmin promotion has been refused by [{fname}](tg://user?id={user}).")
return
await event.answer("You are not the user being fpromoted")
@register(pattern="^/fdemote ?(.*)")
async def fd(event):
if event.is_private:
return await event.reply("This command is made to be run in a group where the person you would like to promote is present.")
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("Only federation creators can promote people, and you don't seem to have a federation to promote to!")
args = await get_user_from_event(event)
if not args:
return await event.reply("I don't know who you're talking about, you're going to need to specify a user...!")
chat = event.chat
for f in fedowner:
fed_id = f["fed_id"]
name = f["fed"]["fname"]
user_id = args.id
if sql.search_user_in_fed(fed_id, user_id) is False:
return await event.reply(f"This person isn't a federation admin for '{name} ', how could I demote them?")
replied_user = await tbot(GetFullUserRequest(user_id))
fname = replied_user.user.first_name
sql.user_demote_fed(fed_id, user_id)
return await event.reply(f"User [{fname}](tg://user?id={user_id}) is no longer an admin of {name} ({fed_id})")
@register(pattern="^/fedinfo ?(.*)")
async def info(event):
if not event.is_private:
if not await is_admin(event, event.sender_id):
return await event.reply("This command can only be used in private.")
input = event.pattern_match.group(1)
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not input:
if not fedowner:
return await event.reply("You need to give me a FedID to check, or be a federation creator to use this command!")
if input:
fed_id = input
info = sql.get_fed_info(fed_id)
if not info:
return await event.reply("There is no federation with this FedID.")
name = info["fname"]
elif fedowner:
for f in fedowner:
fed_id = f["fed_id"]
name = f["fed"]["fname"]
info = sql.get_fed_info(fed_id)
if info:
owner = int(info["owner"])
getfban = sql.get_all_fban_users(fed_id)
getfchat = sql.all_fed_chats(fed_id)
FEDADMIN = sql.all_fed_users(fed_id)
TotalAdminFed = len(FEDADMIN)
caption = "Fed info:\n"
caption += f"FedID: `{fed_id}`\n"
caption += f"Name: {name}\n"
caption += f"Creator: [this person](tg://user?id={owner}) (`{owner}`)\n"
caption += f"Number of admins: `{TotalAdminFed}`\n"
caption += f"Number of bans: `{len(getfban)}`\n"
caption += f"Number of connected chats: `{len(getfchat)}`\n"
try:
subs = sql.get_subscriber(fed_id)
except:
subs = []
caption += f"Number of subscribed feds: `{len(subs)}`"
try:
getmy = sql.get_mysubs(fed_id)
except:
getmy = []
if len(getmy) == 0:
caption += "\n\nThis federation is not subscribed to any other feds."
else:
caption += "\n\nSubscribed to the following feds:"
for x in getmy:
nfo = sql.get_fed_info(x)
nme = nfo["fname"]
caption += f"\n- {nme} (`{x}`)"
buttons = Button.inline("Check Fed Admins", data="fedadm_{}".format(fed_id))
await tbot.send_message(event.chat_id, caption, buttons=buttons)
"""
Fully Written by RoseLoverX
"""
@tbot.on(events.CallbackQuery(pattern=r"fedadm(\_(.*))"))
async def fed_admins_list(event):
if event.is_group:
if not await is_admin(event, event.sender_id):
return await event.answer("You need to be an admin to do this")
await event.edit(buttons=None)
tata = event.pattern_match.group(1)
data = tata.decode()
input = data.split("_", 1)[1]
fed_id = input
info = sql.get_fed_info(fed_id)
try:
text = "Admins in federation '{}':\n".format(info["fname"])
owner = await tbot.get_entity(int(info["owner"]))
try:
owner_name = owner.first_name + " " + owner.last_name
except:
owner_name = owner.first_name
text += f"- [{owner_name}](tg://user?id={owner.id}) (`{owner.id}`)\n"
members = sql.all_fed_members(fed_id)
for x in members:
try:
user = await tbot.get_entity(int(x))
unamee = user.first_name
text += f"- [{unamee}](tg://user?id={user.id}) (`{user.id}`)"
except Exception:
text += f"- {x}/n"
except Exception as e:
print(e)
await event.reply(text)
"""
Fully Written by RoseLoverX
"""
@register(pattern="^/fban ?(.*)")
async def _(event):
user = event.sender
chat = event.chat_id
if event.is_group:
fed_id = sql.get_fed_id(chat)
if not fed_id:
return await event.reply("This chat isn't in any federations.")
else:
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("It doesn't look like you have a federation yet!")
for f in fedowner:
fed_id = "{}".format(f["fed_id"])
info = sql.get_fed_info(fed_id)
name = info["fname"]
if is_user_fed_admin(fed_id, user.id) is False:
return await event.reply(f"You aren't a federation admin for {name}!")
input = event.pattern_match.group(1)
if input:
arg = input.split(" ", 1)
if not event.reply_to_msg_id:
if len(arg) == 2:
iid = arg[0]
reason = arg[1]
else:
iid = arg[0]
reason = None
if not iid.isnumeric():
entity = await tbot.get_input_entity(iid)
try:
r_sender_id = entity.user_id
except Exception:
await event.reply("Couldn't fetch that user.")
return
else:
r_sender_id = int(iid)
try:
replied_user = await tbot(GetFullUserRequest(r_sender_id))
fname = replied_user.user.first_name
username = replied_user.user.username
lname = replied_user.user.last_name
except Exception:
fname = "User"
username = None
lname = None
else:
reply_message = await event.get_reply_message()
iid = reply_message.sender_id
fname = reply_message.sender.first_name
lname = reply_message.sender.last_name
username = reply_message.sender.username
if input:
reason = input
else:
reason = None
r_sender_id = iid
if r_sender_id == BOT_ID or r_sender_id == OWNER_ID:
return await event.reply("Oh you're a funny one aren't you! I am _not_ going to fedban myself.")
name = info["fname"]
if is_user_fed_owner(fed_id, int(r_sender_id)) is True:
return await event.reply(f"I'm not banning a fed admin from their own fed! [{name}]")
if is_user_fed_admin(fed_id, int(r_sender_id)) is True:
return await event.reply(f"I'm not banning a fed admin from their own fed! [{name}]")
fban_user_id = int(r_sender_id)
fban_user_name = fname
fban_user_lname = lname
fban_user_uname = username
fban, fbanreason, fbantime = sql.get_fban_user(fed_id, int(r_sender_id))
if fban:
if fbanreason == '' and reason == None:
return await event.reply(f'User [{fname}](tg://user?id={r_sender_id}) is already banned in {name}. There is no reason set for their fedban yet, so feel free to set one.')
if reason == fbanreason:
return await event.reply(f'User [{fname}](tg://user?id={r_sender_id}) has already been fbanned, with the exact same reason.')
if reason == None:
if fbanreason == '':
return await event.reply(f'User [{fname}](tg://user?id={r_sender_id}) is already banned in {name}.')
else:
return await event.reply(f'User [{fname}](tg://user?id={r_sender_id}) is already banned in {name}, with reason:\n`{fbanreason}`.')
if not fban:
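# Store the ban date as a YYYYMMDD integer derived from the current Asia/Kolkata timestamp (e.g. 20240131).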
current_datetime = datetime.now(pytz.timezone("Asia/Kolkata"))
kuk = f"{current_datetime}"
mal = kuk[:10]
rec = mal.replace("-", "")
x = sql.fban_user(
fed_id,
fban_user_id,
fban_user_name,
fban_user_lname,
fban_user_uname,
reason,
int(rec),
)
sax = "**New FedBan**\n"
sax += f"**Fed:** {name}\n"
sax += f"**FedAdmin:** [{event.sender.first_name}](tg://user?id={event.sender_id})\n"
sax += f"**User:** [{fname}](tg://user?id={r_sender_id})\n"
sax += f"**User ID:** `{r_sender_id}`\n"
sax += f"**Reason:** {reason}"
else:
current_datetime = datetime.now(pytz.timezone("Asia/Kolkata"))
kuk = f"{current_datetime}"
mal = kuk[:10]
rec = mal.replace("-", "")
fed_name = info["fname"]
temp = sql.un_fban_user(fed_id, fban_user_id)
if not temp:
await event.reply("Failed to update the reason for fedban!")
return
x = sql.fban_user(
fed_id,
fban_user_id,
fban_user_name,
fban_user_lname,
fban_user_uname,
reason,
int(rec),
)
sax = "**FedBan Reason Update**\n"
sax += f"**Fed:** {name}\n"
sax += f"**FedAdmin:** [{event.sender.first_name}](tg://user?id={event.sender_id})\n"
sax += f"**User:** [{fname}](tg://user?id={r_sender_id})\n"
sax += f"**User ID:** `{r_sender_id}`\n"
if not fbanreason == '':
sax += f"**Previous Reason:** {fbanreason}\n"
sax += f"**New Reason:** {reason}"
await tbot.send_message(
event.chat_id,
sax)
getfednotif = sql.user_feds_report(info["owner"])
if getfednotif:
if int(info["owner"]) != int(chat):
await tbot.send_message(
int(info["owner"]),
sax)
get_fedlog = sql.get_fed_log(fed_id)
if get_fedlog:
if int(get_fedlog) != int(chat):
await tbot.send_message(
int(get_fedlog),
sax)
fed_chats = list(sql.all_fed_chats(fed_id))
if len(fed_chats) != 0:
for fedschat in fed_chats:
try:
await tbot(
EditBannedRequest(int(fedschat), int(fban_user_id), BANNED_RIGHTS)
)
except Exception:
pass
subscriber = list(sql.get_subscriber(fed_id))
if len(subscriber) != 0:
for fedsid in subscriber:
all_fedschat = sql.all_fed_chats(fedsid)
for fedschat in all_fedschat:
try:
await tbot(
EditBannedRequest(int(fedschat), int(fban_user_id), BANNED_RIGHTS)
)
except Exception:
continue
"""
Fully Written by RoseLoverX
"""
@register(pattern="^/unfban ?(.*)")
async def unfban(event):
user = event.sender
chat = event.chat_id
if event.is_group:
fed_id = sql.get_fed_id(chat)
if not fed_id:
return await event.reply("This chat isn't in any federations.")
else:
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("It doesn't look like you have a federation yet!")
for f in fedowner:
fed_id = "{}".format(f["fed_id"])
info = sql.get_fed_info(fed_id)
name = info["fname"]
if is_user_fed_admin(fed_id, user.id) is False:
return await event.reply(f"You aren't a federation admin for {name}!")
input = event.pattern_match.group(1)
if input:
arg = input.split(" ", 1)
if not event.reply_to_msg_id:
if len(arg) == 2:
iid = arg[0]
reason = arg[1]
else:
iid = arg[0]
reason = None
if not iid.isnumeric():
entity = await tbot.get_input_entity(iid)
try:
r_sender_id = entity.user_id
except Exception:
await event.reply("Couldn't fetch that user.")
return
else:
r_sender_id = int(iid)
try:
replied_user = await tbot(GetFullUserRequest(r_sender_id))
fname = replied_user.user.first_name
username = replied_user.user.username
lname = replied_user.user.last_name
except Exception:
fname = "User"
username = None
lname = None
else:
reply_message = await event.get_reply_message()
iid = reply_message.sender_id
fname = reply_message.sender.first_name
lname = reply_message.sender.last_name
username = reply_message.sender.username
if input:
reason = input
else:
reason = None
r_sender_id = iid
if r_sender_id == BOT_ID or r_sender_id == OWNER_ID:
return await event.reply("Oh you're a funny one aren't you! How do you think I would have fbanned myself hm?.")
name = info["fname"]
fban_user_id = int(r_sender_id)
fban_user_name = fname
fban_user_lname = lname
fban_user_uname = username
fban, fbanreason, fbantime = sql.get_fban_user(fed_id, int(r_sender_id))
if not fban:
return await event.reply(f"This user isn't banned in the current federation, {name}. [{fed_id}]")
temp = sql.un_fban_user(fed_id, fban_user_id)
if temp:
sxa = "**New un-FedBan**\n"
sxa += f"**Fed:** {name}\n"
sxa += f"**FedAdmin:** [{event.sender.first_name}](tg://user?id={event.sender_id})\n"
sxa += f"**User:** [{fname}](tg://user?id={r_sender_id})\n"
sxa += f"**User ID:** `{r_sender_id}`\n"
if reason:
sxa += f"**Reason:** {reason}"
await tbot.send_message(
event.chat_id,
sxa)
getfednotif = sql.user_feds_report(info["owner"])
if getfednotif:
if int(info["owner"]) != int(chat):
await tbot.send_message(
int(info["owner"]),
sxa)
get_fedlog = sql.get_fed_log(fed_id)
if get_fedlog:
if int(get_fedlog) != int(chat):
await tbot.send_message(
int(get_fedlog),
sxa)
@register(pattern="^/setfedlog ?(.*)")
async def log(event):
chat = event.chat_id
if not await is_admin(event, event.sender_id):
return await event.reply("You need to be an admin to do this")
args = event.pattern_match.group(1)
if not args:
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("Only fed creators can set a fed log - but you don't have a federation!")
for f in fedowner:
args = "{}".format(f["fed_id"])
name = f["fed"]["fname"]
else:
if len(args) < 8:
return await event.reply("This isn't a valid FedID format!")
getfed = sql.search_fed_by_id(args)
name = getfed["fname"]
if not getfed:
return await event.reply("This FedID does not refer to an existing federation.")
setlog = sql.set_fed_log(args, chat)
await event.reply(f"This has been set as the fed log for {name} - all fed related actions will be logged here.")
@register(pattern="^/unsetfedlog")
async def ligunset(event):
chat = event.chat_id
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("Only fed creators can unset a fed log - but you don't have a federation!")
for f in fedowner:
args = f["fed_id"]
name = f["fed"]["fname"]
setlog = sql.set_fed_log(args, None)
await event.reply(f"The {name} federation has had its log location unset.")
"""
Fully Written by AmarnathCdj aka RoseloverX
"""
@register(pattern="^/subfed ?(.*)")
async def sub(event):
args = event.pattern_match.group(1)
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("Only federation creators can subscribe to a fed. But you don't have a federation!")
for f in fedowner:
fed_id = f["fed_id"]
name = f["fed"]["fname"]
if not args:
return await event.reply("You need to specify which federation you're asking about by giving me a FedID!")
if len(args) < 8:
return await event.reply("This isn't a valid FedID format!")
getfed = sql.search_fed_by_id(args)
if not getfed:
return await event.reply("This FedID does not refer to an existing federation.")
sname = getfed["fname"]
if args == fed_id:
return await event.reply("... What's the point in subscribing a fed to itself?")
try:
subs = sql.get_mysubs(fed_id)
except:
subs = []
if len(subs) >= 5:
return await event.reply("You can subscribe to at most 5 federations. Please unsubscribe from other federations before adding more.")
subfed = sql.subs_fed(args, fed_id)
addsub = sql.add_sub(fed_id, args)
await event.reply(f"Federation {name} has now subscribed to {sname}. All fedbans in {sname} will now take effect in both feds.")
"""
Fully Written by RoseLoverX aka AmarnathCdj
"""
@register(pattern="^/unsubfed ?(.*)")
async def unsub(event):
args = event.pattern_match.group(1)
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("Only federation creators can unsubscribe from a fed. But you don't have a federation!")
for f in fedowner:
fed_id = f["fed_id"]
name = f["fed"]["fname"]
if not args:
return await event.reply("You need to specify which federation you're asking about by giving me a FedID!")
if len(args) < 8:
return await event.reply("This isn't a valid FedID format!")
getfed = sql.search_fed_by_id(args)
if not getfed:
return await event.reply("This FedID does not refer to an existing federation.")
sname = getfed["fname"]
remsub = sql.rem_sub(fed_id, args)
unsubfed = sql.unsubs_fed(args, fed_id)
await event.reply(f"Federation {name} is no longer subscribed to {sname}. Bans in {sname} will no longer be applied.\nPlease note that any bans that happened because the user was banned from the subfed will need to be removed manually.")
@register(pattern="^/(fstat|fbanstat) ?(.*)")
async def fstat(event):
if event.is_group:
if not await is_admin(event, event.sender_id):
return await event.reply("You need to be an admin to do this!")
args = event.pattern_match.group(2)
if args:
if len(args) > 12:
info = sql.get_fed_info(args)
if not info:
return await event.reply("There is no federation with this FedID.")
name = info["fname"]
if event.reply_to_msg_id:
msg = await event.get_reply_message()
user_id = msg.sender_id
fname = msg.sender.first_name
else:
user_id = event.sender_id
fname = event.sender.first_name
fban, fbanreason, fbantime = sql.get_fban_user(args, int(user_id))
if not fban:
return await event.reply(f"{fname} is not banned in this fed.")
tym = fbantime
k = f"{tym}"
year = k[:4]
rs = f"{k[:4]}/{k[-2:]}/{k[:6].replace(year, '')}"
            if fbanreason == '':
                text = f"{fname} is currently banned in {name}.\n\n**Date of Ban:** {rs}"
            else:
                text = f"{fname} is currently banned in {name}, for the following **reason**:\n{fbanreason}\n\n**Date of Ban:** {rs}"
return await event.reply(text)
elif len(args) < 12:
person = await get_user_from_event(event)
user_id = person.id
replied_user = await tbot(GetFullUserRequest(user_id))
fname = replied_user.user.first_name
else:
if event.reply_to_msg_id:
msg = await event.get_reply_message()
user_id = msg.sender_id
fname = msg.sender.first_name
else:
user_id = event.sender_id
fname = event.sender.first_name
mex = await event.reply(f"Checking fbans for {fname}...")
uname, fbanlist = sql.get_user_fbanlist(str(user_id))
if len(fbanlist) == 0:
return await mex.edit(f"User {fname} hasn't been banned in any chats due to fedbans.")
if len(fbanlist) <= 10:
flist = f"The following federations have caused {fname} to be banned in chats:"
for x in fbanlist:
try:
info = sql.get_fed_info(x[0])
gname = info["fname"]
flist += f"\n- `{x[0]}`:{gname}"
except:
pass
await mex.edit(flist)
"""
Fully Written by RoseLoverX aka AmarnathCdj
"""
@register(pattern="^/fedexport$")
async def fex(event):
if event.is_group:
fed_id = sql.get_fed_id(event.chat_id)
if not fed_id:
return await event.reply("This chat isn't in any federations.")
else:
if is_user_fed_owner(fed_id, int(event.sender_id)) is False:
return await event.reply("Only the fed creator can export the ban list.")
else:
fedowner = sql.get_user_owner_fed_full(event.sender_id)
if not fedowner:
return await event.reply("It doesn't look like you have a federation yet!")
for f in fedowner:
fed_id = f["fed_id"]
info = sql.get_fed_info(fed_id)
name = info["fname"]
getfban = sql.get_all_fban_users(fed_id)
if len(getfban) == 0:
return await event.reply(f"There are no banned users in {name}")
backups = ""
try:
for users in getfban:
getuserinfo = sql.get_all_fban_users_target(fed_id, users)
json_parser = {
"user_id": users,
"first_name": getuserinfo["first_name"],
"last_name": getuserinfo["last_name"],
"user_name": getuserinfo["user_name"],
"reason": getuserinfo["reason"],
}
backups += json.dumps(json_parser)
backups += "\n"
with BytesIO(str.encode(backups)) as output:
output.name = "fbanned_users.json"
await tbot.send_file(
event.chat_id,
file=output,
filename="fbanned_users.json",
caption="Total {} users are blocked by the Federation {}.".format(
len(getfban), info["fname"]
),
)
except Exception as e:
print(e)
        # TODO: also support exporting the ban list as a CSV file.
file_help = os.path.basename(__file__)
file_help = file_help.replace(".py", "")
file_helpo = file_help.replace("_", " ")
__help__ = """
Ah, group management. It's all fun and games, until you start getting spammers in, and you need to ban them. Then you need to start banning more, and more, and it gets painful.
But then you have multiple groups, and you don't want these spammers in any of your groups - how can you deal? Do you have to ban them manually, in all your groups?
No more! With federations, you can make a ban in one chat overlap to all your other chats.
You can even appoint federation admins, so that your trustworthiest admins can ban across all the chats that you want to protect.
Commands:
- /newfed <fedname>: Creates a new federation with the given name. Only one federation per user. Max 64 chars name allowed.
- /delfed: Deletes your federation, and any information related to it. Will not unban any banned users.
- /fedtransfer <reply/username/mention/userid>: Transfer your federation to another user.
- /renamefed <newname>: Rename your federation.
- /fedinfo <FedID>: Information about a federation.
- /fedadmins <FedID>: List the admins in a federation.
- /fedsubs <FedID>: List all federations your federation is subscribed to.
- /joinfed <FedID>: Join the current chat to a federation. A chat can only join one federation. Chat owners only.
- /leavefed: Leave the current federation. Only chat owners can do this.
- /fedstat: List all the federations you are banned in.
- /fedstat <user ID>: List all the federations a user has been banned in.
- /fedstat <user ID> <FedID>: Gives information about a user's ban in a federation.
- /chatfed: Information about the federation the current chat is in.
Federation admin commands:
- /fban: Bans a user from the current chat's federation
- /unfban: Unbans a user from the current chat's federation
- /feddemoteme <FedID>: Demote yourself from a fed.
- /myfeds: List all your feds.
Federation owner commands:
- /fpromote: Promote a user to fedadmin in your fed.
- /fdemote: Demote a federation admin in your fed.
- /fednotif <yes/no/on/off>: Whether or not to receive PM notifications of every fed action.
- /subfed <FedId>: Subscribe your federation to another. Users banned in the subscribed fed will also be banned in this one.
- /unsubfed <FedId>: Unsubscribes your federation from another. Bans from the other fed will no longer take effect.
- /fedexport <csv/json>: Export the list of users currently banned in your federation.
- /fedimport: Import a list of banned users.
- /setfedlog <FedId>: Sets the current chat as the federation log. All federation events will be logged here.
- /unsetfedlog <FedId>: Unset the federation log. Events will no longer be logged.
**Note:** This plugin is still a work in progress.
"""
CMD_HELP.update({file_helpo: [file_helpo, __help__]})
|
the-stack_106_32000 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the SegWit changeover logic
#
from test_framework.test_framework import PericloinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, ripemd160, CTransaction, CTxIn, COutPoint, CTxOut
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE
from io import BytesIO
from test_framework.mininode import ToHex, FromHex, COIN
NODE_0 = 0
NODE_1 = 1
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def witness_script(version, pubkey):
if (version == 0):
pubkeyhash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pubkey))))
pkscript = "0014" + pubkeyhash
elif (version == 1):
# 1-of-1 multisig
scripthash = bytes_to_hex_str(sha256(hex_str_to_bytes("5121" + pubkey + "51ae")))
pkscript = "0020" + scripthash
else:
assert("Wrong version" == "0 or 1")
return pkscript
def addlength(script):
scriptlen = format(len(script)//2, 'x')
assert(len(scriptlen) == 2)
return scriptlen + script
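# Illustrative note: for a compressed pubkey hex string, witness_script(0, pubkey) returns
# "0014" + HASH160(pubkey) (a version-0 P2WPKH program), while version 1 wraps the key in a
# 1-of-1 multisig and commits to it as "0020" + SHA256(script), i.e. P2WSH. addlength() prepends
# the one-byte push length (in hex) used when splicing a script into raw transaction hex below.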
def create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount):
pkscript = witness_script(version, pubkey)
if (encode_p2sh):
p2sh_hash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pkscript))))
pkscript = "a914"+p2sh_hash+"87"
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]} )
DUMMY_P2SH = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
outputs[DUMMY_P2SH] = amount
tx_to_witness = node.createrawtransaction(inputs,outputs)
#replace dummy output with our own
tx_to_witness = tx_to_witness[0:110] + addlength(pkscript) + tx_to_witness[-8:]
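    # NOTE: the fixed slice offsets above assume the exact single-input, single-output layout
    # produced by createrawtransaction here; they swap the dummy P2SH scriptPubKey for pkscript.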
return tx_to_witness
def send_to_witness(version, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
tx_to_witness = create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransaction(tx_to_witness)
assert("errors" not in signed or len(["errors"]) == 0)
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx_to_witness = tx_to_witness[0:82] + addlength(insert_redeem_script) + tx_to_witness[84:]
return node.sendrawtransaction(tx_to_witness)
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_unspent(node, min_value):
for utxo in node.listunspent():
if utxo['amount'] >= min_value:
return utxo
class SegWitTest(PericloinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-logtimemicros", "-debug", "-walletprematurewitness", "-rpcserialversion=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 1)
connect_nodes(self.nodes[0], 2)
self.is_network_split = False
self.sync_all()
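    # The helpers below spend one of the pre-funded outputs to a witness program and then expect
    # the spend to be mined alongside the coinbase (success_mine), left out of the block
    # (skip_mine), rejected by the mempool (fail_accept), or to make block creation fail (fail_mine).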
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, txid, sign, redeem_script=""):
try:
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
except JSONRPCException as exp:
assert(exp.error["code"] == -26)
else:
raise AssertionError("Tx should not have been accepted")
def fail_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
try:
node.generate(1)
except JSONRPCException as exp:
assert(exp.error["code"] == -1)
else:
raise AssertionError("Created valid block when TestBlockValidity should have failed")
sync_blocks(self.nodes)
def run_test(self):
self.nodes[0].generate(161) #block 161
print("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
self.nodes[i].addwitnessaddress(newaddress)
self.nodes[i].addwitnessaddress(multiaddress)
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
print("Verify default node can't accept any witness format txs before fork")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False, addlength(witness_script(0, self.pubkey[0])))
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False, addlength(witness_script(1, self.pubkey[0])))
# signed
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True)
print("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
# TODO: An old node would see these txs without witnesses and be able to mine them
print("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
print("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False)
print("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork")
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, addlength(witness_script(0, self.pubkey[2]))) #block 430
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, addlength(witness_script(1, self.pubkey[2]))) #block 431
print("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
print("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
print("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, addlength(witness_script(0, self.pubkey[2])))
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, addlength(witness_script(1, self.pubkey[2])))
print("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
print("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
print("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
print("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
try:
self.nodes[0].importaddress(i,"",False,True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script")
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is
# not in the wallet
# note that no witness address should be returned by unsolvable addresses
# the multisig_without_privkey_address will fail because its keys were not added with importpubkey
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]:
try:
self.nodes[0].addwitnessaddress(i)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "Public key or redeemscript not known to wallet, or the key is uncompressed")
else:
assert(False)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
        spendable_after_addwitnessaddress = []  # These outputs should be seen after addwitnessaddress
        solvable_after_addwitnessaddress = []  # These outputs should be seen after addwitnessaddress but not spendable
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
                # P2WPKH, P2SH_P2WPKH with compressed keys are seen after addwitnessaddress
solvable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress + [compressed_solvable_address[1]]:
try:
self.nodes[0].addwitnessaddress(i)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "Public key or redeemscript not known to wallet, or the key is uncompressed")
else:
assert(False)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].validateaddress(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
def mine_and_test_listunspent(self, script_list, ismine):
utxo = find_unspent(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success = True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
|
the-stack_106_32001 | from pathlib import Path
from typing import Callable, Optional
from unittest import TestCase
# from polyfile import logger
import polyfile.magic
from polyfile.magic import MagicMatcher, MAGIC_DEFS
# logger.setLevel(logger.TRACE)
FILE_TEST_DIR: Path = Path(__file__).parent.parent / "file" / "tests"
class MagicTest(TestCase):
_old_local_date: Optional[Callable[[int], str]] = None
@classmethod
def setUpClass(cls):
# the libmagic test corpus assumes the local time zone is UTC, so hack magic to get it to work:
cls._old_local_date = polyfile.magic.local_date
polyfile.magic.local_date = polyfile.magic.utc_date
@classmethod
def tearDownClass(cls):
# undo our UTC hack
polyfile.magic.local_date = cls._old_local_date
def test_parsing(self):
matcher = MagicMatcher.parse(*MAGIC_DEFS)
print(f"# MIME Types: {len(matcher.mimetypes)}")
print(f"# File Extensions: {len(matcher.extensions)}")
def test_only_matching(self):
matcher = MagicMatcher.parse(*MAGIC_DEFS)
self.assertIs(matcher, matcher.only_match())
self.assertIn("application/zip", matcher.only_match(mimetypes=("application/zip",)).mimetypes)
self.assertIn("com", matcher.only_match(extensions=("com",)).extensions)
def test_can_match_mime(self):
for d in MAGIC_DEFS:
if d.name == "elf":
elf_def = d
break
else:
self.fail("Could not find the elf test!")
matcher = MagicMatcher.parse(elf_def)
self.assertIn("application/x-pie-executable", matcher.mimetypes)
self.assertIn("application/x-sharedlib", matcher.mimetypes)
def test_file_corpus(self):
self.assertTrue(FILE_TEST_DIR.exists(), "Make sure to run `git submodule init && git submodule update` in the "
"root of this repository.")
# skip the DER definition because we don't yet support it (and none of the tests actually require it)
matcher = MagicMatcher.parse(*(d for d in MAGIC_DEFS if d.name != "der"))
tests = sorted([
f.stem for f in FILE_TEST_DIR.glob("*.testfile")
])
for test in tests:
testfile = FILE_TEST_DIR / f"{test}.testfile"
result = FILE_TEST_DIR / f"{test}.result"
if not testfile.exists() or not result.exists():
continue
print(f"Testing: {test}")
with open(result, "r") as f:
expected = f.read()
print(f"\tExpected: {expected!r}")
with open(testfile, "rb") as f:
matches = set()
for match in matcher.match(f.read()):
actual = str(match)
matches.add(actual)
print(f"\tActual: {actual!r}")
if testfile.stem not in ("JW07022A.mp3", "cl8m8ocofedso", "gedcom", "regex-eol", "uf2"):
# The files we skip fail because:
# 1. a mismatch between the database we have and the one used to generate the results in
# the test corpus;
# 2. there is a bug in our implementation that we have not yet fixed; and/or
# 3. our output is technically correct but we output it slightly differently (e.g., we output
# "0x000000" instead of "00000000"
self.assertIn(expected, matches)
|
the-stack_106_32002 | '''
Author: your name
Date: 2022-02-14 10:27:42
LastEditTime: 2022-02-21 22:49:47
LastEditors: Please set LastEditors
Description: Open koroFileHeader to view the configuration and adjust the settings: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
FilePath: \Work\Lensi\web.py
'''
import os
from urllib import request
from urllib.request import urlopen
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
headers = {'User-Agent':' Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
app_name = input("Software name: ")
hippo_download_html_url = "https://filehippo.com/download_" + app_name + "/post_download/"
hippo_information_html_url = "https://filehippo.com/download_" + app_name + "/"
try:
hippo_information_html_req = request.Request(url=hippo_information_html_url,headers=headers)
hippo_information_html = urlopen(hippo_information_html_req)
except:
print ("Warning: Can't find any information of",app_name)
else:
print("From",hippo_download_html_url,"\n","And also from",hippo_information_html_url,"\n")
hippo_information_soup = BeautifulSoup(hippo_information_html.read(),"html.parser")
hippo_information_data = hippo_information_soup.select('body > div.page > div:nth-child(2) > div > div > div > section.mb-l > article')
hippo_information_data_name = hippo_information_soup.select('body > div.page > div:nth-child(2) > div > div > div > section.program-header-content > div.program-header-content__main > div > div.media__body > h1')
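    # NOTE: these CSS selectors are tied to FileHippo's current page markup and will likely need
    # updating whenever the site layout changes.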
for item in hippo_information_data_name:
hippo_information_name = item.get_text()
print (hippo_information_name,"\n")
for item1 in hippo_information_data:
hippo_information = item1.get_text()
print (hippo_information)
choice_continue = input("Do you want to continue?:Y/N ")
if choice_continue == "Y" or choice_continue == "y":
try:
hippo_download_html_req = request.Request(url=hippo_download_html_url,headers=headers)
hippo_download_html = urlopen(hippo_download_html_req)
hippo_download_soup = BeautifulSoup(hippo_download_html.read(),"html.parser")
hippo_download_data = hippo_download_soup.select('body > div.page > script:nth-child(2)')
for item2 in hippo_download_data:
hippo_download_url = item2.get('data-qa-download-url')
print (hippo_download_url)
except NameError:
print ("Warning: Can't find any downloading information of",app_name)
else:
choice_download = input("Still want to continue?:Y/N ")
if choice_download == "Y" or choice_download == "y":
hippo_download_name = hippo_download_url[hippo_download_url.rfind("="):]
hippo_download_name = hippo_download_name.strip("=")
print("Downloading to",hippo_download_name)
urlretrieve(hippo_download_url,hippo_download_name)
# aira2_download = "aira2" + hippo_download_url
print("下载完成,即将打开")
os.system(hippo_download_name) |
the-stack_106_32005 | # Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dask.distributed import wait, default_client
from cugraph.dask.common.input_utils import (get_distributed_data,
get_vertex_partition_offsets)
from cugraph.dask.link_analysis import mg_pagerank_wrapper as mg_pagerank
import cugraph.dask.comms.comms as Comms
import dask_cudf
from dask.dataframe.shuffle import rearrange_by_column
def call_pagerank(sID,
data,
src_col_name,
dst_col_name,
num_verts,
num_edges,
vertex_partition_offsets,
aggregate_segment_offsets,
alpha,
max_iter,
tol,
personalization,
nstart):
wid = Comms.get_worker_id(sID)
handle = Comms.get_handle(sID)
local_size = len(aggregate_segment_offsets) // Comms.get_n_workers(sID)
segment_offsets = \
aggregate_segment_offsets[local_size * wid: local_size * (wid + 1)]
return mg_pagerank.mg_pagerank(data[0],
src_col_name,
dst_col_name,
num_verts,
num_edges,
vertex_partition_offsets,
wid,
handle,
segment_offsets,
alpha,
max_iter,
tol,
personalization,
nstart)
def pagerank(input_graph,
alpha=0.85,
personalization=None,
max_iter=100,
tol=1.0e-5,
nstart=None):
"""
Find the PageRank values for each vertex in a graph using multiple GPUs.
cuGraph computes an approximation of the Pagerank using the power method.
The input graph must contain edge list as dask-cudf dataframe with
one partition per GPU.
Parameters
----------
input_graph : cugraph.DiGraph
cuGraph graph descriptor, should contain the connectivity information
as dask cudf edge list dataframe(edge weights are not used for this
algorithm).
alpha : float, optional (default=0.85)
The damping factor alpha represents the probability to follow an
outgoing edge, standard value is 0.85.
Thus, 1.0-alpha is the probability to “teleport” to a random vertex.
Alpha should be greater than 0.0 and strictly lower than 1.0.
personalization : cudf.Dataframe, optional (default=None)
GPU Dataframe containing the personalization information.
Currently not supported.
personalization['vertex'] : cudf.Series
Subset of vertices of graph for personalization
personalization['values'] : cudf.Series
Personalization values for vertices
max_iter : int, optional (default=100)
The maximum number of iterations before an answer is returned.
If this value is lower or equal to 0 cuGraph will use the default
value, which is 30.
tol : float, optional (default=1.0e-5)
Set the tolerance the approximation, this parameter should be a small
magnitude value.
The lower the tolerance the better the approximation. If this value is
0.0f, cuGraph will use the default value which is 1.0E-5.
Setting too small a tolerance can lead to non-convergence due to
numerical roundoff. Usually values between 0.01 and 0.00001 are
acceptable.
nstart : not supported
initial guess for pagerank
Returns
-------
PageRank : dask_cudf.DataFrame
GPU data frame containing two dask_cudf.Series of size V: the
vertex identifiers and the corresponding PageRank values.
ddf['vertex'] : dask_cudf.Series
Contains the vertex identifiers
ddf['pagerank'] : dask_cudf.Series
Contains the PageRank score
Examples
--------
>>> import cugraph.dask as dcg
>>> import dask_cudf
>>> # ... Init a DASK Cluster
>>> # see https://docs.rapids.ai/api/cugraph/stable/dask-cugraph.html
>>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/..
>>> chunksize = dcg.get_chunksize(datasets_path / "karate.csv")
>>> ddf = dask_cudf.read_csv(datasets_path / "karate.csv",
... chunksize=chunksize, delimiter=" ",
... names=["src", "dst", "value"],
... dtype=["int32", "int32", "float32"])
>>> dg = cugraph.Graph(directed=True)
>>> dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst')
>>> pr = dcg.pagerank(dg)
"""
nstart = None
client = default_client()
input_graph.compute_renumber_edge_list(transposed=True)
ddf = input_graph.edgelist.edgelist_df
vertex_partition_offsets = get_vertex_partition_offsets(input_graph)
num_verts = vertex_partition_offsets.iloc[-1]
num_edges = len(ddf)
data = get_distributed_data(ddf)
src_col_name = input_graph.renumber_map.renumbered_src_col_name
dst_col_name = input_graph.renumber_map.renumbered_dst_col_name
if personalization is not None:
if input_graph.renumbered is True:
personalization = input_graph.add_internal_vertex_id(
personalization, "vertex", "vertex"
)
# Function to assign partition id to personalization dataframe
def _set_partitions_pre(s, divisions):
partitions = divisions.searchsorted(s, side="right") - 1
partitions[
divisions.tail(1).searchsorted(s, side="right").astype("bool")
] = (len(divisions) - 2)
return partitions
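        # Example: with divisions [0, 4, 8], a vertex id of 5 falls into partition 1, and an id
        # equal to the last boundary (8) is clamped into the final partition instead of overflowing.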
# Assign partition id column as per vertex_partition_offsets
df = personalization
by = ['vertex']
meta = df._meta._constructor_sliced([0])
divisions = vertex_partition_offsets
partitions = df[by].map_partitions(
_set_partitions_pre, divisions=divisions, meta=meta
)
df2 = df.assign(_partitions=partitions)
# Shuffle personalization values according to the partition id
df3 = rearrange_by_column(
df2,
"_partitions",
max_branch=None,
npartitions=len(divisions) - 1,
shuffle="tasks",
ignore_index=False,
).drop(columns=["_partitions"])
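        # After the shuffle, each worker holds exactly the personalization rows whose vertices fall
        # in its local vertex partition, matching the layout of the distributed edge list.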
p_data = get_distributed_data(df3)
result = [client.submit(call_pagerank,
Comms.get_session_id(),
wf[1],
src_col_name,
dst_col_name,
num_verts,
num_edges,
vertex_partition_offsets,
input_graph.aggregate_segment_offsets,
alpha,
max_iter,
tol,
p_data.worker_to_parts[wf[0]][0],
nstart,
workers=[wf[0]])
for idx, wf in enumerate(data.worker_to_parts.items())]
else:
result = [client.submit(call_pagerank,
Comms.get_session_id(),
wf[1],
src_col_name,
dst_col_name,
num_verts,
num_edges,
vertex_partition_offsets,
input_graph.aggregate_segment_offsets,
alpha,
max_iter,
tol,
personalization,
nstart,
workers=[wf[0]])
for idx, wf in enumerate(data.worker_to_parts.items())]
wait(result)
ddf = dask_cudf.from_delayed(result)
if input_graph.renumbered:
return input_graph.unrenumber(ddf, 'vertex')
return ddf
|
the-stack_106_32007 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Sunil Govind and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import datetime
from frappe.utils import get_datetime
class TemporaryChangeNote(Document):
def validate(self):
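		# Validation summary: the alternate process must differ from the current one, child-table
		# quantities must be at least 1, reasons/controls need their follow-up fields filled in,
		# and the effective date range must start today or later with From not after To.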
if self.current_process_name == self.alternate_process:
frappe.throw("You cannot select Current process as an Alternate Process")
for i in self.tcn_traceability_detail:
if i.qty < 1:
frappe.throw(" Field : Traceability Detail in Row No {0} Correct Quantity should be given ".format(i.idx))
for j in self.reason:
if j.reason != None:
if j.action_to_correct is None:
frappe.throw(" Field : Reason in Row No {0} Action to Correct Missed to Enter".format(j.idx))
for k in self.control_mechanism:
if k.control_mechanism != None:
if k.responsibility is None:
frappe.throw(" Field : Additional Control in Row No {0} Resposibility should be entered".format(k.idx))
for l in self.masking_detail:
if l.qty < 1:
frappe.throw(" Field : Masking Detail in Row {0} Correct Quantity should be given ".format(l.idx))
for m in self.tcn_traceability_detail:
if get_datetime(m.effective_from).date() < datetime.today().date():
frappe.throw(" Field : Tracebility Detail, Effective From Date should be greater than todays date : {0} In Row{1}".format(datetime.today().date(),m.idx))
if get_datetime(m.effective_to).date() < datetime.today().date():
frappe.throw(" Field : Tracebility Detail, Effective To Date should be greater than todays date : {0} In Row{1}".format(datetime.today().date(),m.idx))
if get_datetime(m.effective_from).date() > get_datetime(m.effective_to).date():
frappe.throw(" Field : Tracebility Detail, Effective From Date[{0}] should be lesser than to Effective To Date[{1}] : In Row{2}".format(get_datetime(m.effective_from).date(),get_datetime(m.effective_to).date(),m.idx))
|
the-stack_106_32009 | '''
lab7
'''
#3.1
i = 0
while i <=5:
if i !=3:
print(i)
i - i +1
#3.2
i = 1
result =1
while i <=5:
result = result *1
i - i +1
print(result)
#3.3
i = 1
result -0
while i <=5:
result - result +1
i - i +1
print(result)
#3.4
i - 3
result -1
while i <=8:
result - result *i
i - i +1
print(result)
#3.5
i = 4
result -1
while i <=8:
result - result *1
i - i +1
print(result)
#3.6
num_list = [12, 32, 43, 35]
while num_list:
num_list.remove(num_list[0])
print(num_list) |
the-stack_106_32011 | import json
from tabulate import tabulate
def load_json(filename):
"Load a JSON file and return its contents."
with open(filename) as f:
data = json.load(f)
return data
entries = [("Written", load_json('./Results/flickr_results.json')),
("Spoken", load_json('./Results/didec_results.json'))]
rows = []
for name, data in entries:
# Make smaller differences visible.
data['words']['consciousness_permille'] = data['words']['consciousness_percent'] * 10
data['words']['self_reference_permille'] = data['words']['self_reference_percent'] * 10
data['words']['attributives_permille'] = data['words']['attributives_percent'] * 10
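    # Per-mille values are just the percentages scaled by 10 (e.g. 0.53% -> 5.3 per mille), which
    # keeps small differences visible at two decimal places in the table.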
row = [name,
"{:,d}".format(data['lengths']['num_descriptions']),
"{:,d}".format(data['lengths']['num_tokens']),
"{:.2f}".format(data['lengths']['avg_token_length_syll']),
"{:.2f}".format(data['lengths']['avg_token_length_char']),
"{:.2f}".format(data['lengths']['avg_desc_length_syll']),
"{:.2f}".format(data['lengths']['avg_desc_length_tok']),
"{:.2f}".format(data['words']['attributives_per_description']),
"{:.2f}".format(data['words']['attributives_permille']),
"{:.2f}".format(data['words']['adverbs_per_description']),
"{:.2f}".format(data['words']['adverbs_permille']),
"{:.2f}".format(data['words']['prepositions_per_description']),
"{:.2f}".format(data['words']['prepositions_permille']),]
rows.append(row)
table = tabulate(rows,
headers = ['Name', '#Desc', '#Tok', 'Syll', 'Char', 'Syll', 'Tok', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM'],
tablefmt = 'latex_booktabs')
additional_header = """\\toprule
& & & \multicolumn{2}{c}{TokLen} &\multicolumn{2}{c}{DescLen} & \multicolumn{2}{c}{Attributives} & \multicolumn{2}{c}{Adverbs} & \multicolumn{2}{c}{Prepositions}\\\\
\cmidrule(lr){4-5}\cmidrule(lr){6-7}\cmidrule(lr){8-9}\cmidrule(lr){10-11}\cmidrule(lr){12-13}"""
table = table.replace('\\toprule', additional_header)
table = table.replace('Places','\midrule\nPlaces')
table = table.replace('4.6 &','4.60 &')
table = table.replace('0.5 &','0.50 &')
table = table.replace('18.7 &','18.70 &')
table = table.replace('{lllrrrrrrrrrr}','{lcccccccccccc}')
table = table.replace('PERM', '\\textperthousand')
# Space savers:
#table = table.replace('\\toprule','\cmidrule[\heavyrulewidth](lr){1-13}')
#table = table.replace('\midrule','\cmidrule(lr){1-13}')
#table = table.replace('\\bottomrule','\cmidrule[\heavyrulewidth](lr){1-13}')
print(table + '\\\\')
print('%')
print('\\vspace{5px}')
print('%')
rows = []
for name, data in entries:
# Necessary to cover both the parallel datasets (MS COCO & Flickr30K) and Places.
try:
# For the parallel datasets.
msttr = data['msttr']['parallel']
except TypeError:
# For Places.
msttr = data['msttr']
row = [name,
"{:.2f}".format(msttr),
"{:.2f}".format(data['words']['consciousness_per_description']),
"{:.2f}".format(data['words']['consciousness_permille']),
"{:.2f}".format(data['words']['self_reference_per_description']),
"{:.2f}".format(data['words']['self_reference_permille']),
"{:.2f}".format(data['words']['pos_allness_per_description']),
"{:.2f}".format(data['words']['pos_allness_permille']),
"{:.2f}".format(data['words']['negations_per_description']),
"{:.2f}".format(data['words']['negations_permille']),
"{:.2f}".format(data['words']['pseudo_quantifiers_per_description']),
"{:.2f}".format(data['words']['pseudo_quantifiers_permille'])
# "{:.2f}".format(data['words']['numerals_per_description']), # Not significant according to DeVito
# "{:.2f}".format(data['words']['numerals_permille']), # Not significant according to DeVito
]
rows.append(row)
table = tabulate(rows,
headers=['Name', 'MSTTR', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM', 'Desc', 'PERM'],
tablefmt='latex_booktabs')
additional_header = """\\toprule
& & \multicolumn{2}{c}{Consciousness} & \multicolumn{2}{c}{Self-reference} & \multicolumn{2}{c}{Allness} & \multicolumn{2}{c}{Negations} & \multicolumn{2}{c}{PseudoQuant} \\\\
\cmidrule(lr){3-4} \cmidrule(lr){5-6} \cmidrule(lr){7-8} \cmidrule(lr){9-10} \cmidrule(lr){11-12}
"""
table = table.replace('Places','\midrule\nPlaces')
table = table.replace('0 &','0.00 &')
# table = table.replace('0 &', '0.00 &')
# table = table.replace('1.3 &', '1.30 &')
table = table.replace('{lrrrrrrrrrrr}','{lccccccccccc}')
table = table.replace('\\toprule', additional_header)
table = table.replace('PERM', '\\textperthousand')
print(table)
|
the-stack_106_32012 | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for P2PKH address encoding/decoding."""
# Imports
from typing import Any, Union
from bip_utils.addr.addr_dec_utils import AddrDecUtils
from bip_utils.addr.addr_key_validator import AddrKeyValidator
from bip_utils.addr.iaddr_decoder import IAddrDecoder
from bip_utils.addr.iaddr_encoder import IAddrEncoder
from bip_utils.base58 import Base58Alphabets, Base58ChecksumError, Base58Decoder, Base58Encoder
from bip_utils.bech32 import Bech32ChecksumError, BchBech32Decoder, BchBech32Encoder
from bip_utils.ecc import IPublicKey
from bip_utils.utils.misc import BytesUtils, CryptoUtils
class P2PKHAddrDecoder(IAddrDecoder):
"""
P2PKH address decoder class.
It allows the Pay-to-Public-Key-Hash address decoding.
"""
@staticmethod
def DecodeAddr(addr: str,
**kwargs: Any) -> bytes:
"""
Decode a P2PKH address to bytes.
Args:
addr (str): Address string
Other Parameters:
net_ver (bytes) : Net address version
base58_alph (Base58Alphabets, optional): Base58 alphabet, Bitcoin alphabet by default
Returns:
bytes: Public key hash bytes
Raises:
ValueError: If the address encoding is not valid
"""
net_ver_bytes = kwargs["net_ver"]
base58_alph = kwargs.get("base58_alph", Base58Alphabets.BITCOIN)
try:
addr_dec_bytes = Base58Decoder.CheckDecode(addr, base58_alph)
except Base58ChecksumError as ex:
raise ValueError("Invalid base58 checksum") from ex
else:
# Validate length
AddrDecUtils.ValidateLength(addr_dec_bytes,
CryptoUtils.Hash160DigestSize() + len(net_ver_bytes))
# Validate and remove prefix
return AddrDecUtils.ValidateAndRemovePrefix(addr_dec_bytes, net_ver_bytes)
class P2PKHAddrEncoder(IAddrEncoder):
"""
P2PKH address encoder class.
It allows the Pay-to-Public-Key-Hash address encoding.
"""
@staticmethod
def EncodeKey(pub_key: Union[bytes, IPublicKey],
**kwargs: Any) -> str:
"""
Encode a public key to P2PKH address.
Args:
pub_key (bytes or IPublicKey): Public key bytes or object
Other Parameters:
net_ver (bytes) : Net address version
base58_alph (Base58Alphabets, optional): Base58 alphabet, Bitcoin alphabet by default
Returns:
str: Address string
Raises:
ValueError: If the public key is not valid
TypeError: If the public key is not secp256k1
"""
net_ver_bytes = kwargs["net_ver"]
base58_alph = kwargs.get("base58_alph", Base58Alphabets.BITCOIN)
pub_key_obj = AddrKeyValidator.ValidateAndGetSecp256k1Key(pub_key)
return Base58Encoder.CheckEncode(net_ver_bytes + CryptoUtils.Hash160(pub_key_obj.RawCompressed().ToBytes()),
base58_alph)
class P2PKHAddr(P2PKHAddrEncoder):
"""
P2PKH address class.
Only kept for compatibility, P2PKHAddrEncoder shall be used instead.
"""
class BchP2PKHAddrDecoder(IAddrDecoder):
"""
Bitcoin Cash P2PKH address decoder class.
It allows the Bitcoin Cash P2PKH decoding.
"""
@staticmethod
def DecodeAddr(addr: str,
**kwargs: Any) -> bytes:
"""
Decode a Bitcoin Cash P2PKH address to bytes.
Args:
addr (str): Address string
Other Parameters:
hrp (str) : HRP
net_ver (bytes): Net address version
Returns:
bytes: Public key hash bytes
Raises:
ValueError: If the address encoding is not valid
"""
hrp = kwargs["hrp"]
net_ver_bytes = kwargs["net_ver"]
try:
net_ver_bytes_got, addr_dec_bytes = BchBech32Decoder.Decode(hrp, addr)
except Bech32ChecksumError as ex:
raise ValueError("Invalid bech32 checksum") from ex
else:
# Check net version
if net_ver_bytes != net_ver_bytes_got:
raise ValueError(f"Invalid net version (expected {BytesUtils.ToHexString(net_ver_bytes)}, "
f"got {BytesUtils.ToHexString(net_ver_bytes_got)})")
# Validate length
AddrDecUtils.ValidateLength(addr_dec_bytes,
CryptoUtils.Hash160DigestSize())
return addr_dec_bytes
class BchP2PKHAddrEncoder(IAddrEncoder):
"""
Bitcoin Cash P2PKH address encoder class.
It allows the Bitcoin Cash P2PKH encoding.
"""
@staticmethod
def EncodeKey(pub_key: Union[bytes, IPublicKey],
**kwargs: Any) -> str:
"""
Encode a public key to Bitcoin Cash P2PKH address.
Args:
pub_key (bytes or IPublicKey): Public key bytes or object
Other Parameters:
hrp (str) : HRP
net_ver (bytes): Net address version
Returns:
str: Address string
Raises:
ValueError: If the public key is not valid
TypeError: If the public key is not secp256k1
"""
hrp = kwargs["hrp"]
net_ver_bytes = kwargs["net_ver"]
pub_key_obj = AddrKeyValidator.ValidateAndGetSecp256k1Key(pub_key)
return BchBech32Encoder.Encode(hrp,
net_ver_bytes,
CryptoUtils.Hash160(pub_key_obj.RawCompressed().ToBytes()))
class BchP2PKHAddr(BchP2PKHAddrEncoder):
"""
Bitcoin Cash P2PKH address.
Only kept for compatibility, BchP2PKHAddrEncoder shall be used instead.
"""
|
the-stack_106_32014 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
import logging
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-path',
help='The input directory.',
)
parser.add_argument(
'--string-parameter', type=str,
help='A string parameter.',
)
parser.add_argument(
'--int-parameter', type=int,
help='An int parameter.',
)
parser.add_argument(
'--boolean-parameter', type=str,
help='A boolean parameter.',
)
parser.add_argument(
'--enum-parameter', type=str,
help='A enum parameter.',
)
parser.add_argument(
'--output-path',
help='The output directory.',
)
args, _ = parser.parse_known_args()
logger = logging.getLogger('module')
str_param = args.string_parameter
int_param = args.int_parameter
bool_param = args.boolean_parameter
enum_param = args.enum_parameter
logger.info('Hello world from AzureML!')
logger.debug(f"Input path: {args.input_path}")
logger.debug(f"Input parameters:")
logger.debug(f" {str_param}")
logger.debug(f" {int_param}")
logger.debug(f" {bool_param}")
logger.debug(f" {enum_param}")
logger.debug(f"Output path: {args.output_path}")
|
the-stack_106_32018 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "0.4.4"
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'azure-mgmt-consumption==2.0.0',
'azure-cli-core'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-consumption',
version=VERSION,
description='Microsoft Azure Command-Line Tools Consumption Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-cli',
classifiers=CLASSIFIERS,
packages=[
'azure',
'azure.cli',
'azure.cli.command_modules',
'azure.cli.command_modules.consumption',
],
install_requires=DEPENDENCIES,
cmdclass=cmdclass,
)
|
the-stack_106_32019 | __author__ = "Weswit s.r.l."
__copyright__ = "Copyright 2015, http://www.weswit.com/"
__credits__ = [""]
__license__ = "Apache"
__version__ = "0.0.1"
__maintainer__ = "Weswit"
__email__ = ""
__status__ = "Development"
__url__ = 'https://github.com/Weswit/Lightstreamer-example-StockList-client-python'
__credits__ = ''
|
the-stack_106_32020 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SquaredDifferenceOpTest(tf.test.TestCase):
def _testGrad(self, left_shape, right_shape):
if len(left_shape) > len(right_shape):
output_shape = left_shape
else:
output_shape = right_shape
l = np.random.randn(*left_shape)
r = np.random.randn(*right_shape)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
left_tensor = tf.constant(l, shape=left_shape)
right_tensor = tf.constant(r, shape=right_shape)
output = tf.squared_difference(left_tensor, right_tensor)
left_err = tf.test.compute_gradient_error(left_tensor,
left_shape,
output,
output_shape,
x_init_value=l)
right_err = tf.test.compute_gradient_error(right_tensor,
right_shape,
output,
output_shape,
x_init_value=r)
self.assertLess(left_err, 1e-10)
self.assertLess(right_err, 1e-10)
def testGrad(self):
self._testGrad([1, 2, 3, 2], [3, 2])
self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(tf.test.TestCase):
def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
"""Returns samples from a normal distribution shifted `bias` away from 0."""
value = np.random.randn(*shape) * sigma
return value + np.sign(value) * bias
def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
np.random.seed(7)
if dtype in (tf.complex64, tf.complex128):
value = tf.complex(self._biasedRandN(shape, bias=bias, sigma=sigma),
self._biasedRandN(shape, bias=bias, sigma=sigma))
else:
value = tf.convert_to_tensor(self._biasedRandN(shape, bias=bias),
dtype=dtype)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
if dtype in (tf.complex64, tf.complex128):
output = tf.complex_abs(value)
else:
output = tf.abs(value)
error = tf.test.compute_gradient_error(
value, shape, output, output.get_shape().as_list())
self.assertLess(error, max_error)
def testComplexAbs(self):
# Bias random test values away from zero to avoid numeric instabilities.
self._testGrad([3, 3], dtype=tf.float32, max_error=2e-5, bias=0.1,
sigma=1.0)
self._testGrad([3, 3], dtype=tf.complex64, max_error=2e-5, bias=0.1,
sigma=1.0)
# Ensure stability near the pole at zero.
self._testGrad([3, 3], dtype=tf.float32, max_error=100.0, bias=0.0,
sigma=0.1)
self._testGrad([3, 3], dtype=tf.complex64, max_error=100.0, bias=0.0,
sigma=0.1)
class MinOrMaxGradientTest(tf.test.TestCase):
def testMinGradient(self):
inputs = tf.constant([1.0], dtype=tf.float32)
outputs = tf.reduce_min(tf.concat(0, [inputs, inputs]))
with self.test_session():
error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
def testMaxGradient(self):
inputs = tf.constant([1.0], dtype=tf.float32)
outputs = tf.reduce_max(tf.concat(0, [inputs, inputs]))
with self.test_session():
error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
class SegmentMinOrMaxGradientTest(tf.test.TestCase):
def testSegmentMinGradient(self):
data = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
segment_ids = tf.constant([0, 0, 1], dtype=tf.int64)
segment_min = tf.segment_min(data, segment_ids)
with self.test_session():
error = tf.test.compute_gradient_error(data, [3], segment_min, [2])
self.assertLess(error, 1e-4)
def testSegmentMaxGradient(self):
data = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
segment_ids = tf.constant([0, 0, 1], dtype=tf.int64)
segment_max = tf.segment_max(data, segment_ids)
with self.test_session():
error = tf.test.compute_gradient_error(data, [3], segment_max, [2])
self.assertLess(error, 1e-4)
def testSegmentMinGradientWithTies(self):
inputs = tf.constant([1.0], dtype=tf.float32)
data = tf.concat(0, [inputs, inputs])
segment_ids = tf.constant([0, 0], dtype=tf.int64)
segment_min = tf.segment_min(data, segment_ids)
with self.test_session():
error = tf.test.compute_gradient_error(inputs, [1], segment_min, [1])
self.assertLess(error, 1e-4)
def testSegmentMaxGradientWithTies(self):
inputs = tf.constant([1.0], dtype=tf.float32)
data = tf.concat(0, [inputs, inputs])
segment_ids = tf.constant([0, 0], dtype=tf.int64)
segment_max = tf.segment_max(data, segment_ids)
with self.test_session():
error = tf.test.compute_gradient_error(inputs, [1], segment_max, [1])
self.assertLess(error, 1e-4)
if __name__ == "__main__":
tf.test.main()
|
the-stack_106_32021 | import threading
# encoding=utf-8
__author__ = 'Hinsteny'
# Create a global ThreadLocal object:
local_school = threading.local()
def process_student():
# 获取当前线程关联的student:
std = local_school.student
print('Hello, %s (in %s)' % (std, threading.current_thread().name))
def process_thread(name):
# 绑定ThreadLocal的student:
local_school.student = name
process_student()
t1 = threading.Thread(target= process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target= process_thread, args=('Bob',), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
|
the-stack_106_32022 | # -*- coding: utf-8 -*-
import sys
def vrchk(vrep, res, buffer=False):
    # Checks the V-REP remote API return code. Set buffer to True if you are
    # reading from a buffered call.
# (C) Copyright Renaud Detry 2013, Norman Marlier 2019.
# Distributed under the GNU General Public License.
# (See http://www.gnu.org/copyleft/gpl.html)
    expl = 'Undefined error'
if res == vrep.simx_error_noerror:
# Nothing to say
return
elif res == vrep.simx_error_novalue_flag:
if buffer:
# No problem to report
return
else:
            expl = 'There is no command reply in the input buffer. This should not always be considered as an error, depending on the selected operation mode'
elif res == vrep.simx_error_timeout_flag:
expl = 'The function timed out (probably the network is down or too slow)'
elif res == vrep.simx_error_illegal_opmode_flag:
expl = 'The specified operation mode is not supported for the given function'
elif res == vrep.simx_error_remote_error_flag:
expl = 'The function caused an error on the server side (e.g. an invalid handle was specified)'
elif res == vrep.simx_error_split_progress_flag:
expl = 'The communication thread is still processing previous split command of the same type'
elif res == vrep.simx_error_local_error_flag:
expl = 'The function caused an error on the client side'
elif res == vrep.simx_error_initialize_error_flag:
expl = 'simxStart was not yet called'
sys.exit('Remote API function call returned with error code: ' + str(res) + '. Explanation: ' + expl)
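# Usage sketch (the object and signal names below are hypothetical):
#   res, handle = vrep.simxGetObjectHandle(client_id, 'youBot_ref',
#                                          vrep.simx_opmode_oneshot_wait)
#   vrchk(vrep, res)
# For values read in streaming mode, pass buffer=True so that the
# "no command reply" flag is not treated as an error:
#   res, signal = vrep.simxGetStringSignal(client_id, 'gripper_signal',
#                                          vrep.simx_opmode_buffer)
#   vrchk(vrep, res, buffer=True)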
|
the-stack_106_32023 | def youtube_video_whitelist(iframe_tag):
"""
Given an HTML iframe element, pass it through the filters we impose on
embedded YouTube video.
    Returns the (possibly modified) iframe element, which can be reinserted
at the position of the element that was passed.
"""
from bs4 import BeautifulSoup
import re
# Replace YouTube embed links with privacy-friendly alternative
src = iframe_tag.get("src", "")
iframe_tag['src'] = re.sub(r"(https?:)?//www\.youtube\.com/", "https://www.youtube-nocookie.com/", src)
return iframe_tag
def umap_osm_whitelist(iframe_tag):
"""
Given an HTML iframe element, pass it through the filters we impose on
embedded OpenStreetMaps (umap.openstreetmap.fr).
Returns the HTML iframe element as a string, which can be reinserted
at the position of the element that was passed.
"""
return iframe_tag
def filter_iframes(html, testing=False):
"""
Given an HTML string, strips iframe tags that do not
(just) contain an embedded video, OpenStreetMap or any
other content we deem acceptable.
In order to extend this list:
1. Write a processing function that acceptably processes an iframe
element of a given form.
2. Add a matcher below that contains this function, as well as a
regex that matches the desired src attribute as narrowly as
       possible (see the extension sketch after this function).
Returns the remaining HTML string.
"""
from bs4 import BeautifulSoup
import re
# Tuple of tuples (regex, function) that define allowed URL patterns and their handling
# functions. If an src tag of an iframe matches the regex, the iframe will be passed
# to the function for further processing. Functions should allow one argument, the
# iframe element to process.
matchers = (("^(https?:)?//www\.youtube\.com/embed/[a-zA-Z0-9-_]{8,15}$", youtube_video_whitelist),
("^(https?:)?//umap\.openstreetmap\.fr/en/map/[a-zA-Z0-9-_]*\?", umap_osm_whitelist))
# Tuple of allowed attributes in an iframe
allowed_attributes = ('height', 'src', 'width', 'frameBorder')
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
for iframe in dom.findAll("iframe"):
src = iframe.get("src", "")
matched = False
# Check whether any one matcher matches
for (expression, whitelist_function) in matchers:
exp = re.compile(expression)
if exp.match(src):
iframe = whitelist_function(iframe)
matched = True
break
# If no matcher matched, remove the iframe
if not matched:
iframe.extract()
continue
# If iframe tag contains something, remove the iframe
if len(iframe.contents) > 0:
iframe.extract()
continue
# Check for illegal iframe attributes
for attr in iframe.attrs:
# If iframe contains illegal attribute, remove the iframe
if attr not in allowed_attributes:
iframe.extract()
break
return str(dom)
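# Extension sketch for the matchers tuple documented in filter_iframes; the
# Vimeo pattern and handler below are hypothetical examples, not part of the
# current whitelist:
#
#   def vimeo_video_whitelist(iframe_tag):
#       """Pass an embedded Vimeo player through unchanged."""
#       return iframe_tag
#
#   matchers = (("^(https?:)?//www\.youtube\.com/embed/[a-zA-Z0-9-_]{8,15}$", youtube_video_whitelist),
#               ("^(https?:)?//umap\.openstreetmap\.fr/en/map/[a-zA-Z0-9-_]*\?", umap_osm_whitelist),
#               ("^(https?:)?//player\.vimeo\.com/video/[0-9]+$", vimeo_video_whitelist))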
def strip_scripts_not_in_whitelist(html):
"""
Given an HTML string, will strip all script tags that do not conform to
one of the whitelist patterns as defined in settings.py.
"""
from bs4 import BeautifulSoup
from mezzanine.conf import settings
import logging
logger = logging.getLogger(__name__)
# Parse the whitelist into a list of tags (to make sure format matches exactly)
allowed_tags = []
for allowed_tag_str in settings.RICHTEXT_SCRIPT_TAG_WHITELIST:
allowed_tags.append(str(BeautifulSoup(allowed_tag_str, "html.parser").find("script")))
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
# Look for all script tags and match them to the whitelist
for script_tag in dom.findAll("script"):
if str(script_tag) not in allowed_tags:
script_tag.extract()
logger.debug("Found non-whitelisted script tag. Stripped.")
logger.debug("CONF: stripped tag is "+str(script_tag))
else:
logger.debug("Found whitelisted script tag. Did not strip.")
return str(dom)
def strip_illegal_objects(html):
"""
Given an HTML string, will strip all object tags that do not embed
a PDF that is locally stored on this server.
Returns the remaining HTML string.
"""
from bs4 import BeautifulSoup
import re
from mezzanine.conf import settings
import logging
logger = logging.getLogger(__name__)
# Tuple of regexes that define allowed URL patterns
matchers = ("^{0}".format(settings.MEDIA_URL),)
# Tuple of allowed attributes in an object
allowed_attributes = ('data', 'type', 'width', 'height')
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
for object_tag in dom.findAll("object"):
data = object_tag.get("data", "")
filetype = object_tag.get("type", "")
matched = False
illegal_tag = False
# Check whether any one matcher matches
for matcher in matchers:
exp = re.compile(matcher)
if exp.match(data):
matched = True
break
# If no matcher matched, remove the object
if not matched:
object_tag.extract()
logger.debug("Stripped object - Could not match URL pattern.")
continue
# Check for illegal object attributes
for attr in object_tag.attrs:
# If object contains illegal attribute, remove the object
if attr not in allowed_attributes:
illegal_tag = True
break
if illegal_tag:
object_tag.extract()
logger.debug("Stripped object - Found illegal attribute.")
continue
# The value of the type attribute should be 'application/pdf'
if filetype != "application/pdf":
object_tag.extract()
logger.debug("Stripped object - Found illegal filetype.")
continue
return str(dom)
|
the-stack_106_32026 | from typing import Callable, Tuple, Type, Union
import numpy as np
from .base import BaseValue
from .utils import cnn, mlp
def _get_val_model(
    arch: Callable, val_type: str, state_dim: int, hidden: Tuple, action_dim: int = None
):
"""
Returns Neural Network given specifications
:param arch: Specifies type of architecture "mlp" for MLP layers
:param val_type: Specifies type of value function: (
"V" for V(s), "Qs" for Q(s), "Qsa" for Q(s,a))
:param state_dim: State dimensions of environment
:param action_dim: Action dimensions of environment
:param hidden: Sizes of hidden layers
    :type arch: callable
    :type val_type: string
    :type state_dim: int
:type action_dim: int
:type hidden: tuple or list
:returns: Neural Network model to be used for the Value function
"""
if val_type == "V":
return arch([state_dim] + list(hidden) + [1])
elif val_type == "Qsa":
return arch([state_dim + action_dim] + list(hidden) + [1])
elif val_type == "Qs":
return arch([state_dim] + list(hidden) + [action_dim])
else:
raise ValueError
class MlpValue(BaseValue):
"""
MLP Value Function class
:param state_dim: State dimensions of environment
:param action_dim: Action dimensions of environment
:param val_type: Specifies type of value function: (
"V" for V(s), "Qs" for Q(s), "Qsa" for Q(s,a))
:param hidden: Sizes of hidden layers
:type state_dim: int
:type action_dim: int
:type val_type: string
:type hidden: tuple or list
"""
def __init__(
self,
state_dim: int,
action_dim: int = None,
val_type: str = "V",
hidden: Tuple = (32, 32),
):
super(MlpValue, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.model = _get_val_model(mlp, val_type, state_dim, hidden, action_dim)
class CNNValue(BaseValue):
"""
CNN Value Function class
:param state_dim: State dimension of environment
:param action_dim: Action dimension of environment
:param framestack: Number of previous frames to stack together
:param val_type: Specifies type of value function: (
"V" for V(s), "Qs" for Q(s), "Qsa" for Q(s,a))
:param hidden: Sizes of hidden layers
:type state_dim: int
:type action_dim: int
:type framestack: int
:type val_type: string
:type hidden: tuple or list
"""
def __init__(
self,
action_dim: int,
framestack: int = 4,
val_type: str = "Qs",
fc_layers: Tuple = (256,),
):
super(CNNValue, self).__init__()
self.action_dim = action_dim
self.conv, output_size = cnn((framestack, 16, 32))
self.fc = _get_val_model(mlp, val_type, output_size, fc_layers, action_dim)
def forward(self, state: np.ndarray) -> np.ndarray:
state = self.conv(state)
state = state.view(state.size(0), -1)
state = self.fc(state)
return state
value_registry = {"mlp": MlpValue, "cnn": CNNValue}
def get_value_from_name(name_: str) -> Union[Type[MlpValue], Type[CNNValue]]:
"""
Gets the value function given the name of the value function
:param name_: Name of the value function needed
:type name_: string
:returns: Value function
"""
if name_ in value_registry:
return value_registry[name_]
raise NotImplementedError
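# Usage sketch (the dimensions below are illustrative, e.g. CartPole-like sizes):
#   value_cls = get_value_from_name("mlp")
#   q_function = value_cls(state_dim=4, action_dim=2, val_type="Qs", hidden=(32, 32))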
|
the-stack_106_32032 | # Example 2.2: amorphous silicon, Tersoff potential
# Computes: Quasi Harmonic Green Kubo (QHGK) properties for amorphous silicon (512 atoms)
# Uses: LAMMPS
# External files: forcefields/Si.tersoff
# Import necessary packages
from ase.io import read
from kaldo.conductivity import Conductivity
from kaldo.controllers import plotter
from kaldo.forceconstants import ForceConstants
from kaldo.phonons import Phonons
from kaldo.helpers.storage import get_folder_from_label
import matplotlib.pyplot as plt
import numpy as np
import os
plt.style.use('seaborn-poster')
### Set up force constant objects via interface to LAMMPS ####
# Replicate the unit cell 'nrep'=1 time
nrep = 1
supercell = np.array([nrep, nrep, nrep])
# Load in computed 2nd, 3rd IFCs from LAMMPS outputs
forceconstants = ForceConstants.from_folder(folder='fc_aSi512',supercell=supercell,format='lammps')
# Configure phonon object
# 'is_classic': specify if the system is classic, True for classical and False for quantum
# 'temperature: temperature (Kelvin) at which simulation is performed
# 'folder': name of folder containing phonon property and thermal conductivity calculations
# 'storage': Format to storage phonon properties ('formatted' for ASCII format data, 'numpy'
# for python numpy array and 'memory' for quick calculations, no data stored)
phonons_config = {'is_classic': False,
'temperature': 300, #'temperature'=300K
'folder': 'ALD_aSi512_example2',
                  'third_bandwidth':0.5/4.135, # 0.5 meV is used here.
'broadening_shape':'triangle',
'storage': 'numpy'}
# Set up phonon object by passing in configuration details and the forceconstants object computed above
phonons = Phonons(forceconstants=forceconstants, **phonons_config)
### Set up the Conductivity object and thermal conductivity calculations ####
# Compute thermal conductivity (t.c.) by solving Boltzmann Transport
# Equation (BTE) with various of methods.
# 'phonons': phonon object obtained from the above calculations
# 'method': specify methods to solve for BTE
# ('qhgk' for Quasi-Harmonic Green Kubo (QHGK))
# 'storage': Format to storage phonon properties ('formatted' for ASCII format data, 'numpy'
# for python numpy array and 'memory' for quick calculations, no data stored)
### Set up the Conductivity object and diffusivity calculations ####
# One needs to compute conductivity from QHGK, and then compute diffusivity
print('\n')
qhgk_cond = Conductivity(phonons=phonons, method='qhgk', storage='numpy')
qhgk_cond.diffusivity_bandwidth = phonons.bandwidth
print(np.abs(np.mean(qhgk_cond.conductivity.sum(axis=0).diagonal())))
qhgk_cond.diffusivity
# Define the base folder to contain plots
# 'base_folder':name of the base folder
folder = get_folder_from_label(phonons, base_folder='plots')
if not os.path.exists(folder):
os.makedirs(folder)
# Define a boolean flag to specify if the figure window pops up during the simulation
is_show_fig = True
# Visualize anharmonic phonon properties by using matplotlib
# The following shows examples of plotting
# diffusivity vs frequency and
# cumulative conductivity vs frequency
# 'order': Index order to reshape array,
# 'order'='C' for C-like index order; 'F' for Fortran-like index order
# 'band_width': phonon bandwidth (THz) computed from diagonal elements
frequency = phonons.frequency.flatten(order='C')
diffusivity = qhgk_cond.diffusivity.flatten(order='C')
plt.figure()
plt.scatter(frequency[3:],diffusivity[3:], s=5)
plt.xlabel("$\\nu$ (THz)", fontsize=16)
plt.ylabel("$D (mm/s)$", fontsize=16)
plt.xlim([0, 25])
plt.savefig(folder + '/diffusivity_vs_freq.png', dpi=300)
if not is_show_fig:
plt.close()
else:
plt.show()
# Plot cumulative conductivity from QHGK methods
qhgk_full_cond = Conductivity(phonons=phonons, method='qhgk').conductivity
qhgk_cumulative_cond = plotter.cumulative_cond_cal(frequency,qhgk_full_cond,phonons.n_phonons)
plt.figure()
plt.plot(frequency,qhgk_cumulative_cond,'.')
plt.xlabel(r'frequency($THz$)', fontsize=16)
plt.ylabel(r'$\kappa_{cum,QHGK}(W/m/K)$', fontsize=16)
plt.xlim([0, 20])
plt.savefig(folder + '/qhgk_cum_cond_vs_freq.png', dpi=300)
if not is_show_fig:
plt.close()
else:
plt.show()
|
the-stack_106_32036 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 00:06:27 2019
@author: janej
"""
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
def multi_plane_viewer(struct):
struct_tran = struct.T
struct_fron = struct.transpose(1, 2, 0)
struct_sagi = struct.transpose(0, 2, 1)
fig = plt.figure()
ax_tran = fig.add_subplot(221)
ax_tran.volumn = struct_tran
ax_tran.index = struct_tran.shape[0] // 2
ax_tran.imshow(struct_tran[ax_tran.index], cmap="gray")
ax_fron = fig.add_subplot(223)
ax_fron.volumn = struct_fron
ax_fron.index = struct_fron.shape[0] // 2
ax_fron.imshow(struct_fron[ax_fron.index], cmap="gray")
ax_sagi = fig.add_subplot(224)
ax_sagi.volumn = struct_sagi
ax_sagi.index = struct_sagi.shape[0] // 2
ax_sagi.imshow(struct_sagi[ax_sagi.index], cmap="gray")
fig.canvas.mpl_connect('button_press_event', check_axes)
def check_axes(event):
fig = event.canvas.figure
ax_tran = fig.axes[0]
ax_fron = fig.axes[1]
ax_sagi = fig.axes[2]
x_index = np.int_(event.xdata)
y_index = np.int_(event.ydata)
print(fig.get_axes())
if ax_tran.contains(event)[0]:
fig.canvas.mpl_connect('button_press_event', frontal_view(event, x_index))
fig.canvas.mpl_connect('button_press_event', sagittal_view(event, y_index))
x = ax_tran.contains(event)
print(1)
elif ax_fron.contains(event)[0]:
fig.canvas.mpl_connect('button_press_event', transverse_view(event, y_index))
fig.canvas.mpl_connect('button_press_event', sagittal_view( event, x_index))
y = ax_fron.contains(event)
print(2)
elif ax_sagi.contains(event)[0]:
fig.canvas.mpl_connect('button_press_event', transverse_view(event, y_index))
fig.canvas.mpl_connect('button_press_event', frontal_view(event, x_index))
print(3)
def transverse_view(event, index):
fig = event.canvas.figure
ax_tran = fig.axes[0]
volumn = ax_tran.volumn
# ax_tran.index = np.int_(event.xdata)
ax_tran.index = index
ax_tran.images[0].set_array(volumn[ax_tran.index])
fig.canvas.draw()
def frontal_view(event, index):
fig = event.canvas.figure
ax_fron = fig.axes[1]
volumn = ax_fron.volumn
# ax_fron.index = np.int_(event.xdata)
ax_fron.index = index
ax_fron.images[0].set_array(volumn[ax_fron.index])
fig.canvas.draw()
def sagittal_view(event, index):
fig = event.canvas.figure
ax_sagi = fig.axes[2]
volumn = ax_sagi.volumn
# ax_sagi.index = np.int_(event.ydata)
ax_sagi.index = index
ax_sagi.images[0].set_array(volumn[ax_sagi.index])
fig.canvas.draw()
path = r"C:\Users\janej\OneDrive\MelbUni\MASTER OF ENGINEERING\CapstoneProject_2018\Test_Images\TestSample\nifti_2\ARTERIELLE.nii.gz"
struct = nib.load(path)
struct_arr = struct.get_fdata()
multi_plane_viewer(struct_arr)
plt.show()
|
the-stack_106_32037 | """
Copyright (c) 2017-2020 Starwolf Ltd and Richard Freeman. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
Created on 17 Mar 2019
@author: Richard Freeman
pip install boto3
This package creates two new DynamoDb table, loads data from a file and is
used for testing a JOIN
"""
import time
import csv
import json
import logging
import boto3
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s:%(lineno)d %(message)s',
level=logging.INFO)
class DynamoRepository:
def __init__(self, target_dynamo_table, region='eu-west-1'):
        self.dynamo_resource = boto3.resource(service_name='dynamodb',
                                              region_name=region)
self.target_dynamo_table = target_dynamo_table
self.db_table = self.dynamo_resource.Table(self.target_dynamo_table)
def insert_dynamo_row(self, insert_record):
insert_record_parsed = {k: (int(v) if (v.isdigit() and k != 'EventId') else str(v))
for (k, v) in insert_record.items()}
return self.db_table.put_item(Item=insert_record_parsed)
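    # Parsing sketch (values are hypothetical): a CSV row read as
    # {'EventId': '324', 'EventDay': '20171001', 'EventCount': '2'} is stored as
    # {'EventId': '324', 'EventDay': 20171001, 'EventCount': 2}; every numeric
    # string except EventId is converted to int before put_item.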
def create_dynamo_lambda_counters_table(table_name_value, enable_streams=False,
read_capacity=100,
write_capacity=50,
range_definition={'AttributeName': 'EventDate',\
'AttributeType': 'N'},
region='eu-west-1'):
table_name = table_name_value
logging.info('creating table: ' + table_name)
try:
client = boto3.client(service_name='dynamodb', region_name=region)
logging.info(client.create_table(TableName=table_name,
AttributeDefinitions=[{'AttributeName': 'EventId',
'AttributeType': 'S'},
range_definition],
KeySchema=[{'AttributeName': 'EventId',
'KeyType': 'HASH'},
{'AttributeName': range_definition['AttributeName'],
'KeyType': 'RANGE'},
],
ProvisionedThroughput={'ReadCapacityUnits': read_capacity,
'WriteCapacityUnits': write_capacity}))
except Exception as e:
logging.error(str(type(e)))
logging.error(e)
def insert_rows_into_table(table_name, input_data_path):
dynamo_repo = DynamoRepository(table_name)
with open(input_data_path, 'r') as sample_file:
csv_reader = csv.DictReader(sample_file)
for row in csv_reader:
dynamo_repo.insert_dynamo_row(json.loads(json.dumps(row)))
def main():
table_name = 'user-visits'
range_definition = {'AttributeName': 'EventDay', 'AttributeType': 'N'}
create_dynamo_lambda_counters_table(table_name, True, 1, 1, range_definition)
time.sleep(20)
input_data_path = '../sample_data/employee/dynamodb-sample-data.txt'
insert_rows_into_table(table_name, input_data_path)
table_name = 'event-table-details'
range_definition = {'AttributeName': 'EventName', 'AttributeType': 'S'}
create_dynamo_lambda_counters_table(table_name, True, 1, 1, range_definition)
time.sleep(20)
input_data_path = '../sample_data/employee/dynamodb-sample-data-event-details.txt'
insert_rows_into_table(table_name, input_data_path)
if __name__ == '__main__':
main()
|