python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Job module
"""
from . import util
from .air_model import AirModel
class Job(AirModel):
    """
    Manage a Job

    ### json
    Returns a JSON string representation of the job
    ### refresh
    Syncs the job with all values returned by the API
    ### update
    Update the job with the provided data
    Arguments:
        kwargs (dict, optional): All optional keyword arguments are applied as key/value
            pairs in the request's JSON payload
    """
    # Jobs cannot be deleted through the API
    _deletable = False

    def __repr__(self):
        # Use the descriptive form only when the job is live and has a category
        if not self._deleted and self.category:
            return f'<Job {self.category} {self.id}>'
        return super().__repr__()
class JobApi:
    """ High-level interface for the Job API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/job/'

    def get(self, job_id, **kwargs):
        """
        Get an existing job

        Arguments:
            job_id (str): Job ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Job`](/docs/job)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.jobs.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Job START 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{job_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return Job(self, **response.json())

    def list(self, **kwargs):
        """
        List existing jobs

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.jobs.list()
        [<Job START c51b49b6-94a7-4c93-950c-e7fa4883591>, <Job STOP 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [Job(self, **record) for record in response.json()]
| air_sdk-main | air_sdk/job.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Interface module
"""
from . import util
from .air_model import AirModel
class Interface(AirModel):
    """
    View an Interface

    ### json
    Returns a JSON string representation of the interface
    ### refresh
    Syncs the interface with all values returned by the API
    """
    # Interfaces are read-only through the API
    _deletable = False
    _updatable = False

    def __repr__(self):
        # Descriptive form only for a live, named interface
        if not self._deleted and self.name:
            return f'<Interface {self.name} {self.id}>'
        return super().__repr__()
class InterfaceApi:
    """ High-level interface for the Interface API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/interface/'

    def get(self, interface_id, **kwargs):
        """
        Get an existing interface

        Arguments:
            interface_id (str): Interface ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Interface`](/docs/interface)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.interfaces.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Interface eth0 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{interface_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return Interface(self, **response.json())

    def list(self, **kwargs):
        """
        List existing interfaces

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.interfaces.list()
        [<Interface eth0 c51b49b6-94a7-4c93-950c-e7fa4883591>, <Interface eth1 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [Interface(self, **record) for record in response.json()]
| air_sdk-main | air_sdk/interface.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
""" Exposes the AIR API client module """
from .air_api import *
| air_sdk-main | air_sdk/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
ResourceBudget module
"""
from . import util
from .air_model import AirModel
class ResourceBudget(AirModel):
    """
    Manage a ResourceBudget

    ### json
    Returns a JSON string representation of the budget
    ### refresh
    Syncs the budget with all values returned by the API
    ### update
    Update the budget with the provided data
    Arguments:
        kwargs (dict, optional): All optional keyword arguments are applied as key/value
            pairs in the request's JSON payload
    """
    # Budgets cannot be deleted through the API
    _deletable = False

    def __repr__(self):
        return super().__repr__() if self._deleted else f'<ResourceBudget {self.id}>'
class ResourceBudgetApi:
    """ High-level interface for the ResourceBudget API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/resource-budget/'

    def get(self, budget_id, **kwargs):
        """
        Get an existing budget

        Arguments:
            budget_id (str): ResourceBudget ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`ResourceBudget`](/docs/resourcebudget)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.resource_budgets.get('c604c262-396a-48a0-a8f6-31708c0cff82')
        <ResourceBudget c604c262-396a-48a0-a8f6-31708c0cff82>
        ```
        """
        response = self.client.get(f'{self.url}{budget_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return ResourceBudget(self, **response.json())

    def list(self, **kwargs):
        """
        List existing budgets

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.resource_budgets.list()
        [<ResourceBudget c604c262-396a-48a0-a8f6-31708c0cff82>, <ResourceBudget 906675f7-8b8d-4f52-b59d-52847af2f0ef>]
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [ResourceBudget(self, **record) for record in response.json()]
| air_sdk-main | air_sdk/resource_budget.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Topology module
"""
import io
import os
from . import util
from .air_model import AirModel
class Topology(AirModel):
    """
    Manage a Topology

    ### delete
    Delete the topology. Once successful, the object should no longer be used and will raise
    [`AirDeletedObject`](/docs/exceptions) when referenced.
    Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
    ### json
    Returns a JSON string representation of the topology
    ### refresh
    Syncs the topology with all values returned by the API
    ### update
    Update the topology with the provided data
    Arguments:
        kwargs (dict, optional): All optional keyword arguments are applied as key/value
            pairs in the request's JSON payload
    """
    # These nested collections are managed by the API and excluded from updates
    _ignored_update_fields = ['links', 'nodes']

    def __repr__(self):
        if not self._deleted and self.name:
            return f'<Topology {self.name} {self.id}>'
        return super().__repr__()

    def add_permission(self, email, **kwargs):
        """
        Adds permission for a given user to this topology.

        Arguments:
            email (str): Email address of the user being given permission
            kwargs (dict, optional): All other optional keyword arguments are applied as key/value
                pairs in the request's JSON payload

        Returns:
            [`Permission`](/docs/permission)

        Example:
        ```
        >>> topology.add_permission('[email protected]', write_ok=True)
        <Permission 217bea68-7048-4262-9bbc-b98ab16c603e>
        ```
        """
        # Delegate to the Permission API, scoping the permission to this topology
        return self._api.client.permissions.create(email=email, topology=self.id, **kwargs)
class TopologyApi:
    """ High-level interface for the Topology API """

    def __init__(self, client):
        self.client = client
        self.url = self.client.api_url + '/topology/'

    @util.deprecated('TopologyApi.list()')
    def get_topologies(self): #pylint: disable=missing-function-docstring
        return self.list()

    @util.deprecated('TopologyApi.create()')
    def create_topology(self, json=None, dot=None): #pylint: disable=missing-function-docstring
        return self.create(json=json, dot=dot)

    @util.deprecated('Topology.update()')
    def update_topology(self, topology_id, data): #pylint: disable=missing-function-docstring
        topology = self.get(topology_id)
        return topology.update(**data)

    def get(self, topology_id, **kwargs):
        """
        Get an existing topology

        Arguments:
            topology_id (str): Topology ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Topology`](/docs/topology)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.topologies.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Topology my_network 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        url = f'{self.url}{topology_id}/'
        res = self.client.get(url, params=kwargs)
        util.raise_if_invalid_response(res)
        return Topology(self, **res.json())

    def list(self, **kwargs):
        #pylint: disable=line-too-long
        """
        List existing topologies

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.topologies.list()
        [<Topology my_network1 c51b49b6-94a7-4c93-950c-e7fa4883591>, <Topology my_network2 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """ #pylint: enable=line-too-long
        res = self.client.get(f'{self.url}', params=kwargs)
        util.raise_if_invalid_response(res, data_type=list)
        return [Topology(self, **topology) for topology in res.json()]

    @util.required_kwargs([('json', 'dot')])
    def create(self, **kwargs):
        #pylint: disable=line-too-long
        """
        Create a new topology. The caller must provide either `dot` (recommended) or `json`.

        Arguments:
            dot (str | fd, optional): Topology in DOT format. This can be passed as a string
                containing the raw DOT data, a path to the DOT file on your local disk,
                or as a file descriptor for a local file
            json (dict, optional): Topology in JSON format

        Returns:
            [`Topology`](/docs/topology)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.topologies.create(dot='/tmp/my_net.dot')
        <Topology my_net 01298e0c-4ef1-43ec-9675-93160eb29d9f>
        >>> air.topologies.create(dot='graph "my sim" { "server1" [ function="server" os="generic/ubuntu1804"] }')
        <Topology my_net 6256baa8-f54b-4190-85c8-1cc574590080>
        >>> air.topologies.create(dot=open('/tmp/my_net.dot', 'r'))
        <Topology my_net a3d09f12-56ff-4889-8e03-3b714d32c3e5>
        ```
        """
        if kwargs.get('json'):
            res = self.client.post(self.url, json=kwargs['json'])
        else:
            if isinstance(kwargs['dot'], io.IOBase):
                # Caller passed an open file object; hand it straight to the client
                payload = kwargs['dot']
            elif os.path.isfile(kwargs['dot']):
                # Read the DOT file inside a context manager so the handle is
                # always closed (previous implementation leaked the descriptor)
                with open(kwargs['dot'], 'r') as dot_file:
                    payload = dot_file.read()
            else:
                # Treat the argument as raw DOT data
                payload = kwargs['dot'].encode('utf-8')
            res = self.client.post(self.url, data=payload,
                                   headers={'Content-type': 'text/vnd.graphviz'})
        util.raise_if_invalid_response(res, status_code=201)
        return Topology(self, **res.json())
| air_sdk-main | air_sdk/topology.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Marketplace Demo module
"""
from . import util
from .air_model import AirModel
class Marketplace(AirModel):
    """
    Manage marketplace demos
    """
    # Marketplace demos are read-only through the API
    _updatable = False
    _deletable = False

    def __repr__(self):
        if not self._deleted and self.name:
            return f'<Marketplace Demo \'{self.name}\' {self.id}>'
        return super().__repr__()
class MarketplaceApi:
    """ High-level interface for the Marketplace API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/marketplace/demo/'

    def list(self, **kwargs):
        """
        List existing marketplace demos

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.marketplace.list()
        [<Marketplace Demo EVPN Centralized c51b49b6-94a7-4c93-950c-e7fa4883591>]
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [Marketplace(self, **record) for record in response.json()]

    def get(self, demo_id, **kwargs):
        """
        Get an existing marketplace demo

        Arguments:
            demo_id (str): Demo ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Demo`](/docs/marketplace)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.marketplace.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Marketplace Demo EVPN Centralized 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{demo_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return Marketplace(self, **response.json())
| air_sdk-main | air_sdk/marketplace.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
SimulationInterface module
"""
from . import util
from .air_model import AirModel
class SimulationInterface(AirModel):
    """
    Manage a SimulationInterface

    ### json
    Returns a JSON string representation of the simulation interface
    ### refresh
    Syncs the simulation interface with all values returned by the API
    ### update
    Update the simulation interface with the provided data
    Arguments:
        kwargs (dict, optional): All optional keyword arguments are applied as key/value
            pairs in the request's JSON payload
    """
    # Simulation interfaces cannot be deleted through the API
    _deletable = False

    def __repr__(self):
        return super().__repr__() if self._deleted else f'<SimulationInterface {self.id}>'
class SimulationInterfaceApi:
    """ High-level interface for the SimulationInterface API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/simulation-interface/'

    @util.deprecated('SimulationInterfaceApi.list()')
    def get_simulation_interfaces(self, simulation_id='', original_id=''): #pylint: disable=missing-function-docstring
        return self.list(simulation=simulation_id, original=original_id)

    def get(self, simulation_interface_id, **kwargs):
        """
        Get an existing simulation interface

        Arguments:
            simulation_interface_id (str): SimulationInterface ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`SimulationInterface`](/docs/simulationinterface)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.simulation_interfaces.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <SimulationInterface 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{simulation_interface_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return SimulationInterface(self, **response.json())

    def list(self, **kwargs):
        """
        List existing simulation interfaces

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.simulation_interfaces.list()
        [<SimulationInterface c51b49b6-94a7-4c93-950c-e7fa4883591>, <SimulationInterface 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        # The API expects the `original` filter; accept `interface` as an alias
        if kwargs.get('interface'):
            kwargs['original'] = kwargs.pop('interface')
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [SimulationInterface(self, **record) for record in response.json()]
| air_sdk-main | air_sdk/simulation_interface.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Custom exceptions for the AIR SDK
"""
class AirError(Exception):
    """
    Base exception class. All custom exceptions should inherit from this class.
    """
    def __init__(self, message='', status_code=None):
        super().__init__(message)
        # Keep the HTTP status code (if any) available for callers to inspect
        self.status_code = status_code
class AirAuthorizationError(AirError):
    """ Raised when authorization with the API fails. """
    def __init__(self, message='An error occurred when authorizing the Air API', status_code=None):
        # Expose the rendered message for callers that read it directly
        self.message = message
        super().__init__(message=message, status_code=status_code)
class AirUnexpectedResponse(AirError):
    """ Raised when the API returns an unexpected response. """
    def __init__(self, message='', status_code=None):
        # Build a single descriptive message, appending the status code when known
        detail = 'Received an unexpected response from the Air API'
        if status_code:
            detail += f' ({status_code})'
        self.message = f'{detail}: {message}'
        super().__init__(message=self.message, status_code=status_code)
class AirForbiddenError(AirError):
    """ Raised when an API call returns a 403 Forbidden error """
    def __init__(self, message='Received 403 Forbidden. Please call AirApi.authorize().'):
        # Status code is always 403 for this error type
        self.message = message
        super().__init__(message=message, status_code=403)
class AirObjectDeleted(AirError):
    """ Raised when accessing a previously instantiated object that has since been deleted """
    def __init__(self, cls, message=''):
        # Fall back to a default message naming the deleted class
        self.message = message or f'{cls} object has been deleted and should no longer be referenced'
        super().__init__(message=self.message)
| air_sdk-main | air_sdk/exceptions.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Login module
"""
from . import util
from .air_model import AirModel
class Login(AirModel):
    """
    View login information

    ### json
    Returns a JSON string representation of the login info
    ### refresh
    Syncs the login info with all values returned by the API
    """
    # Login info is read-only through the API
    _deletable = False
    _updatable = False

    def __repr__(self):
        return super().__repr__() if self._deleted else f'<Login {self.id}>'
class LoginApi:
    """ High-level interface for the Login API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/login/'

    def get(self, **kwargs):
        """
        Get login information or start an OAuth request. This is equivalent to `login.list()`.

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Login`](/docs/login)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.login.get()
        <Login>
        ```
        """
        # Simple alias — the login endpoint has no per-object lookup
        return self.list(**kwargs)

    def list(self, **kwargs):
        """
        Get login information or start an OAuth request. This is equivalent to `login.get()`.

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Login`](/docs/login)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.login.get()
        <Login>
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response)
        return Login(self, **response.json())
| air_sdk-main | air_sdk/login.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Node module
"""
from . import util
from .air_model import AirModel
class Node(AirModel):
    """
    Manage a Node

    ### delete
    Delete the node. Once successful, the object should no longer be used and will raise
    [`AirDeletedObject`](/docs/exceptions) when referenced.
    Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
    ### json
    Returns a JSON string representation of the node
    ### refresh
    Syncs the node with all values returned by the API
    ### update
    Update the node with the provided data
    Arguments:
        kwargs (dict, optional): All optional keyword arguments are applied as key/value
            pairs in the request's JSON payload
    """
    # Interfaces are managed by the API and excluded from updates
    _ignored_update_fields = ['interfaces']

    def __repr__(self):
        if not self._deleted and self.name:
            return f'<Node {self.name} {self.id}>'
        return super().__repr__()
class NodeApi:
    """ High-level interface for the Node API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/node/'

    @util.deprecated('NodeApi.list()')
    def get_nodes(self, simulation_id=''): #pylint: disable=missing-function-docstring
        return self.list(simulation=simulation_id)

    def get(self, node_id, **kwargs):
        """
        Get an existing node

        Arguments:
            node_id (str): Node ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Node`](/docs/node)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.nodes.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Node server 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        # The API expects the `simulation` filter; accept `simulation_id` as an alias
        if kwargs.get('simulation_id'):
            kwargs['simulation'] = kwargs.pop('simulation_id')
        response = self.client.get(f'{self.url}{node_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return Node(self, **response.json())

    def list(self, **kwargs):
        """
        List existing nodes

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.nodes.list()
        [<Node server c51b49b6-94a7-4c93-950c-e7fa4883591>, <Node switch 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [Node(self, **record) for record in response.json()]

    @util.required_kwargs(['name', 'topology'])
    def create(self, **kwargs):
        """
        Create a new node

        Arguments:
            name (str): Node name
            topology (str | `Topology`): `Topology` or ID
            kwargs (dict, optional): All other optional keyword arguments are applied as key/value
                pairs in the request's JSON payload

        Returns:
            [`Node`](/docs/node)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.nodes.create(name='server', topology=topology)
        <Node server 01298e0c-4ef1-43ec-9675-93160eb29d9f>
        ```
        """
        response = self.client.post(self.url, json=kwargs)
        util.raise_if_invalid_response(response, status_code=201)
        return Node(self, **response.json())
| air_sdk-main | air_sdk/node.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Demo module
"""
from . import util
from .air_model import AirModel
class Demo(AirModel):
    """
    View Demos

    ### json
    Returns a JSON string representation of the demo
    ### refresh
    Syncs the demo with all values returned by the API
    """
    # Demos are read-only through the API
    _deletable = False
    _updatable = False

    def __repr__(self):
        if not self._deleted and self.name:
            return f'<Demo \'{self.name}\' {self.id}>'
        return super().__repr__()
class DemoApi:
    """ High-level interface for the Demo API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/demo/'

    def get(self, demo_id, **kwargs):
        """
        Get an existing demo

        Arguments:
            demo_id (str): Demo ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`Demo`](/docs/demo)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.demos.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Demo EVPN 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{demo_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return Demo(self, **response.json())

    def list(self, **kwargs):
        """
        List existing demos

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.demos.list()
        [<Demo EVPN c51b49b6-94a7-4c93-950c-e7fa4883591>, <Demo Challenges 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [Demo(self, **record) for record in response.json()]
| air_sdk-main | air_sdk/demo.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
SimulationNode module
"""
from . import util
from .air_model import AirModel
class SimulationNode(AirModel):
    """
    Manage a SimulationNode

    ### json
    Returns a JSON string representation of the simulation node
    ### refresh
    Syncs the simulation node with all values returned by the API
    ### update
    Update the simulation node with the provided data
    Arguments:
        kwargs (dict, optional): All optional keyword arguments are applied as key/value
            pairs in the request's JSON payload
    """
    # Simulation nodes cannot be deleted through the API
    _deletable = False

    def __repr__(self):
        return super().__repr__() if self._deleted else f'<SimulationNode {self.id}>'

    @util.required_kwargs(['executor', 'data'])
    def create_instructions(self, **kwargs):
        """
        Create instructions for the `SimulationNode`'s agent to execute

        Arguments:
            data (str | list): Instruction data
            executor (str): Agent executor type
            kwargs (dict, optional): All other optional keyword arguments are applied as key/value
                pairs in the request's JSON payload

        Returns:
            dict: Response JSON

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> simulation_node.create_instructions(data='echo foo', executor='shell')
        {'id': '67f73552-ffdf-4e5f-9881-aeae227604a3'}
        ```
        """
        # A list of instructions is flattened into a single newline-joined string
        if isinstance(kwargs['data'], list):
            kwargs['data'] = '\n'.join(kwargs['data'])
        response = self._api.client.post(f'{self._api.url}{self.id}/instructions/', json=kwargs)
        util.raise_if_invalid_response(response, status_code=201, data_type=str)
        return {'id': response.json()}

    def list_instructions(self, **kwargs):
        """
        List all instructions for a `SimulationNode`

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> simulation_node.instructions.list()
        [{'id': '56abc69b-489f-429a-aed9-600f26afc956'}, {'id': '7c9c3449-f071-4bbc-bb42-bef04e44d74e'}]
        ```
        """
        response = self._api.client.get(f'{self._api.url}{self.id}/instructions/', params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return response.json()

    def delete_instructions(self):
        """
        Delete all instructions for a `SimulationNode`

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - Instruction delete failed

        Example:
        ```
        >>> simulation_node.instructions.delete()
        ```
        """
        response = self._api.client.delete(f'{self._api.url}{self.id}/instructions/')
        util.raise_if_invalid_response(response, status_code=204, data_type=None)

    @util.required_kwargs(['action'])
    def control(self, **kwargs):
        """
        Sends a control command to the `SimulationNode`.

        Arguments:
            action (str): Control command
            kwargs (dict, optional): All other optional keyword arguments are applied as key/value
                pairs in the request's JSON payload

        Returns:
            dict: Response JSON

        Example:
        ```
        >>> simulation_node.control(action='reset')
        {'result': 'success'}
        ```
        """
        response = self._api.client.post(f'{self._api.url}{self.id}/control/', json=kwargs)
        util.raise_if_invalid_response(response)
        return response.json()

    def rebuild(self, **kwargs):
        """
        Rebuild the `SimulationNode` back to its initial state. **All existing data will be lost!**
        """
        self.control(action='rebuild', **kwargs)

    def reset(self, **kwargs):
        """ Reset the `SimulationNode` """
        self.control(action='reset', **kwargs)
class SimulationNodeApi:
    """ Wrapper for the SimulationNode API """

    def __init__(self, client):
        self.client = client
        self.url = f'{self.client.api_url}/simulation-node/'

    @util.deprecated('SimulationNode.update()')
    def update_simulation_node(self, simulation_node_id, data): #pylint: disable=missing-function-docstring
        node = self.get(simulation_node_id)
        node.update(**data)

    @util.deprecated('SimulationNodeApi.list()')
    def get_simulation_nodes(self, **kwargs): #pylint: disable=missing-function-docstring
        return self.list(**kwargs)

    def get(self, simulation_node_id, **kwargs):
        """
        Get an existing simulation node

        Arguments:
            simulation_node_id (str): SimulationNode ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            [`SimulationNode`](/docs/simulationnode)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.simulation_nodes.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <SimulationNode my_sim 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{simulation_node_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return SimulationNode(self, **response.json())

    def list(self, **kwargs):
        """
        List existing simulation nodes

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
            list

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - API did not return a 200 OK
                or valid response JSON

        Example:
        ```
        >>> air.simulation_nodes.list()
        [<SimulationNode sim1 c51b49b6-94a7-4c93-950c-e7fa4883591>, <SimulationNode sim2 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        response = self.client.get(self.url, params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [SimulationNode(self, **record) for record in response.json()]
| air_sdk-main | air_sdk/simulation_node.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Image module
"""
from . import util
from .air_model import AirModel
class Image(AirModel):
    """
    Manage an Image

    ### delete
    Delete the image. Once successful, the object should no longer be used and will raise
    [`AirDeletedObject`](/docs/exceptions) when referenced.
    Raises:
        [`AirUnexpectedResponse`](/docs/exceptions) - Delete failed
    ### json
    Returns a JSON string representation of the image
    ### refresh
    Syncs the image with all values returned by the API
    ### update
    Update the image with the provided data
    Arguments:
        kwargs (dict, optional): All optional keyword arguments are applied as key/value
            pairs in the request's JSON payload
    """
    def copy(self, organization):
        """
        Make a copy of the image in another organization

        Arguments:
            organization (str | `Organization`): `Organization` or ID

        Returns:
            [`Image`](/docs/image)

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - Copy failed

        Example:
        ```
        >>> image = air.images.get('33d8a377-ef0a-4a0d-ac2a-076e32678e18')
        >>> target_org = air.organizations.get('b0e47509-4099-4e24-b96f-d1278d431f46')
        >>> image.copy(target_org)
        <Image cumulus-vx-5.4.0 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self._api.client.post(f'{self._api.url}{self.id}/copy/',
                                         json={'organization': organization})
        util.raise_if_invalid_response(response, status_code=201)
        return Image(self._api, **response.json())

    def upload(self, filename):
        """
        Upload an image file

        Arguments:
            filename (str): Absolute path to the local image

        Raises:
            [`AirUnexpectedResponse`](/docs/exceptions) - Upload failed
        """
        # Stream the file to the API; the context manager guarantees the handle is closed
        with open(filename, 'rb') as image_file:
            response = self._api.client.put(f'{self._api.url}{self.id}/upload/', data=image_file)
        util.raise_if_invalid_response(response, status_code=204, data_type=None)

    def __repr__(self):
        if not self._deleted and self.name:
            return f'<Image {self.name} {self.id}>'
        return super().__repr__()
class ImageApi:
    """ High-level interface for the Image API """

    def __init__(self, client):
        self.client = client
        self.url = self.client.api_url + '/image/'

    def get(self, image_id, **kwargs):
        """
        Get an existing image

        Arguments:
            image_id (str): Image ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
        [`Image`](/docs/image)

        Raises:
        [`AirUnexpectedResposne`](/docs/exceptions) - API did not return a 200 OK
            or valid response JSON

        Example:
        ```
        >>> air.images.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Image cumulus-vx-4.2.1 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{image_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return Image(self, **response.json())

    def list(self, **kwargs):
        #pylint: disable=line-too-long
        """
        List existing images

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
        list

        Raises:
        [`AirUnexpectedResposne`](/docs/exceptions) - API did not return a 200 OK
            or valid response JSON

        Example:
        ```
        >>> air.images.list()
        [<Image cumulus-vx-4.2.1 c51b49b6-94a7-4c93-950c-e7fa4883591>, <Image generic/ubuntu18.04 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        #pylint: enable=line-too-long
        response = self.client.get(f'{self.url}', params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [Image(self, **record) for record in response.json()]

    @util.required_kwargs(['name', 'organization'])
    def create(self, **kwargs):
        """
        Create a new image

        Arguments:
            name (str): Image name
            organization (str | `Organization`): `Organization` or ID
            filename (str, optional): Absolute path to the local file which should be uploaded
            kwargs (dict, optional): All other optional keyword arguments are applied as key/value
                pairs in the request's JSON payload

        Returns:
        [`Image`](/docs/image)

        Raises:
        [`AirUnexpectedResposne`](/docs/exceptions) - API did not return a 200 OK
            or valid response JSON

        Example:
        ```
        >>> air.images.create(name='my_image', filename='/tmp/my_image.qcow2', agent_enabled=False)
        <Image my_image 01298e0c-4ef1-43ec-9675-93160eb29d9f>
        ```
        """
        response = self.client.post(self.url, json=kwargs)
        util.raise_if_invalid_response(response, status_code=201)
        new_image = Image(self, **response.json())
        # When a local file path is supplied, immediately push its contents to the new record
        if kwargs.get('filename'):
            new_image.upload(kwargs['filename'])
        return new_image
| air_sdk-main | air_sdk/image.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Account module
"""
from . import util
from .air_model import AirModel
class Account(AirModel):
    """
    Manage an Account

    ### json
    Returns a JSON string representation of the account
    ### refresh
    Syncs the account with all values returned by the API
    """
    # Accounts are read-only through the SDK: neither deletion nor updates are allowed
    _deletable = False
    _updatable = False

    def __repr__(self):
        if not self._deleted and self.username:
            return f'<Account {self.username} {self.id}>'
        return super().__repr__()
class AccountApi:
    """ High-level interface for the Account API """

    def __init__(self, client):
        self.client = client
        self.url = self.client.api_url + '/account/'

    def get(self, account_id, **kwargs):
        """
        Get an existing account

        Arguments:
            account_id (str): Account ID
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
        [`Account`](/docs/account)

        Raises:
        [`AirUnexpectedResposne`](/docs/exceptions) - API did not return a 200 OK
            or valid response JSON

        Example:
        ```
        >>> air.accounts.get('3dadd54d-583c-432e-9383-a2b0b1d7f551')
        <Account [email protected] 3dadd54d-583c-432e-9383-a2b0b1d7f551>
        ```
        """
        response = self.client.get(f'{self.url}{account_id}/', params=kwargs)
        util.raise_if_invalid_response(response)
        return Account(self, **response.json())

    def list(self, **kwargs):
        #pylint: disable=line-too-long
        """
        List existing accounts

        Arguments:
            kwargs (dict, optional): All other optional keyword arguments are applied as query
                parameters/filters

        Returns:
        list

        Raises:
        [`AirUnexpectedResposne`](/docs/exceptions) - API did not return a 200 OK
            or valid response JSON

        Example:
        ```
        >>> air.accounts.list()
        [<Account [email protected] c51b49b6-94a7-4c93-950c-e7fa4883591>, <Account [email protected] 3134711d-015e-49fb-a6ca-68248a8d4aff>]
        ```
        """
        #pylint: enable=line-too-long
        response = self.client.get(f'{self.url}', params=kwargs)
        util.raise_if_invalid_response(response, data_type=list)
        return [Account(self, **record) for record in response.json()]
| air_sdk-main | air_sdk/account.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for simulation.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,duplicate-code,unused-argument
import datetime as dt
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import simulation
class TestSimulation(TestCase):
    """Tests for the Simulation model."""

    def setUp(self):
        # Mocked API wrapper; the model builds request URLs from api.url
        self.api = MagicMock()
        self.api.url = 'http://testserver/api/'
        self.model = simulation.Simulation(self.api)
        self.model.id = 'abc123'
        self.model.title = 'test'

    def test_init_(self):
        # Simulations are not directly deletable but may be updated
        self.assertFalse(self.model._deletable)
        self.assertTrue(self.model._updatable)

    def test_repr(self):
        self.assertEqual(str(self.model), f'<Simulation \'{self.model.title}\' {self.model.id}>')

    def test_repr_deleted(self):
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))

    @patch('air_sdk.air_sdk.simulation.Simulation.refresh')
    def test_create_service(self, mock_refresh):
        res = self.model.create_service('test', 'intf', 22, foo='bar')
        self.api.client.services.create.assert_called_with(simulation=self.model.id, name='test',
                                                           interface='intf', dest_port=22,
                                                           foo='bar')
        # Fixed: was `asserrt_called()`, a typo which MagicMock silently accepted,
        # so the refresh was never actually verified
        mock_refresh.assert_called()
        self.assertEqual(res, self.api.client.services.create.return_value)

    def test_add_permission(self):
        res = self.model.add_permission('[email protected]', foo='bar')
        self.api.client.permissions.create.assert_called_with(email='[email protected]',
                                                              simulation=self.model.id, foo='bar')
        self.assertEqual(res, self.api.client.permissions.create.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_control(self, mock_raise):
        res = self.model.control(action='test')
        # Fixed: the original called post() a second time instead of asserting the
        # call made by control(), so the request was never verified
        self.api.client.post.assert_called_with(f'{self.api.url}{self.model.id}/control/',
                                                json={'action': 'test'})
        mock_raise.assert_called_with(self.api.client.post.return_value)
        self.assertEqual(res, self.api.client.post.return_value.json.return_value)

    def test_control_required_kwargs(self):
        with self.assertRaises(AttributeError) as err:
            self.model.control()
        self.assertTrue('requires action' in str(err.exception))

    @patch('air_sdk.air_sdk.simulation.Simulation.start')
    def test_load(self, mock_start):
        # load() is an alias for start()
        self.model.load()
        mock_start.assert_called()

    @patch('air_sdk.air_sdk.simulation.Simulation.control')
    def test_start(self, mock_control):
        self.model.start()
        mock_control.assert_called_with(action='load')

    @patch('air_sdk.air_sdk.simulation.Simulation.store')
    def test_stop(self, mock_store):
        # stop() is an alias for store()
        self.model.stop()
        mock_store.assert_called()

    @patch('air_sdk.air_sdk.simulation.Simulation.control')
    def test_store(self, mock_control):
        self.model.store()
        mock_control.assert_called_with(action='store')

    @patch('air_sdk.air_sdk.simulation.Simulation.control')
    def test_delete(self, mock_control):
        # delete() maps onto the 'destroy' control action rather than an HTTP DELETE
        self.model.delete()
        mock_control.assert_called_with(action='destroy')
class TestSimulationApi(TestCase):
    """Tests for the SimulationApi high-level interface."""

    def setUp(self):
        # Mocked HTTP client; the API wrapper derives its base URL from client.api_url
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = simulation.SimulationApi(self.client)

    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/simulation/')

    @patch('air_sdk.air_sdk.simulation.SimulationApi.list')
    def test_get_simulations(self, mock_list):
        # get_simulations() proxies straight through to list()
        self.assertEqual(self.api.get_simulations(), mock_list.return_value)

    @patch('air_sdk.air_sdk.simulation.SimulationApi.get')
    def test_get_simulation(self, mock_get):
        # get_simulation() proxies to get()
        res = self.api.get_simulation('abc123')
        mock_get.assert_called_with('abc123')
        self.assertEqual(res, mock_get.return_value)

    @patch('air_sdk.air_sdk.simulation.SimulationApi.create')
    def test_create_simulation(self, mock_create):
        # create_simulation() proxies to create()
        res = self.api.create_simulation(foo='bar')
        mock_create.assert_called_with(foo='bar')
        self.assertEqual(res, mock_create.return_value)

    @patch('air_sdk.air_sdk.simulation.SimulationApi.get')
    def test_update_simulation(self, mock_get):
        # update_simulation() fetches the object then calls update() on it
        self.api.update_simulation('abc123', {'foo': 'bar'})
        mock_get.assert_called_with('abc123')
        mock_get.return_value.update.assert_called_with(foo='bar')

    @patch('air_sdk.air_sdk.simulation.SimulationApi.get')
    def test_duplicate(self, mock_get):
        # duplicate() returns both a new Simulation model and the raw control() response
        mock_get.return_value.control.return_value = {'simulation': {'test': 'xyz'}}
        sim, res = self.api.duplicate('abc123', foo='bar')
        mock_get.assert_called_with('abc123')
        mock_get.return_value.control.assert_called_with(foo='bar', action='duplicate')
        self.assertIsInstance(sim, simulation.Simulation)
        self.assertEqual(sim.test, 'xyz')
        self.assertEqual(res, mock_get.return_value.control.return_value)

    @patch('air_sdk.air_sdk.simulation.SimulationApi.get')
    def test_duplicate_object(self, mock_get):
        # Passing a Simulation object directly should skip the extra get() lookup
        mock_snap = MagicMock()
        mock_snap.control.return_value = {'simulation': {'test': 'xyz'}}
        sim, res = self.api.duplicate(mock_snap, foo='bar')
        mock_get.assert_not_called()
        mock_snap.control.assert_called_with(foo='bar', action='duplicate')
        self.assertIsInstance(sim, simulation.Simulation)
        self.assertEqual(res, mock_snap.control.return_value)

    @patch('air_sdk.air_sdk.simulation.SimulationApi.get')
    def test_control(self, mock_get):
        # API-level control() fetches the simulation and delegates to its control()
        res = self.api.control('abc123', 'test', foo='bar')
        mock_get.assert_called_with('abc123')
        mock_get.return_value.control.assert_called_with(action='test', foo='bar')
        self.assertEqual(res, mock_get.return_value.control.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get_citc_simulation(self, mock_raise):
        self.client.get.return_value.json.return_value = {'foo': 'bar'}
        res = self.api.get_citc_simulation()
        self.client.get.assert_called_with(f'{self.client.api_url}/simulation/citc/')
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, simulation.Simulation)
        self.assertEqual(res.foo, 'bar')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        # Extra kwargs are forwarded as query parameters
        self.client.get.assert_called_with(f'{self.client.api_url}/simulation/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, simulation.Simulation)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/simulation/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], simulation.Simulation)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    @patch('air_sdk.air_sdk.util.validate_timestamps')
    def test_create(self, mock_validate, mock_raise):
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(topology='abc123')
        self.client.post.assert_called_with(f'{self.client.api_url}/simulation/',
                                            json={'topology': 'abc123'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, simulation.Simulation)
        self.assertEqual(res.id, 'abc')
        # When omitted, expiration/sleep timestamps are validated as None
        mock_validate.assert_called_with('Simulation created', expires_at=None, sleep_at=None)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    @patch('air_sdk.air_sdk.util.validate_timestamps')
    def test_create_timestamps(self, mock_validate, mock_raise):
        # String timestamps are passed through to validation unchanged
        self.api.create(topology='abc123', expires_at='expired', sleep_at='sleepy')
        mock_validate.assert_called_with('Simulation created', expires_at='expired',
                                         sleep_at='sleepy')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    @patch('air_sdk.air_sdk.util.validate_timestamps')
    def test_create_datetime(self, mock_validate, mock_raise):
        # datetime objects are also passed through to validation unchanged
        time = dt.datetime(2030, 12, 12, 22, 5, 3)
        self.api.create(topology='abc123', expires_at=time, sleep_at=time)
        mock_validate.assert_called_with('Simulation created', expires_at=time,
                                         sleep_at=time)

    def test_create_required_kwargs(self):
        # `topology` is mandatory for create()
        with self.assertRaises(AttributeError) as err:
            self.api.create()
        self.assertTrue('requires topology' in str(err.exception))
| air_sdk-main | tests/simulation.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for link.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import link
class TestLink(TestCase):
    """Tests for the Link model."""

    def setUp(self):
        self.link = link.Link(MagicMock())
        self.link.id = 'abc123'

    def test_init_(self):
        # Links support both deletion and updates
        self.assertTrue(self.link._deletable)
        self.assertTrue(self.link._updatable)

    def test_repr(self):
        self.assertEqual(str(self.link), f'<Link {self.link.id}>')

    def test_repr_deleted(self):
        self.link._deleted = True
        self.assertTrue('Deleted Object' in str(self.link))
class TestLinkApi(TestCase):
    """Tests for the LinkApi high-level interface."""

    def setUp(self):
        # Mocked HTTP client; LinkApi builds its base URL from client.api_url
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = link.LinkApi(self.client)

    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/link/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        # Extra kwargs are forwarded as query parameters
        self.client.get.assert_called_with(f'{self.client.api_url}/link/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, link.Link)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc', 'interfaces': ['foo']},
                                                          {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/link/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], link.Link)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(topology='abc123', interfaces=['def123'])
        self.client.post.assert_called_with(f'{self.client.api_url}/link/',
                                            json={'topology': 'abc123', 'interfaces': ['def123']})
        # Creation is expected to return HTTP 201
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, link.Link)
        self.assertEqual(res.id, 'abc')

    def test_create_required_kwargs(self):
        # Both `topology` and `interfaces` are mandatory for create()
        with self.assertRaises(AttributeError) as err:
            self.api.create(interfaces=[])
        self.assertTrue('requires topology' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(topology='abc123')
        self.assertTrue('requires interfaces' in str(err.exception))
| air_sdk-main | tests/link.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for ssh_key.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import ssh_key
class TestSSHKey(TestCase):
    """Tests for the SSHKey model."""

    def setUp(self):
        self.key = ssh_key.SSHKey(MagicMock())
        self.key.id = 'abc123'
        self.key.name = 'public'

    def test_init_(self):
        # SSH keys can be deleted but never updated in place
        self.assertTrue(self.key._deletable)
        self.assertFalse(self.key._updatable)

    def test_repr(self):
        self.assertEqual(str(self.key), f'<SSHKey {self.key.name} {self.key.id}>')

    def test_repr_deleted(self):
        self.key._deleted = True
        self.assertTrue('Deleted Object' in str(self.key))
class TestSSHKeyApi(TestCase):
    """Tests for the SSHKeyApi high-level interface."""

    def setUp(self):
        # Mocked HTTP client; SSHKeyApi builds its base URL from client.api_url
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = ssh_key.SSHKeyApi(self.client)

    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/sshkey/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        # Extra kwargs are forwarded as query parameters
        self.client.get.assert_called_with(f'{self.client.api_url}/sshkey/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], ssh_key.SSHKey)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(public_key='abc123', name='test')
        self.client.post.assert_called_with(f'{self.client.api_url}/sshkey/',
                                            json={'public_key': 'abc123', 'name': 'test'})
        # Creation is expected to return HTTP 201
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, ssh_key.SSHKey)
        self.assertEqual(res.id, 'abc')

    def test_create_required_kwargs(self):
        # Both `public_key` and `name` are mandatory for create()
        with self.assertRaises(AttributeError) as err:
            self.api.create(name='test')
        self.assertTrue('requires public_key' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(public_key='abc123')
        self.assertTrue('requires name' in str(err.exception))
| air_sdk-main | tests/ssh_key.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for worker.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import worker
class TestWorker(TestCase):
    """Tests for the Worker model."""

    def setUp(self):
        self.api = MagicMock()
        self.worker = worker.Worker(self.api)
        self.worker.fqdn = 'test.test'
        self.worker.id = 'abc123'

    def test_init_(self):
        # Workers may be updated but never deleted through the SDK
        self.assertFalse(self.worker._deletable)
        self.assertTrue(self.worker._updatable)

    def test_repr(self):
        self.assertEqual(str(self.worker), f'<Worker {self.worker.fqdn} {self.worker.id}>')

    def test_repr_deleted(self):
        self.worker._deleted = True
        self.assertTrue('Deleted Object' in str(self.worker))

    @patch('air_sdk.air_sdk.air_model.AirModel._patch')
    def test_set_available(self, mock_patch):
        # set_available() should flip the flag locally and persist it via PATCH
        self.worker.available = False
        self.worker.set_available(True)
        self.assertTrue(self.worker.available)
        mock_patch.assert_called()
class TestWorkerApi(TestCase):
    """Tests for the WorkerApi high-level interface."""

    def setUp(self):
        # Mocked HTTP client; WorkerApi builds its base URL from client.api_url
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = worker.WorkerApi(self.client)

    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/worker/')

    @patch('air_sdk.air_sdk.worker.WorkerApi.list')
    def test_get_workers(self, mock_list):
        # get_workers() proxies straight through to list()
        res = self.api.get_workers(foo='bar')
        mock_list.assert_called_with(foo='bar')
        self.assertEqual(res, mock_list.return_value)

    @patch('air_sdk.air_sdk.worker.WorkerApi.get')
    def test_update_worker(self, mock_get):
        # update_worker() fetches the object then calls update() on it
        res = self.api.update_worker('abc123', foo='bar')
        mock_get.assert_called_with('abc123')
        mock_get.return_value.update.assert_called_with(foo='bar')
        self.assertEqual(res, mock_get.return_value.update.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        # Extra kwargs are forwarded as query parameters
        self.client.get.assert_called_with(f'{self.client.api_url}/worker/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, worker.Worker)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/worker/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], worker.Worker)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(cpu=1, memory=2, storage=3, ip_address='10.1.1.1', port_range='1-2',
                              username='foo', password='bar')
        self.client.post.assert_called_with(f'{self.client.api_url}/worker/',
                                            json={'cpu': 1, 'memory': 2, 'storage': 3,
                                                  'ip_address': '10.1.1.1', 'port_range': '1-2',
                                                  'username': 'foo', 'password': 'bar'})
        # Creation is expected to return HTTP 201
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, worker.Worker)
        self.assertEqual(res.id, 'abc')

    def test_create_required_kwargs(self):
        # Each required kwarg is dropped in turn to verify every mandatory field
        with self.assertRaises(AttributeError) as err:
            self.api.create(memory=2, storage=3, ip_address='10.1.1.1', port_range='1-2',
                            username='foo', password='bar')
        self.assertTrue('requires cpu' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(cpu=1, storage=3, ip_address='10.1.1.1', port_range='1-2',
                            username='foo', password='bar')
        self.assertTrue('requires memory' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(cpu=1, memory=2, ip_address='10.1.1.1', port_range='1-2',
                            username='foo', password='bar')
        self.assertTrue('requires storage' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(cpu=1, memory=2, storage=3, port_range='1-2', username='foo',
                            password='bar')
        self.assertTrue('requires ip_address' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(cpu=1, memory=2, storage=3, ip_address='10.1.1.1', username='foo',
                            password='bar')
        self.assertTrue('requires port_range' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(cpu=1, memory=2, storage=3, ip_address='10.1.1.1', port_range='1-2',
                            password='bar')
        self.assertTrue('requires username' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(cpu=1, memory=2, storage=3, ip_address='10.1.1.1', port_range='1-2',
                            username='foo')
        self.assertTrue('requires password' in str(err.exception))
| air_sdk-main | tests/worker.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for air_api.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,protected-access
#pylint: disable=arguments-differ,unused-argument,no-member,too-many-public-methods
from json import JSONDecodeError
from unittest import TestCase
from unittest.mock import MagicMock, patch
import datetime as dt
import pytest
import requests
from ..air_sdk import air_api
from ..air_sdk.account import AccountApi
from ..air_sdk.air_model import AirModel, LazyLoaded
from ..air_sdk.capacity import CapacityApi
from ..air_sdk.demo import DemoApi
from ..air_sdk.exceptions import AirAuthorizationError, AirForbiddenError, AirUnexpectedResponse
from ..air_sdk.image import ImageApi
from ..air_sdk.interface import InterfaceApi
from ..air_sdk.job import JobApi
from ..air_sdk.link import LinkApi
from ..air_sdk.login import LoginApi
from ..air_sdk.marketplace import MarketplaceApi
from ..air_sdk.node import NodeApi
from ..air_sdk.organization import OrganizationApi
from ..air_sdk.permission import PermissionApi
from ..air_sdk.resource_budget import ResourceBudgetApi
from ..air_sdk.service import ServiceApi
from ..air_sdk.ssh_key import SSHKeyApi
from ..air_sdk.simulation import SimulationApi
from ..air_sdk.simulation_interface import SimulationInterfaceApi
from ..air_sdk.simulation_node import SimulationNodeApi
from ..air_sdk.token import TokenApi
from ..air_sdk.topology import TopologyApi
from ..air_sdk.worker import WorkerApi
class TestAirSession(TestCase):
    """Tests for the AirSession requests.Session subclass."""

    def setUp(self):
        self.session = air_api.AirSession()

    def test_init(self):
        self.assertIsInstance(self.session, requests.Session)

    @patch('air_sdk.air_sdk.air_api.requests.Session.rebuild_auth')
    @patch('air_sdk.air_sdk.air_api.urlparse')
    def test_rebuild_auth_allowed(self, mock_parse, mock_rebuild):
        parsed = MagicMock()
        parsed.hostname = 'air.nvidia.com'
        mock_parse.return_value = parsed
        request = MagicMock()
        self.session.rebuild_auth(request, None)
        mock_parse.assert_called_with(request.url)
        # Auth headers are preserved for a trusted Air hostname (base behavior skipped)
        mock_rebuild.assert_not_called()

    @patch('air_sdk.air_sdk.air_api.requests.Session.rebuild_auth')
    @patch('air_sdk.air_sdk.air_api.urlparse')
    def test_rebuild_auth_not_allowed(self, mock_parse, mock_rebuild):
        parsed = MagicMock()
        parsed.hostname = 'air.evil.com'
        mock_parse.return_value = parsed
        request = MagicMock()
        response = MagicMock()
        self.session.rebuild_auth(request, response)
        # Untrusted hosts fall back to the default auth-stripping behavior
        mock_rebuild.assert_called_with(request, response)
class TestAirApi(TestCase):
@patch('air_sdk.air_sdk.air_api.AirSession')
@patch('air_sdk.air_sdk.util.raise_if_invalid_response')
def setUp(self, mock_raise, mock_session):
self.session = mock_session
self.req = self.session.return_value
self.req.headers = {}
self.api = air_api.AirApi('http://test/api/', 'v1', bearer_token='foo')
def test_init(self):
self.assertEqual(self.api.client, self.req)
self.assertEqual(self.api.client.headers['content-type'], 'application/json')
self.assertEqual(self.api.api_url, 'http://test/api/v1')
self.assertEqual(self.api.token, 'foo')
self.assertIsNone(self.api.username)
@patch('air_sdk.air_sdk.air_api.AirApi.authorize')
def test_init_authorize(self, mock_auth):
self.api = air_api.AirApi('http://test/api/', 'v1', bearer_token='foo')
mock_auth.assert_called_with(bearer_token='foo')
def test_accounts(self):
self.assertIsInstance(self.api.accounts, AccountApi)
def test_api_tokens(self):
self.assertIsInstance(self.api.api_tokens, TokenApi)
def test_capacity(self):
self.assertIsInstance(self.api.capacity, CapacityApi)
def test_demos(self):
self.assertIsInstance(self.api.demos, DemoApi)
def test_images(self):
self.assertIsInstance(self.api.images, ImageApi)
def test_interfaces(self):
self.assertIsInstance(self.api.interfaces, InterfaceApi)
def test_jobs(self):
self.assertIsInstance(self.api.jobs, JobApi)
def test_links(self):
self.assertIsInstance(self.api.links, LinkApi)
def test_login(self):
self.assertIsInstance(self.api.login, LoginApi)
def test_marketplace(self):
self.assertIsInstance(self.api.marketplace, MarketplaceApi)
def test_node(self):
self.assertIsInstance(self.api.node, NodeApi)
def test_nodes(self):
self.assertIsInstance(self.api.nodes, NodeApi)
def test_organizations(self):
self.assertIsInstance(self.api.organizations, OrganizationApi)
def test_permission(self):
self.assertIsInstance(self.api.permission, PermissionApi)
def test_permissions(self):
self.assertIsInstance(self.api.permissions, PermissionApi)
def test_resource_budgets(self):
self.assertIsInstance(self.api.resource_budgets, ResourceBudgetApi)
def test_service(self):
self.assertIsInstance(self.api.service, ServiceApi)
def test_services(self):
self.assertIsInstance(self.api.services, ServiceApi)
def test_simulation(self):
self.assertIsInstance(self.api.simulation, SimulationApi)
def test_simulations(self):
self.assertIsInstance(self.api.simulations, SimulationApi)
def test_simulation_interface(self):
self.assertIsInstance(self.api.simulation_interface, SimulationInterfaceApi)
def test_simulation_interfaces(self):
self.assertIsInstance(self.api.simulation_interfaces, SimulationInterfaceApi)
def test_simulation_node(self):
self.assertIsInstance(self.api.simulation_node, SimulationNodeApi)
def test_simulation_nodes(self):
self.assertIsInstance(self.api.simulation_nodes, SimulationNodeApi)
def test_ssh_keys(self):
self.assertIsInstance(self.api.ssh_keys, SSHKeyApi)
def test_topology(self):
self.assertIsInstance(self.api.topology, TopologyApi)
def test_topologies(self):
self.assertIsInstance(self.api.topologies, TopologyApi)
def test_worker(self):
self.assertIsInstance(self.api.worker, WorkerApi)
def test_workers(self):
self.assertIsInstance(self.api.workers, WorkerApi)
@patch('air_sdk.air_sdk.login.LoginApi.list')
def test_authorize_token(self, mock_login):
mock_login.return_value.username = 'john'
self.api.authorize(bearer_token='foo')
self.assertEqual(self.api.token, 'foo')
self.assertEqual(self.api.client.headers['authorization'], 'Bearer foo')
self.assertEqual(self.api.username, 'john')
@patch('air_sdk.air_sdk.login.LoginApi.list')
def test_authorize_password(self, mock_login):
mock_login.return_value.username = 'john'
self.api.get_token = MagicMock(return_value='abc123')
self.api.authorize(username='foo', password='bar')
self.assertEqual(self.api.token, 'abc123')
self.assertEqual(self.api.client.headers['authorization'], 'Bearer abc123')
self.assertEqual(self.api.username, 'john')
def test_authorize_bad_args(self):
with pytest.raises(ValueError) as err:
self.api.authorize()
self.assertEqual(str(err.value), 'Must include either `bearer_token` or ' + \
'`username` and `password` arguments')
@patch('air_sdk.air_sdk.air_api.AirApi.post')
def test_get_token(self, mock_post):
mock_post.return_value.json.return_value = {'token': 'abc123'}
res = self.api.get_token('foo', 'bar')
self.assertEqual(res, 'abc123')
mock_post.assert_called_with('http://test/api/v1/login/',
json={'username': 'foo', 'password': 'bar'})
@patch('air_sdk.air_sdk.air_api.AirApi.post')
def test_get_token_no_token(self, mock_post):
mock_post.return_value.json.return_value = {'redirect': 'http://test'}
with self.assertRaises(AirAuthorizationError) as err:
self.api.get_token('foo', 'bar')
self.assertEqual(err.exception.message, 'API did not provide a token for foo')
@patch('air_sdk.air_sdk.air_api.AirApi.post')
def test_get_token_bad_json(self, mock_post):
mock_post.return_value.json.side_effect = JSONDecodeError('', '{}', 1)
with self.assertRaises(AirAuthorizationError) as err:
self.api.get_token('foo', 'bar')
self.assertEqual(err.exception.message, 'API did not return a valid JSON response')
def test_request(self):
res = self.api._request('GET', 'http://test/', 'test', foo='bar')
self.api.client.request.assert_called_with('GET', 'http://test/', 'test',
allow_redirects=False, foo='bar')
self.assertEqual(res, self.api.client.request.return_value)
def test_request_403(self):
self.api.client.request.return_value.status_code = 403
with self.assertRaises(AirForbiddenError):
self.api._request('GET', 'http://test/', 'test', foo='bar')
def test_request_raises(self):
mock_res = MagicMock()
self.api.client.request.return_value.raise_for_status = \
MagicMock(side_effect=requests.exceptions.HTTPError(response=mock_res))
with self.assertRaises(AirUnexpectedResponse) as err:
self.api._request('GET', 'http://test/', 'test', foo='bar')
self.assertEqual(err.exception.message,
'Received an unexpected response from the Air API ' + \
f'({mock_res.status_code}): {mock_res.text}')
self.assertEqual(err.exception.status_code, mock_res.status_code)
    @patch('air_sdk.air_sdk.air_api._serialize_dict')
    def test_request_serialized_json_list(self, mock_serialize):
        """A list payload is serialized item-by-item via _serialize_dict."""
        serialized = ['serialized_foo', 'serialized_bar']
        mock_serialize.side_effect = serialized
        data = ['foo', 'bar']
        self.api._request('GET', 'http://test/', json=data)
        # Build the expected call sequence on a scratch mock for comparison
        mock_for_assert = MagicMock()
        mock_for_assert(data[0])
        mock_for_assert(data[1])
        self.assertEqual(mock_serialize.mock_calls, mock_for_assert.mock_calls)
        self.api.client.request.assert_called_with('GET', 'http://test/', allow_redirects=False,
                                                   json=[serialized[0], serialized[1]])
    @patch('air_sdk.air_sdk.air_api._serialize_dict')
    def test_request_serialized_json(self, mock_serialize):
        """A non-list json payload is passed through _serialize_dict once."""
        self.api._request('GET', 'http://test/', json='foo')
        mock_serialize.assert_called_with('foo')
        self.api.client.request.assert_called_with('GET', 'http://test/', allow_redirects=False,
                                                   json=mock_serialize.return_value)
    @patch('air_sdk.air_sdk.air_api._serialize_dict')
    def test_request_serialized_params(self, mock_serialize):
        """Query params are passed through _serialize_dict as well."""
        self.api._request('GET', 'http://test/', params='foo')
        mock_serialize.assert_called_with('foo')
        self.api.client.request.assert_called_with('GET', 'http://test/', allow_redirects=False,
                                                   params=mock_serialize.return_value)
    def test_request_redirect(self):
        """A 301 to a trusted host is re-requested at the Location URL."""
        self.api.client.request.return_value.status_code = 301
        self.api.client.request.return_value.headers = {'Location': 'http://air.nvidia.com/'}
        self.api._request('GET', 'http://test/', json={'foo': 'bar'})
        self.api.client.request.assert_called_with('GET', 'http://air.nvidia.com/',
                                                   json={'foo': 'bar'})
        # Three client calls in total are expected when the redirect is followed
        self.assertEqual(self.api.client.request.call_count, 3)
    def test_request_redirect_ignored(self):
        """A 301 to an untrusted host is not followed (fewer client calls)."""
        self.api.client.request.return_value.status_code = 301
        self.api.client.request.return_value.headers = {'Location': 'http://air.evil.com/'}
        self.api._request('GET', 'http://test/', json={'foo': 'bar'})
        self.assertEqual(self.api.client.request.call_count, 2)
    def test_get(self):
        """get() delegates to _request() with the GET verb."""
        self.api._request = MagicMock()
        self.api.get('http://test/api/v1/foo/', 'arg1', arg2='test')
        self.api._request.assert_called_with('GET', 'http://test/api/v1/foo/', 'arg1', arg2='test')
    def test_post(self):
        """post() delegates to _request() with the POST verb."""
        self.api._request = MagicMock()
        self.api.post('http://test/api/v1/foo/', 'arg1', arg2='test')
        self.api._request.assert_called_with('POST', 'http://test/api/v1/foo/', 'arg1', arg2='test')
    def test_put(self):
        """put() delegates to _request() with the PUT verb."""
        self.api._request = MagicMock()
        self.api.put('http://test/api/v1/foo/', 'arg1', arg2='test')
        self.api._request.assert_called_with('PUT', 'http://test/api/v1/foo/', 'arg1', arg2='test')
    def test_patch(self):
        """patch() delegates to _request() with the PATCH verb."""
        self.api._request = MagicMock()
        self.api.patch('http://test/api/v1/foo/', 'arg1', arg2='test')
        self.api._request.assert_called_with('PATCH', 'http://test/api/v1/foo/', 'arg1',
                                             arg2='test')
    def test_delete(self):
        """delete() delegates to _request() with the DELETE verb."""
        self.api._request = MagicMock()
        self.api.delete('http://test/api/v1/foo/', 'arg1', arg2='test')
        self.api._request.assert_called_with('DELETE', 'http://test/api/v1/foo/', 'arg1',
                                             arg2='test')
class TestHelpers(TestCase):
    """Tests for the module-level helper functions in air_api."""

    def test_normalize_api_version(self):
        """A 'v'-prefixed version string is returned unchanged."""
        res = air_api._normalize_api_version('v1')
        self.assertEqual(res, 'v1')

    def test_normalize_api_version_str(self):
        """A bare numeric string gets a 'v' prefix."""
        res = air_api._normalize_api_version('1')
        self.assertEqual(res, 'v1')

    def test_normalize_api_version_int(self):
        """An integer version is normalized to a 'v'-prefixed string."""
        res = air_api._normalize_api_version(1)
        self.assertEqual(res, 'v1')

    def test_normalize_api_url(self):
        """A URL already ending in /api/ is returned unchanged."""
        res = air_api._normalize_api_url('http://localhost/api/')
        self.assertEqual(res, 'http://localhost/api/')

    def test_normalize_api_url_nothing(self):
        """A bare host URL gets /api/ appended."""
        res = air_api._normalize_api_url('http://localhost')
        self.assertEqual(res, 'http://localhost/api/')

    def test_normalize_api_url_no_slash(self):
        """A URL ending in /api gets a trailing slash appended."""
        res = air_api._normalize_api_url('http://localhost/api')
        self.assertEqual(res, 'http://localhost/api/')

    def test_serialize_dict_air_model(self):
        """AirModel values are serialized down to their id."""
        mock_model = AirModel(MagicMock(), id='abc123')
        test_dict = {'test': mock_model}
        res = air_api._serialize_dict(test_dict)
        self.assertDictEqual(res, {'test': 'abc123'})

    def test_serialize_dict_lazy_load(self):
        """LazyLoaded values are serialized down to their id."""
        mock_model = LazyLoaded('abc123', 'test')
        test_dict = {'test': mock_model}
        res = air_api._serialize_dict(test_dict)
        self.assertDictEqual(res, {'test': 'abc123'})

    def test_serialize_dict_dict(self):
        """Plain nested dicts pass through unchanged."""
        test_dict = {'test': {'foo': 'bar'}}
        res = air_api._serialize_dict(test_dict)
        self.assertDictEqual(res, {'test': {'foo': 'bar'}})

    @patch('air_sdk.air_sdk.air_api._serialize_list')
    def test_serialize_dict_list(self, mock_list):
        """List values are delegated to _serialize_list."""
        test_dict = {'test': ['foo']}
        res = air_api._serialize_dict(test_dict)
        mock_list.assert_called_with(['foo'])
        self.assertDictEqual(res, {'test': mock_list.return_value})

    def test_serialize_dict_datetime(self):
        """datetime values are rendered in ISO-8601 format."""
        time = dt.datetime(2030, 12, 12, 22, 5, 3)
        test_dict = {'test': {'foo': time}}
        res = air_api._serialize_dict(test_dict)
        self.assertDictEqual(res, {'test': {'foo': '2030-12-12T22:05:03'}})

    def test_serialize_dict(self):
        """Plain string values pass through unchanged."""
        test_dict = {'test': 'foo'}
        res = air_api._serialize_dict(test_dict)
        self.assertDictEqual(res, {'test': 'foo'})

    def test_serialize_dict_private(self):
        """Keys with a leading underscore are stripped from the result."""
        test_dict = {'test': 'foo', '_private': 'bar'}
        res = air_api._serialize_dict(test_dict)
        self.assertDictEqual(res, {'test': 'foo'})

    def test_serialize_list_air_model(self):
        """AirModel items are serialized down to their id."""
        mock_model = AirModel(MagicMock(), id='abc123')
        test_list = [mock_model]
        res = air_api._serialize_list(test_list)
        self.assertListEqual(res, ['abc123'])

    def test_serialize_list_lazy_load(self):
        """LazyLoaded items are serialized down to their id."""
        mock_model = LazyLoaded('abc123', 'test')
        test_list = [mock_model]
        res = air_api._serialize_list(test_list)
        self.assertListEqual(res, ['abc123'])

    @patch('air_sdk.air_sdk.air_api._serialize_dict')
    def test_serialize_list_dict(self, mock_dict):
        """Dict items are delegated to _serialize_dict.

        Bug fix: the original test invoked ``mock_dict.called_with(...)`` before
        the code under test ran. ``called_with`` is not an assertion method -- it
        silently creates a child mock attribute and never fails. Replaced with a
        real ``assert_called_with`` made after ``_serialize_list`` executes.
        """
        test_list = [{'foo': 'bar'}]
        res = air_api._serialize_list(test_list)
        mock_dict.assert_called_with({'foo': 'bar'})
        self.assertListEqual(res, [mock_dict.return_value])

    def test_serialize_list_list(self):
        """Nested lists pass through unchanged."""
        test_list = [['foo']]
        res = air_api._serialize_list(test_list)
        self.assertListEqual(res, test_list)

    def test_serialize_list(self):
        """Plain string items pass through unchanged."""
        test_list = ['test']
        res = air_api._serialize_list(test_list)
        self.assertListEqual(res, ['test'])

    def test_serialize_list_private(self):
        """Items with a leading underscore are stripped from the result."""
        test_list = ['_private', 'test']
        res = air_api._serialize_list(test_list)
        self.assertListEqual(res, ['test'])
| air_sdk-main | tests/air_api.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for service.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,unused-argument
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import service
from ..air_sdk.air_model import AirModel
class TestService(TestCase):
    """Unit tests for the Service model."""

    def setUp(self):
        mock_api = MagicMock()
        self.model = service.Service(mock_api)
        self.model.name = 'http'
        self.model.id = 'abc123'

    def test_init_(self):
        """Services are both deletable and updatable."""
        for flag in (self.model._deletable, self.model._updatable):
            self.assertTrue(flag)

    def test_repr(self):
        """repr() includes the service name and id."""
        expected = f'<Service {self.model.name} {self.model.id}>'
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        """repr() flags an instance marked as deleted."""
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))
class TestServiceApi(TestCase):
    """Tests for ServiceApi, the /service/ endpoint wrapper."""

    def setUp(self):
        # Fake API client; no network traffic occurs in these tests
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = service.ServiceApi(self.client)

    def test_init_(self):
        """The constructor stores the client and builds the endpoint URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/service/')

    @patch('air_sdk.air_sdk.service.ServiceApi.list')
    def test_get_services(self, mock_list):
        """get_services() delegates to list()."""
        self.assertEqual(self.api.get_services(), mock_list.return_value)

    @patch('air_sdk.air_sdk.service.ServiceApi.get')
    def test_get_service(self, mock_get):
        """get_service() delegates to get() with the service id."""
        res = self.api.get_service('abc123')
        mock_get.assert_called_with('abc123')
        self.assertEqual(res, mock_get.return_value)

    @patch('air_sdk.air_sdk.service.ServiceApi.create')
    def test_create_service(self, mock_create):
        """create_service() maps its positional args onto create() kwargs."""
        res = self.api.create_service('abc123', 'test', 'intf', 22, foo='bar')
        mock_create.assert_called_with(simulation='abc123', name='test', interface='intf',
                                       dest_port=22, foo='bar')
        self.assertEqual(res, mock_create.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() GETs the detail URL and returns a Service instance."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/service/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, service.Service)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() GETs the collection and returns Service instances."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/service/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], service.Service)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    @patch('air_sdk.air_sdk.service.ServiceApi._resolve_interface')
    def test_create(self, mock_resolve, mock_raise):
        """create() resolves a 'node:interface' string before POSTing."""
        sim = AirModel(MagicMock(), id='xyz123')
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(name='abc123', simulation=sim, interface='test:eth0')
        mock_resolve.assert_called_with('test:eth0', sim)
        self.client.post.assert_called_with(f'{self.client.api_url}/service/',
                                            json={'name': 'abc123', 'simulation': sim,
                                                  'interface': mock_resolve.return_value})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, service.Service)
        self.assertEqual(res.id, 'abc')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    @patch('air_sdk.air_sdk.service.ServiceApi._resolve_interface')
    def test_create_id(self, mock_resolve, mock_raise):
        """An interface passed as a plain id skips interface resolution."""
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        self.api.create(name='abc123', simulation='xyz123', interface='test123')
        mock_resolve.assert_not_called()
        self.client.post.assert_called_with(f'{self.client.api_url}/service/',
                                            json={'name': 'abc123', 'simulation': 'xyz123',
                                                  'interface': 'test123'})

    def test_create_required_kwargs(self):
        """create() requires name, simulation, and interface arguments."""
        with self.assertRaises(AttributeError) as err:
            self.api.create(simulation='abc123', interface='xyz123')
        self.assertTrue('requires name' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(name='test', interface='xyz123')
        self.assertTrue('requires simulation' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(name='test', simulation='xyz123')
        self.assertTrue('requires interface' in str(err.exception))

    def test_resolve_interface(self):
        """_resolve_interface() maps 'node:intf' to a simulation interface."""
        intf1 = MagicMock()
        intf1.name = 'eth0'
        intf2 = MagicMock()
        intf2.name = 'eth1'
        node1 = MagicMock()
        node1.name = 'server'
        node1.interfaces = [intf1, intf2]
        node2 = MagicMock()
        node2.name = 'foo'
        node2.interfaces = [intf1, intf2]
        self.client.nodes.list.return_value = [node1, node2]
        mock_simint = MagicMock()
        self.client.simulation_interfaces.list.return_value = [mock_simint]
        res = self.api._resolve_interface('server:eth0', 'abc123')
        self.client.nodes.list.assert_called_with(simulation='abc123')
        self.client.simulation_interfaces.list.assert_called_with(original=intf1,
                                                                  simulation='abc123')
        self.assertEqual(res, mock_simint)

    def test_resolve_interface_bad_input(self):
        """A string without a ':' separator is rejected."""
        with self.assertRaises(ValueError) as err:
            self.api._resolve_interface('eth0', 'abc123')
        self.assertEqual(str(err.exception),
                         '`interface` must be an Interface object or in the format of ' + \
                         '"node_name:interface_name"')

    def test_resolve_interface_not_found(self):
        """An interface that matches no node raises ValueError."""
        with self.assertRaises(ValueError) as err:
            self.api._resolve_interface('server:eth0', 'abc123')
        self.assertEqual(str(err.exception), 'Interface server:eth0 does not exist')
| air_sdk-main | tests/service.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for capacity.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,unused-argument
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import capacity
from ..air_sdk.simulation import Simulation
class TestCapacity(TestCase):
    """Unit tests for the Capacity model."""

    def setUp(self):
        self.model = capacity.Capacity(MagicMock())
        self.model.copies = 30

    def test_init_(self):
        """Capacity objects can be neither deleted nor updated."""
        for flag in (self.model._deletable, self.model._updatable):
            self.assertFalse(flag)

    def test_repr(self):
        """repr() reports the number of copies."""
        expected = f'<Capacity {self.model.copies}>'
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        """repr() flags an instance marked as deleted."""
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))
class TestCapacityApi(TestCase):
    """Tests for CapacityApi, the /capacity/ endpoint wrapper."""

    def setUp(self):
        # Fake API client; no network traffic occurs in these tests
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = capacity.CapacityApi(self.client)

    def test_init_(self):
        """The constructor stores the client and builds the endpoint URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/capacity/')

    @patch('air_sdk.air_sdk.capacity.CapacityApi.get')
    def test_get_capacity_by_sim(self, mock_get):
        """get_capacity() accepts a simulation object and uses its id."""
        mock_sim = MagicMock()
        res = self.api.get_capacity(mock_sim)
        mock_get.assert_called_with(simulation_id=mock_sim.id)
        self.assertEqual(res, mock_get.return_value)

    @patch('air_sdk.air_sdk.capacity.CapacityApi.get')
    def test_get_capacity_by_id(self, mock_get):
        """get_capacity() also accepts a plain simulation_id."""
        res = self.api.get_capacity(simulation_id='abc123')
        mock_get.assert_called_with(simulation_id='abc123')
        self.assertEqual(res, mock_get.return_value)

    def test_get_capacity_missing_param(self):
        """Omitting both simulation and simulation_id raises ValueError."""
        with self.assertRaises(ValueError) as err:
            self.api.get_capacity()
        self.assertEqual(str(err.exception), 'Must pass a simulation or simulation_id argument')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() GETs the detail URL and returns a Capacity instance."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/capacity/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, capacity.Capacity)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get_simulation(self, mock_raise):
        """get() accepts a Simulation object and uses its id in the URL."""
        sim = Simulation(MagicMock())
        sim.id = 'abc123'
        self.api.get(sim)
        self.client.get.assert_called_with(f'{self.client.api_url}/capacity/abc123/', params={})
| air_sdk-main | tests/capacity.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for token.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import token
class TestAPIToken(TestCase):
    """Unit tests for the Token model's repr variants."""

    def setUp(self):
        # Only `name` is set here; individual tests set `id`/`token` as needed
        self.model = token.Token(MagicMock())
        self.model.name = 'public'

    def test_init_(self):
        """Tokens are deletable but not updatable."""
        self.assertTrue(self.model._deletable)
        self.assertFalse(self.model._updatable)

    def test_repr(self):
        """With neither id nor token set, repr() shows only the name."""
        self.assertEqual(str(self.model), f'<Token {self.model.name}>')

    def test_repr_id(self):
        """When an id is set, repr() includes it after the name."""
        self.model.id = 'abc123'
        self.assertEqual(str(self.model), f'<Token {self.model.name} {self.model.id}>')

    def test_repr_token(self):
        """When a token value is set, repr() includes it after the name."""
        self.model.token = 'abc123'
        self.assertEqual(str(self.model), f'<Token {self.model.name} {self.model.token}>')

    def test_repr_deleted(self):
        """repr() flags an instance marked as deleted."""
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))
class TestTokenApi(TestCase):
    """Tests for TokenApi, the /api-token/ endpoint wrapper."""

    def setUp(self):
        # Fake API client; no network traffic occurs in these tests
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = token.TokenApi(self.client)

    def test_init_(self):
        """The constructor stores the client and builds the endpoint URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/api-token/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() GETs the collection and returns Token instances."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/api-token/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], token.Token)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        """create() POSTs the payload and expects a 201 response."""
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(name='test')
        self.client.post.assert_called_with(f'{self.client.api_url}/api-token/',
                                            json={'name': 'test'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, token.Token)
        self.assertEqual(res.id, 'abc')

    def test_create_required_kwargs(self):
        """create() requires a name argument."""
        with self.assertRaises(AttributeError) as err:
            self.api.create()
        self.assertTrue('requires name' in str(err.exception))

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_delete(self, mock_raise):
        """delete() issues a DELETE and expects a bodiless 204 response."""
        self.api.delete('abc')
        self.client.delete.assert_called_with(f'{self.client.api_url}/api-token/abc/',
                                              params={})
        mock_raise.assert_called_with(self.client.delete.return_value, status_code=204,
                                      data_type=None)
| air_sdk-main | tests/token.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for permission.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import permission
class TestPermission(TestCase):
    """Unit tests for the Permission model."""

    def setUp(self):
        mock_api = MagicMock()
        self.model = permission.Permission(mock_api)
        self.model.id = 'abc123'

    def test_init_(self):
        """Permissions are deletable but not updatable."""
        self.assertTrue(self.model._deletable)
        self.assertFalse(self.model._updatable)

    def test_repr(self):
        """repr() includes the permission id."""
        expected = f'<Permission {self.model.id}>'
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        """repr() flags an instance marked as deleted."""
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))
class TestPermissionApi(TestCase):
    """Tests for PermissionApi, the /permission/ endpoint wrapper."""

    def setUp(self):
        # Fake API client; no network traffic occurs in these tests
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = permission.PermissionApi(self.client)

    def test_init_(self):
        """The constructor stores the client and builds the endpoint URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/permission/')

    @patch('air_sdk.air_sdk.permission.PermissionApi.create')
    def test_create_permission(self, mock_create):
        """create_permission() delegates to create() with email as a kwarg."""
        res = self.api.create_permission('[email protected]', foo='bar')
        mock_create.assert_called_with(email='[email protected]', foo='bar')
        self.assertEqual(res, mock_create.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() GETs the detail URL and returns a Permission instance."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/permission/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, permission.Permission)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() GETs the collection and returns Permission instances."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/permission/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], permission.Permission)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        """create() POSTs the payload and expects a 201 response."""
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(simulation='abc123', email='[email protected]')
        self.client.post.assert_called_with(f'{self.client.api_url}/permission/',
                                            json={'simulation': 'abc123', 'email': '[email protected]'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, permission.Permission)
        self.assertEqual(res.id, 'abc')

    def test_create_required_kwargs(self):
        """create() requires email plus one of topology/simulation/subject_id."""
        with self.assertRaises(AttributeError) as err:
            self.api.create(simulation='abc123')
        self.assertTrue('requires email' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(email='[email protected]')
        msg = 'requires one of the following: (\'topology\', \'simulation\', \'subject_id\')'
        self.assertTrue(msg in str(err.exception))
| air_sdk-main | tests/permission.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for util.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,no-self-use,unused-argument
import datetime
from json import JSONDecodeError
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import exceptions, util
class TestUtil(TestCase):
    """Tests for the helper utilities in util.py."""

    def test_raise_if_invalid_response(self):
        """A 200 with valid JSON raises nothing."""
        mock_res = MagicMock()
        mock_res.status_code = 200
        mock_res.json.return_value = {'id': 'abc123'}
        util.raise_if_invalid_response(mock_res)

    def test_raise_if_invalid_response_status(self):
        """An unexpected status code raises AirUnexpectedResponse."""
        mock_res = MagicMock()
        mock_res.status_code = 400
        with self.assertRaises(exceptions.AirUnexpectedResponse) as err:
            util.raise_if_invalid_response(mock_res)
        self.assertEqual(err.exception.message,
                         'Received an unexpected response from the Air API (400): ' +
                         str(mock_res.text))
        self.assertEqual(err.exception.status_code, 400)

    def test_raise_if_invalid_response_no_data(self):
        """data_type=None skips response-body validation entirely."""
        mock_res = MagicMock()
        mock_res.status_code = 200
        util.raise_if_invalid_response(mock_res, data_type=None)

    def test_raise_if_invalid_bad_json(self):
        """A 200 with an unparseable body raises AirUnexpectedResponse."""
        mock_res = MagicMock()
        mock_res.status_code = 200
        mock_res.json.side_effect = JSONDecodeError('test', 'foo', 0)
        with self.assertRaises(exceptions.AirUnexpectedResponse) as err:
            util.raise_if_invalid_response(mock_res)
        self.assertEqual(err.exception.message,
                         'Received an unexpected response from the Air API (200): ' +
                         str(mock_res.text))
        self.assertEqual(err.exception.status_code, 200)

    def test_raise_if_invalid_bad_data_type(self):
        """A body of the wrong type (dict vs expected list) is rejected."""
        mock_res = MagicMock()
        mock_res.status_code = 200
        mock_res.json.return_value = {}
        with self.assertRaises(exceptions.AirUnexpectedResponse) as err:
            util.raise_if_invalid_response(mock_res, data_type=list)
        self.assertEqual(err.exception.message,
                         'Received an unexpected response from the Air API (200): ' + \
                         'Expected API response to be of type <class \'list\'>, ' + \
                         'got <class \'dict\'>')
        self.assertEqual(err.exception.status_code, 200)

    def test_required_kwargs(self):
        """required_kwargs() raises when any listed kwarg is missing."""
        @util.required_kwargs(['foo', 'bar'])
        def decorated(**kwargs):
            pass
        with self.assertRaises(AttributeError) as err:
            decorated(foo='test')
        self.assertTrue('requires bar' in str(err.exception))

    def test_required_kwargs_str(self):
        """required_kwargs() also accepts a single kwarg name as a string."""
        @util.required_kwargs('foo')
        def decorated(**kwargs):
            pass
        with self.assertRaises(AttributeError) as err:
            decorated(f='test')
        self.assertTrue('requires foo' in str(err.exception))

    @patch('logging.warning')
    def test_deprecated(self, mock_log):
        """deprecated() logs a generic warning when called without a new name."""
        @util.deprecated()
        def decorated():
            pass
        decorated()
        self.assertTrue('has been deprecated and will be removed in a future release.' \
                        in mock_log.call_args[0][0])

    @patch('logging.warning')
    def test_deprecated_new(self, mock_log):
        """deprecated('new_func') names the replacement in the warning."""
        @util.deprecated('new_func')
        def decorated():
            pass
        decorated()
        self.assertTrue('has been deprecated and will be removed in a future release. ' + \
                        'Use new_func instead.' in mock_log.call_args[0][0])

    @patch('logging.warning')
    def test_validate_timestamps(self, mock_log):
        """A sleep_at timestamp in the past triggers a warning."""
        now = datetime.datetime.now()
        past = now - datetime.timedelta(hours=8)
        util.validate_timestamps('Simulation created', expires_at=None, sleep_at=past)
        log = mock_log.call_args[0][0]
        self.assertTrue(f'Simulation created with `sleep_at` in the past: {past}' in log)

    @patch('logging.warning')
    def test_validate_timestamps_future(self, mock_log):
        """An expires_at timestamp in the past triggers a warning."""
        now = datetime.datetime.now()
        past = now - datetime.timedelta(hours=8)
        future = now + datetime.timedelta(hours=8)
        util.validate_timestamps('Simulation created', expires_at=past, sleep_at=future)
        log = mock_log.call_args[0][0]
        self.assertTrue(f'Simulation created with `expires_at` in the past: {past}' in log)
| air_sdk-main | tests/util.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for organization.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,protected-access
from copy import deepcopy
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import organization
class TestOrganization(TestCase):
    """Unit tests for the Organization model's membership operations."""

    def setUp(self):
        self.api = MagicMock()
        mock_res = MagicMock()
        self.api.client.post.return_value = mock_res
        self.model = organization.Organization(self.api, id='abc123', name='NVIDIA')
        self.model._api_url = '/organization/'

    def test_init(self):
        """Organizations are deletable/updatable and expose member constants."""
        self.assertTrue(self.model._deletable)
        self.assertTrue(self.model._updatable)
        self.assertEqual(self.model.ORG_MEMBER_ROLE, 'Organization Member')
        self.assertEqual(self.model._members_api_url,
                         f'{self.api.url}{self.model.id}/members/')

    def test_repr(self):
        """repr() includes the organization name and id."""
        self.assertEqual(str(self.model), f'<Organization {self.model.name} {self.model.id}>')

    def test_repr_deleted(self):
        """repr() flags an instance marked as deleted."""
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))

    @patch('air_sdk.air_sdk.organization.Organization.refresh')
    def test_add_member(self, mock_refresh):
        """add_member() defaults to the basic member role and refreshes."""
        username = '[email protected]'
        self.model.add_member(username)
        self.api.client.post.assert_called_once_with(self.model._members_api_url,
                                                     json={'username': username,
                                                           'roles': [self.model.ORG_MEMBER_ROLE]})
        mock_refresh.assert_called_once()

    @patch('air_sdk.air_sdk.organization.Organization.refresh')
    def test_add_member_with_role(self, mock_refresh):
        """add_member() honors an explicitly supplied role list."""
        username = '[email protected]'
        role = 'test role'
        self.model.add_member(username, [role])
        self.api.client.post.assert_called_once_with(self.model._members_api_url,
                                                     json={'username': username, 'roles': [role]})
        mock_refresh.assert_called_once()

    @patch('air_sdk.air_sdk.organization.Organization.refresh')
    def test_add_members(self, mock_refresh):
        """add_members() fills in the default role for members without one."""
        member1 = {'username': '[email protected]', 'roles': ['Organization Admin']}
        member2 = {'username': '[email protected]'}
        member2_with_role = deepcopy(member2)
        member2_with_role['roles'] = [self.model.ORG_MEMBER_ROLE]
        self.model.add_members([member1, member2])
        self.api.client.post.assert_called_once_with(self.model._members_api_url,
                                                     json=[member1, member2_with_role])
        mock_refresh.assert_called_once()

    @patch('air_sdk.air_sdk.organization.Organization.refresh')
    def test_remove_member(self, mock_refresh):
        """remove_member() DELETEs the member and refreshes once.

        Fix: the original test repeated ``mock_refresh.assert_called_once()`` on
        two consecutive lines (copy-paste duplication); one check is sufficient.
        """
        username = '[email protected]'
        self.model.remove_member(username)
        self.api.client.delete.assert_called_once_with(self.model._members_api_url,
                                                       json={'username': username})
        mock_refresh.assert_called_once()

    @patch('air_sdk.air_sdk.organization.Organization.refresh')
    def test_remove_member_no_refresh(self, mock_refresh):
        """remove_member(_refresh_when_done=False) skips the refresh."""
        username = '[email protected]'
        self.model.remove_member(username, _refresh_when_done=False)
        self.api.client.delete.assert_called_once()
        mock_refresh.assert_not_called()

    @patch('air_sdk.air_sdk.organization.Organization.refresh')
    @patch('air_sdk.air_sdk.organization.Organization.remove_member')
    def test_remove_members(self, mock_remove, mock_refresh):
        """remove_members() removes each member with a single final refresh."""
        members = ['[email protected]', '[email protected]']
        self.model.remove_members(members)
        # Build the expected call sequence on a scratch mock for comparison
        mock_for_assert = MagicMock()
        mock_for_assert(members[0], _refresh_when_done=False)
        mock_for_assert(members[1], _refresh_when_done=False)
        self.assertEqual(mock_remove.mock_calls, mock_for_assert.mock_calls)
        mock_refresh.assert_called_once()
class TestOrganizationApi(TestCase):
    """Tests for OrganizationApi, the /organization/ endpoint wrapper."""

    def setUp(self):
        # Fake API client; no network traffic occurs in these tests
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = organization.OrganizationApi(self.client)

    def test_init_(self):
        """The constructor stores the client and builds the endpoint URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/organization/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() GETs the detail URL and returns an Organization instance."""
        org_id = 'abc123'
        self.client.get.return_value.json.return_value = {'id': org_id, 'name': 'test'}
        res = self.api.get('abc123', foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/organization/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, organization.Organization)
        self.assertEqual(res.id, org_id)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() GETs the collection and returns Organization instances."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/organization/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], organization.Organization)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        """create() POSTs the payload and expects a 201 response."""
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(name='abc123', members=['def123'])
        self.client.post.assert_called_with(f'{self.client.api_url}/organization/',
                                            json={'name': 'abc123', 'members': ['def123']})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, organization.Organization)
        self.assertEqual(res.id, 'abc')

    def test_create_required_kwargs(self):
        """create() requires a name argument."""
        with self.assertRaises(AttributeError) as err:
            self.api.create(members=[])
        self.assertTrue('requires name' in str(err.exception))
| air_sdk-main | tests/organization.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for air_model.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,unused-argument
#pylint: disable=too-many-public-methods,duplicate-code,protected-access
import datetime as dt
from datetime import date, datetime
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import air_model
from ..air_sdk.exceptions import AirObjectDeleted
from ..air_sdk.node import Node
@patch('air_sdk.air_sdk.util.raise_if_invalid_response')
class TestAirModel(TestCase):
    def setUp(self):
        # Base AirModel backed by a mocked API; HTTP verbs are stubbed so no
        # accidental request logic runs during attribute mutation
        self.api = MagicMock()
        self.model = air_model.AirModel(self.api, foo='bar')
        self.model._api.client.patch = MagicMock()
        self.model._api.client.put = MagicMock()
        self.model._api.client.delete = MagicMock()
        self.model.id = 'abc123'
    def test_init(self, mock_raise):
        """New models are live, updatable, deletable, and keep kwargs as attrs."""
        self.assertFalse(self.model._deleted)
        self.assertTrue(self.model._updatable)
        self.assertTrue(self.model._deletable)
        self.assertEqual(self.model._api, self.api)
        self.assertEqual(self.model.foo, 'bar')
    def test_model_keys(self, mock_raise):
        """model_keys maps attribute names to the client API used to lazy-load them.

        Dict-valued entries disambiguate by the owning model's class name.
        """
        model_keys = {'account': 'accounts', 'base_simulation': 'simulations', 'bios': 'images',
                      'connection': 'links', 'demo': 'demos', 'interface': 'simulation_interfaces',
                      'interfaces': {'Node': 'interfaces',
                                     'SimulationNode': 'simulation_interfaces',
                                     'Link': 'interfaces'},
                      'job': 'jobs', 'last_worker': 'worker',
                      'node': {'Interface': 'nodes', 'NodeInstruction': 'simulation_nodes',
                               'SimulationInterface': 'simulation_nodes',
                               'TopologyInstruction': 'nodes'},
                      'nodes': 'simulation_nodes',
                      'original': {'SimulationInterface': 'interfaces',
                                   'SimulationNode': 'nodes'},
                      'organization': 'organizations', 'os': 'images',
                      'preferred_worker': 'workers', 'services': 'services',
                      'simulation': 'simulations', 'topology': 'topologies', 'worker': 'workers'}
        self.assertDictEqual(self.model.model_keys, model_keys)
    def test_load(self, mock_raise):
        """Attributes not in model_keys are stored verbatim (no lazy-loading)."""
        model = air_model.AirModel(self.api, normal='http://testserver/api/v1/thing3/abc456')
        self.assertEqual(model.normal, 'http://testserver/api/v1/thing3/abc456')
    def test_load_datetime(self, mock_raise):
        """ISO timestamp strings for expires_at are parsed into datetime objects."""
        model = air_model.AirModel(self.api, expires_at='2030-12-12T22:05:03')
        self.assertIsInstance(model.expires_at, (datetime, date))
@patch('air_sdk.air_sdk.air_model.AirModel.model_keys', {'lazy_list': 'lazy_api'})
@patch('air_sdk.air_sdk.air_model.LazyLoadedList.__getitem__')
def test_load_list(self, mock_list_get, mock_raise):
model = air_model.AirModel(self.api, lazy_list=['http://testserver/api/v1/thing/abc123'])
self.assertIsInstance(model.lazy_list, air_model.LazyLoadedList)
self.assertEqual(model.lazy_list[0].id, mock_list_get.return_value.id)
@patch('air_sdk.air_sdk.air_model.AirModel.model_keys', {'lazy_lazy': 'lazy_api'})
def test_load_lazy_exists(self, mock_raise):
lazy = air_model.LazyLoaded('def123', 'lazy')
model = air_model.AirModel(self.api, lazy_lazy=lazy)
self.assertEqual(model.lazy_lazy, self.api.client.lazy.get.return_value)
@patch('air_sdk.air_sdk.air_model.AirModel.model_keys', {'lazy_item': 'lazy_api'})
def test_load_http(self, mock_raise):
model = air_model.AirModel(self.api, lazy_item='http://testserver/api/v1/thing2/xyz123')
self.assertEqual(model.lazy_item.id, self.api.client.lazy_api.get.return_value.id)
@patch('air_sdk.air_sdk.air_model.AirModel.model_keys', {'lazy_item': 'lazy_api'})
def test_load_lazy(self, mock_raise):
model = air_model.AirModel(self.api, lazy_item='xyz123')
self.assertEqual(model.lazy_item.id, self.api.client.lazy_api.get.return_value.id)
def test_repr(self, mock_raise):
self.assertRegex(str(self.model),
r'<air_sdk.air_sdk.air_model.AirModel object at 0x[0-9a-f]+>')
def test_repr_deleted(self, mock_raise):
self.model._deleted = True
self.assertRegex(str(self.model),
r'<Deleted Object \(<air_sdk.air_sdk.air_model.AirModel ' +
r'object at 0x[0-9a-f]+>\)>')
def test_getattribute_get_deleted(self, mock_raise):
self.assertFalse(self.model._deleted)
self.api.client.get.assert_not_called()
def test_getattribute_deleted(self, mock_raise):
self.model._deleted = True
with self.assertRaises(AirObjectDeleted) as err:
_ = self.model.foo
self.assertEqual(err.exception.message,
'<class \'air_sdk.air_sdk.air_model.AirModel\'> object has ' + \
'been deleted and should no longer be referenced')
def test_getattribute_lazy(self, mock_raise):
self.model.lazy = air_model.LazyLoaded('abc123', 'thing')
self.assertEqual(self.model.lazy, self.api.client.thing.get.return_value)
self.api.client.thing.get.assert_called_with('abc123')
def test_setattr_set_deleted(self, mock_raise):
self.model._deleted = True
self.assertTrue(self.model._deleted)
@patch('air_sdk.air_sdk.air_model.AirModel._patch')
def test_setattr_not_updatable(self, mock_patch, mock_raise):
self.model._updatable = False
mock_patch.reset_mock()
self.model.foo = 'test'
self.assertEqual(self.model.foo, 'test')
mock_patch.assert_not_called()
def test_setattr(self, mock_raise):
self.model._patch = MagicMock()
self.model.foo = 'test'
self.model._patch.assert_called_with('foo', 'test')
self.assertEqual(self.model.foo, 'test')
def test_setattr_no_change(self, mock_raise):
self.model._patch = MagicMock()
self.model.foo = 'bar'
self.model._patch.assert_not_called()
self.assertEqual(self.model.foo, 'bar')
def test_setattr_internal(self, mock_raise):
self.model._patch = MagicMock()
self.model._foo = 'bar'
self.model._patch.assert_not_called()
self.assertEqual(self.model._foo, 'bar')
@patch('air_sdk.air_sdk.air_model.AirModel.model_keys', {'simulation': 'simulations'})
def test_get_model_key(self, mock_raise):
self.assertEqual(self.model._get_model_key('simulation'), 'simulations')
@patch('air_sdk.air_sdk.air_model.AirModel.model_keys',
{'simulation': {'Node': 'a', 'Node2': 'b'}})
def test_get_model_key_dict(self, mock_raise):
node = Node(MagicMock())
self.assertEqual(node._get_model_key('simulation'), 'a')
def test_patch(self, mock_raise):
self.model.id = 'abc123'
self.model._patch('foo', 'bar')
self.model._api.client.patch.assert_called_with(f'{self.api.url}abc123/',
json={'foo': 'bar'})
mock_raise.assert_called_with(self.model._api.client.patch.return_value)
@patch('air_sdk.air_sdk.air_model.AirModel.refresh')
def test_update(self, mock_refresh, mock_raise):
self.model.update(test='new')
mock_refresh.assert_called()
self.model._api.client.put.assert_called_with(f'{self.model._api.url}{self.model.id}/',
json=self.model.__dict__)
mock_raise.assert_called_with(self.model._api.client.put.return_value)
self.assertEqual(self.model.test, 'new')
def test_update_not_updatable(self, mock_raise):
self.model._updatable = False
with self.assertRaises(NotImplementedError) as err:
self.model.update(test='new')
self.assertEqual(str(err.exception), 'AirModel does not support updates')
def test_update_ignored_fields(self, mock_raise):
self.model.ignore_me = True
self.model._ignored_update_fields = ['ignore_me']
self.model.update(test='new')
payload = self.model.__dict__
del payload['ignore_me']
self.model._api.client.put.assert_called_with(f'{self.model._api.url}{self.model.id}/',
json=payload)
mock_raise.assert_called_with(self.model._api.client.put.return_value)
self.assertEqual(self.model.test, 'new')
def test_delete(self, mock_raise):
self.model.delete()
self.assertTrue(self.model._deleted)
self.model._deleted = False
self.model._api.client.delete.assert_called_with(f'{self.model._api.url}{self.model.id}/')
mock_raise.assert_called_with(self.model._api.client.delete.return_value, data_type=None,
status_code=204)
def test_delete_not_deletable(self, mock_raise):
self.model._deletable = False
with self.assertRaises(NotImplementedError) as err:
self.model.delete()
self.assertEqual(str(err.exception), 'AirModel does not support deletes')
@patch('air_sdk.air_sdk.AirModel._load')
def test_refresh(self, mock_load, mock_raise):
self.model.refresh()
self.model._api.get.assert_called_with(self.model.id)
mock_load.assert_called_with(**self.model._api.get.return_value.__dict__)
def test_json(self, mock_raise):
self.assertEqual(self.model.json(), '{"foo": "bar", "id": "abc123"}')
def test_json_private(self, mock_raise):
self.model._private = 'foo'
self.assertEqual(self.model.json(), '{"foo": "bar", "id": "abc123"}')
def test_json_lazy_loaded(self, mock_raise):
self.model.test = air_model.LazyLoaded('foo', 'bar')
self.assertEqual(self.model.json(), '{"foo": "bar", "id": "abc123", "test": "foo"}')
def test_json_lazy_loaded_list(self, mock_raise):
self.model.test = air_model.LazyLoadedList([air_model.LazyLoaded('a', 1),
air_model.LazyLoaded('b', 2)], MagicMock())
self.assertEqual(self.model.json(), '{"foo": "bar", "id": "abc123", "test": ["a", "b"]}')
def test_json_datetime(self, mock_raise):
time = dt.datetime(2030, 12, 12, 22, 5, 3)
self.model.test = time
self.assertEqual(self.model.json(),
'{"foo": "bar", "id": "abc123", "test": "2030-12-12T22:05:03"}')
class TestLazyLoaded(TestCase):
    """Tests for air_model.LazyLoaded."""
    def setUp(self):
        self.model = air_model.LazyLoaded('abc123', 'tests')

    def test_init(self):
        """The constructor stores the object id and the API model name."""
        self.assertEqual(self.model.id, 'abc123')
        self.assertEqual(self.model.model, 'tests')

    def test_repr(self):
        """repr() singularizes and capitalizes a regular model name."""
        self.assertEqual(str(self.model), f'<air_sdk.air_model.LazyLoaded Test {self.model.id}>')

    def test_repr_topology(self):
        """repr() handles the irregular plural 'topologies' -> 'Topology'."""
        model = air_model.LazyLoaded('abc123', 'topologies')
        # Fixed: assert against the locally built object's id, not
        # self.model.id — the original only passed because both happened
        # to be 'abc123'.
        self.assertEqual(str(model), f'<air_sdk.air_model.LazyLoaded Topology {model.id}>')
class TestLazyLoadedList(TestCase):
    """Tests for air_model.LazyLoadedList."""
    def setUp(self):
        self.api = MagicMock()
        self.item1 = air_model.LazyLoaded('abc', 'tests')
        self.item2 = air_model.LazyLoaded('xyz', 'tests')
        self.model = air_model.LazyLoadedList([self.item1, self.item2], self.api)

    def test_init(self):
        """The list keeps its items and a reference to the API."""
        self.assertListEqual(self.model, [self.item1, self.item2])
        self.assertEqual(self.model._api, self.api)

    def test_getitem(self):
        """Indexing resolves the lazy item into a full object via the API."""
        fetched = self.model[0]
        self.api.client.tests.get.assert_called_with('abc')
        self.assertEqual(fetched, self.api.client.tests.get.return_value)

    def test_iter(self):
        """Iterating resolves each element through the API client."""
        resolved = MagicMock()
        resolved.test = 'foo'
        self.api.client.tests.get.return_value = resolved
        for element in self.model:
            self.assertEqual(element.test, 'foo')
class TestHelpers(TestCase):
    """Tests for air_model module-level helpers."""
    def test_get_item_id(self):
        """A plain id string is returned unchanged."""
        self.assertEqual(air_model._get_item_id('foo'), 'foo')

    def test_get_item_id_dict(self):
        """A dict payload yields its 'id' value."""
        self.assertEqual(air_model._get_item_id({'id': 'abc123'}), 'abc123')

    def test_get_item_id_url(self):
        """A detail URL yields its trailing id segment."""
        url = 'http://testserver/api/v1/test/abc123'
        self.assertEqual(air_model._get_item_id(url), 'abc123')
| air_sdk-main | tests/air_model.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for job.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import job
class TestJob(TestCase):
    """Tests for job.Job."""
    def setUp(self):
        self.model = job.Job(MagicMock())
        self.model.id = 'abc123'
        self.model.category = 'START'

    def test_init_(self):
        """Jobs may be updated but never deleted."""
        self.assertTrue(self.model._updatable)
        self.assertFalse(self.model._deletable)

    def test_repr(self):
        """repr() includes the job category and id."""
        expected = f'<Job {self.model.category} {self.model.id}>'
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        """A deleted job falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestJobApi(TestCase):
    """Tests for job.JobApi."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = job.JobApi(self.client)

    def test_init_(self):
        """The wrapper stores the client and derives its base URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/job/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() fetches a single job and wraps the payload in a Job."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        result = self.api.get('abc123', foo='bar')
        expected_url = f'{self.client.api_url}/job/abc123/'
        self.client.get.assert_called_with(expected_url, params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(result, job.Job)
        self.assertEqual(result.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() fetches all jobs and wraps each item in a Job."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        result = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/job/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], job.Job)
        self.assertEqual(result[0].id, 'abc')
        self.assertEqual(result[1].id, 'xyz')
| air_sdk-main | tests/job.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for interface.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import interface
class TestInterface(TestCase):
    """Tests for interface.Interface."""
    def setUp(self):
        self.model = interface.Interface(MagicMock())
        self.model.id = 'abc123'
        self.model.name = 'eth0'

    def test_init_(self):
        """Interfaces are read-only: neither updatable nor deletable."""
        self.assertFalse(self.model._updatable)
        self.assertFalse(self.model._deletable)

    def test_repr(self):
        """repr() includes the interface name and id."""
        expected = f'<Interface {self.model.name} {self.model.id}>'
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        """A deleted interface falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestInterfaceApi(TestCase):
    """Tests for interface.InterfaceApi."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = interface.InterfaceApi(self.client)

    def test_init_(self):
        """The wrapper stores the client and derives its base URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/interface/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() fetches one interface and wraps the payload."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        result = self.api.get('abc123', foo='bar')
        expected_url = f'{self.client.api_url}/interface/abc123/'
        self.client.get.assert_called_with(expected_url, params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(result, interface.Interface)
        self.assertEqual(result.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() fetches all interfaces and wraps each item."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        result = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/interface/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], interface.Interface)
        self.assertEqual(result[0].id, 'abc')
        self.assertEqual(result[1].id, 'xyz')
| air_sdk-main | tests/interface.py |
air_sdk-main | tests/__init__.py |
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for resource_budget.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import resource_budget
class TestResourceBudget(TestCase):
    """Tests for resource_budget.ResourceBudget."""
    def setUp(self):
        # Removed a leftover `self.model.category = 'START'` assignment
        # copied from the Job tests; ResourceBudget's repr and these tests
        # only use the id.
        self.model = resource_budget.ResourceBudget(MagicMock())
        self.model.id = 'abc123'

    def test_init_(self):
        """Budgets may be updated but never deleted."""
        self.assertFalse(self.model._deletable)
        self.assertTrue(self.model._updatable)

    def test_repr(self):
        """repr() includes only the budget id."""
        self.assertEqual(str(self.model), f'<ResourceBudget {self.model.id}>')

    def test_repr_deleted(self):
        """A deleted budget falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))
class TestResourceBudgetApi(TestCase):
    """Tests for resource_budget.ResourceBudgetApi."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = resource_budget.ResourceBudgetApi(self.client)

    def test_init_(self):
        """The wrapper stores the client and derives its base URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/resource-budget/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() fetches one budget and wraps the payload."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        result = self.api.get('abc123', foo='bar')
        expected_url = f'{self.client.api_url}/resource-budget/abc123/'
        self.client.get.assert_called_with(expected_url, params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(result, resource_budget.ResourceBudget)
        self.assertEqual(result.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() fetches all budgets and wraps each item."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        result = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/resource-budget/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], resource_budget.ResourceBudget)
        self.assertEqual(result[0].id, 'abc')
        self.assertEqual(result[1].id, 'xyz')
| air_sdk-main | tests/resource_budget.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for topology.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,unused-argument
import io
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import topology
class TestTopology(TestCase):
    """Tests for topology.Topology."""
    def setUp(self):
        self.api = MagicMock()
        self.api.url = 'http://testserver/api/'
        self.model = topology.Topology(self.api)
        self.model.id = 'abc123'
        self.model.name = 'test'

    def test_init_(self):
        """Topologies support update and delete; links/nodes are excluded
        from update payloads."""
        self.assertTrue(self.model._updatable)
        self.assertTrue(self.model._deletable)
        self.assertListEqual(topology.Topology._ignored_update_fields, ['links', 'nodes'])

    def test_repr(self):
        """repr() includes the topology name and id."""
        expected = f'<Topology {self.model.name} {self.model.id}>'
        self.assertEqual(str(self.model), expected)

    def test_add_permission(self):
        """add_permission() proxies to the permissions API with this
        topology's id plus any extra kwargs."""
        result = self.model.add_permission('[email protected]', foo='bar')
        self.api.client.permissions.create.assert_called_with(
            email='[email protected]', topology=self.model.id, foo='bar')
        self.assertEqual(result, self.api.client.permissions.create.return_value)

    def test_repr_deleted(self):
        """A deleted topology falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestTopologyApi(TestCase):
    """Tests for topology.TopologyApi, including the three create() input
    forms: JSON payload, inline DOT string/stream, and DOT file path."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = topology.TopologyApi(self.client)
    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/topology/')
    @patch('air_sdk.air_sdk.topology.TopologyApi.list')
    def test_get_topologies(self, mock_list):
        # get_topologies() simply proxies to list()
        self.assertEqual(self.api.get_topologies(), mock_list.return_value)
    @patch('air_sdk.air_sdk.topology.TopologyApi.create')
    def test_create_topology(self, mock_create):
        # create_topology() proxies to create(); json defaults to None
        res = self.api.create_topology(dot='test')
        mock_create.assert_called_with(dot='test', json=None)
        self.assertEqual(res, mock_create.return_value)
    @patch('air_sdk.air_sdk.topology.TopologyApi.get')
    def test_update_topology(self, mock_get):
        # update_topology() fetches the object then delegates to its update()
        self.api.update_topology('abc123', {'foo': 'bar'})
        mock_get.assert_called_with('abc123')
        mock_get.return_value.update.assert_called_with(foo='bar')
    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/topology/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, topology.Topology)
        self.assertEqual(res.test, 'success')
    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/topology/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], topology.Topology)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')
    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create_json(self, mock_raise):
        # json= posts the payload directly and expects a 201
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(json={'foo': 'bar'})
        self.client.post.assert_called_with(f'{self.client.api_url}/topology/',
                                            json={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, topology.Topology)
        self.assertEqual(res.id, 'abc')
    @patch('os.path.isfile', return_value=False)
    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create_dot(self, mock_raise, *args):
        # dot= with a plain string (not a file path) posts the encoded bytes
        # with a graphviz content type
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(dot='test')
        self.client.post.assert_called_with(f'{self.client.api_url}/topology/', data=b'test',
                                            headers={'Content-type': 'text/vnd.graphviz'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, topology.Topology)
        self.assertEqual(res.id, 'abc')
    @patch('os.path.isfile', return_value=False)
    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create_dot_file(self, mock_raise, *args):
        # dot= with an open file-like object posts the stream as-is
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        mock_file = MagicMock(spec=io.IOBase)
        res = self.api.create(dot=mock_file)
        self.client.post.assert_called_with(f'{self.client.api_url}/topology/', data=mock_file,
                                            headers={'Content-type': 'text/vnd.graphviz'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, topology.Topology)
        self.assertEqual(res.id, 'abc')
    @patch('builtins.open')
    @patch('os.path.isfile', return_value=True)
    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create_dot_file_path(self, mock_raise, mock_isfile, mock_open):
        # dot= with an existing file path opens and reads the file contents
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        file_path = '/tmp/topo.dot'
        res = self.api.create(dot=file_path)
        self.client.post.assert_called_with(f'{self.client.api_url}/topology/',
                                            data=mock_open.return_value.read.return_value,
                                            headers={'Content-type': 'text/vnd.graphviz'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, topology.Topology)
        self.assertEqual(res.id, 'abc')
        mock_isfile.assert_called_once_with(file_path)
        mock_open.assert_called_once_with(file_path, 'r')
    def test_create_required_kwargs(self):
        # create() with neither json nor dot is rejected
        with self.assertRaises(AttributeError) as err:
            self.api.create()
        self.assertTrue('requires one of the following: (\'json\', \'dot\')' in str(err.exception))
| air_sdk-main | tests/topology.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for marketplace.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import marketplace
class TestMarketplace(TestCase):
    """Tests for marketplace.Marketplace."""
    def setUp(self):
        self.model = marketplace.Marketplace(MagicMock())
        self.model.id = 'abc123'
        self.model.name = 'test'

    def test_init_(self):
        """Marketplace demos are read-only: neither updatable nor deletable."""
        self.assertFalse(self.model._updatable)
        self.assertFalse(self.model._deletable)

    def test_repr(self):
        """repr() includes the quoted demo name and the id."""
        expected = f'<Marketplace Demo \'{self.model.name}\' {self.model.id}>'
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        """A deleted demo falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestMarketplaceApi(TestCase):
    """Tests for marketplace.MarketplaceApi."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = marketplace.MarketplaceApi(self.client)

    def test_init_(self):
        """The wrapper stores the client and derives its base URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/marketplace/demo/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() fetches one demo and wraps the payload."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        result = self.api.get('abc123', foo='bar')
        expected_url = f'{self.client.api_url}/marketplace/demo/abc123/'
        self.client.get.assert_called_with(expected_url, params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(result, marketplace.Marketplace)
        self.assertEqual(result.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() fetches all demos and wraps each item."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        result = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/marketplace/demo/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], marketplace.Marketplace)
        self.assertEqual(result[0].id, 'abc')
        self.assertEqual(result[1].id, 'xyz')
| air_sdk-main | tests/marketplace.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for simulation_interface.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,unused-argument
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import simulation_interface
class TestSimulationInterface(TestCase):
    """Tests for simulation_interface.SimulationInterface."""
    def setUp(self):
        self.model = simulation_interface.SimulationInterface(MagicMock())
        self.model.id = 'abc123'

    def test_init_(self):
        """Simulation interfaces may be updated but never deleted."""
        self.assertTrue(self.model._updatable)
        self.assertFalse(self.model._deletable)

    def test_repr(self):
        """repr() includes only the id."""
        self.assertEqual(str(self.model), f'<SimulationInterface {self.model.id}>')

    def test_repr_deleted(self):
        """A deleted object falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestSimulationInterfaceApi(TestCase):
    """Tests for simulation_interface.SimulationInterfaceApi."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = simulation_interface.SimulationInterfaceApi(self.client)

    def test_init_(self):
        """The wrapper stores the client and derives its base URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/simulation-interface/')

    @patch('air_sdk.air_sdk.simulation_interface.SimulationInterfaceApi.list')
    def test_get_simulation_interfaces(self, mock_list):
        """The helper proxies to list() with simulation/original filters."""
        result = self.api.get_simulation_interfaces('abc123', 'xyz123')
        mock_list.assert_called_with(simulation='abc123', original='xyz123')
        self.assertEqual(result, mock_list.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() fetches one object and wraps the payload."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        result = self.api.get('abc123', foo='bar')
        expected_url = f'{self.client.api_url}/simulation-interface/abc123/'
        self.client.get.assert_called_with(expected_url, params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(result, simulation_interface.SimulationInterface)
        self.assertEqual(result.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() fetches all objects and wraps each item."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        result = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/simulation-interface/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], simulation_interface.SimulationInterface)
        self.assertEqual(result[0].id, 'abc')
        self.assertEqual(result[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list_interface(self, mock_raise):
        """The `interface` kwarg is translated to the API's `original` filter."""
        self.api.list(interface='test')
        self.client.get.assert_called_with(f'{self.client.api_url}/simulation-interface/',
                                           params={'original': 'test'})
| air_sdk-main | tests/simulation_interface.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for exceptions.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from ..air_sdk import exceptions
class TestAirError(TestCase):
    """Tests for the AirError base exception."""
    def test_init(self):
        """The message becomes str(err) and the status code is stored."""
        err = exceptions.AirError('test', 200)
        self.assertEqual(err.status_code, 200)
        self.assertEqual(str(err), 'test')
class TestAirAuthorizationError(TestCase):
    """Tests for AirAuthorizationError."""
    def test_init(self):
        """An explicit message and status code are stored as given."""
        err = exceptions.AirAuthorizationError('test', 200)
        self.assertIsInstance(err, exceptions.AirError)
        self.assertEqual(err.status_code, 200)
        self.assertEqual(err.message, 'test')

    def test_init_default(self):
        """Omitting the message yields the standard authorization text."""
        err = exceptions.AirAuthorizationError(status_code=200)
        self.assertEqual(err.message, 'An error occurred when authorizing the Air API')
        self.assertEqual(err.status_code, 200)
class TestAirUnexpectedResponse(TestCase):
    """Tests for AirUnexpectedResponse."""
    def test_init(self):
        """Without a status code, the message omits the code suffix."""
        err = exceptions.AirUnexpectedResponse('test')
        self.assertIsInstance(err, exceptions.AirError)
        self.assertIsNone(err.status_code)
        self.assertEqual(err.message,
                         'Received an unexpected response from the Air API: test')

    def test_init_status_code(self):
        """With a status code, the message embeds it in parentheses."""
        err = exceptions.AirUnexpectedResponse('test', status_code=200)
        self.assertEqual(err.status_code, 200)
        self.assertEqual(err.message,
                         'Received an unexpected response from the Air API (200): test')
class TestAirForbiddenError(TestCase):
    """Tests for AirForbiddenError."""
    def test_init(self):
        """An explicit message is stored; status code is always 403."""
        err = exceptions.AirForbiddenError('test')
        self.assertIsInstance(err, exceptions.AirError)
        self.assertEqual(err.status_code, 403)
        self.assertEqual(err.message, 'test')

    def test_init_default(self):
        """Omitting the message yields the standard forbidden text."""
        err = exceptions.AirForbiddenError()
        self.assertEqual(err.message, 'Received 403 Forbidden. Please call AirApi.authorize().')
        self.assertEqual(err.status_code, 403)
class TestAirObjectDeleted(TestCase):
    """Tests for AirObjectDeleted."""
    def test_init(self):
        """An explicit message overrides the generated one."""
        err = exceptions.AirObjectDeleted('foo', 'test')
        self.assertIsInstance(err, exceptions.AirError)
        self.assertEqual(err.message, 'test')

    def test_init_default(self):
        """Omitting the message embeds the type name in the default text."""
        err = exceptions.AirObjectDeleted('foo')
        self.assertEqual(err.message,
                         'foo object has been deleted and should no longer be referenced')
| air_sdk-main | tests/exceptions.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for login.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import login
class TestLogin(TestCase):
    """Tests for login.Login."""
    def setUp(self):
        self.model = login.Login(MagicMock())
        self.model.id = 'abc123'

    def test_init_(self):
        """Login objects are read-only: neither updatable nor deletable."""
        self.assertFalse(self.model._updatable)
        self.assertFalse(self.model._deletable)

    def test_repr(self):
        """repr() includes only the id."""
        self.assertEqual(str(self.model), f'<Login {self.model.id}>')

    def test_repr_deleted(self):
        """A deleted login falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestLoginApi(TestCase):
    """Tests for login.LoginApi."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = login.LoginApi(self.client)

    def test_init_(self):
        """The wrapper stores the client and derives its base URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/login/')

    @patch('air_sdk.air_sdk.login.LoginApi.list')
    def test_get(self, mock_list):
        """get() simply proxies to list() with the same filters."""
        result = self.api.get(foo='bar')
        mock_list.assert_called_with(foo='bar')
        self.assertEqual(result, mock_list.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() returns a single Login built from the response payload."""
        self.client.get.return_value.json.return_value = {'id': 'abc'}
        result = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/login/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(result, login.Login)
        self.assertEqual(result.id, 'abc')
| air_sdk-main | tests/login.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for node.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,unused-argument
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import node
class TestNode(TestCase):
    """Tests for node.Node."""
    def setUp(self):
        self.model = node.Node(MagicMock())
        self.model.id = 'abc123'
        self.model.name = 'server'

    def test_init_(self):
        """Nodes support update and delete; interfaces are excluded from
        update payloads."""
        self.assertTrue(self.model._updatable)
        self.assertTrue(self.model._deletable)
        self.assertListEqual(self.model._ignored_update_fields, ['interfaces'])

    def test_repr(self):
        """repr() includes the node name and id."""
        expected = f'<Node {self.model.name} {self.model.id}>'
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        """A deleted node falls back to the 'Deleted Object' repr."""
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestNodeApi(TestCase):
    """Tests for node.NodeApi."""
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = node.NodeApi(self.client)

    def test_init_(self):
        """The wrapper stores the client and derives its base URL."""
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/node/')

    @patch('air_sdk.air_sdk.node.NodeApi.list')
    def test_get_nodes(self, mock_list):
        """get_nodes() proxies to list() with a simulation filter."""
        result = self.api.get_nodes(simulation_id='foo')
        mock_list.assert_called_with(simulation='foo')
        self.assertEqual(result, mock_list.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        """get() fetches one node and wraps the payload."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        result = self.api.get('abc123', foo='bar')
        expected_url = f'{self.client.api_url}/node/abc123/'
        self.client.get.assert_called_with(expected_url, params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(result, node.Node)
        self.assertEqual(result.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get_simulation_id(self, mock_raise):
        """The simulation_id kwarg is translated to a `simulation` param."""
        self.client.get.return_value.json.return_value = {'test': 'success'}
        self.api.get('abc123', simulation_id='xyz123')
        self.client.get.assert_called_with(f'{self.client.api_url}/node/abc123/',
                                           params={'simulation': 'xyz123'})

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        """list() fetches all nodes and wraps each item."""
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        result = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/node/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], node.Node)
        self.assertEqual(result[0].id, 'abc')
        self.assertEqual(result[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        """create() posts the payload and expects a 201 response."""
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        result = self.api.create(topology='abc123', name='test')
        self.client.post.assert_called_with(f'{self.client.api_url}/node/',
                                            json={'topology': 'abc123', 'name': 'test'})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(result, node.Node)
        self.assertEqual(result.id, 'abc')

    def test_create_required_kwargs(self):
        """create() requires both `topology` and `name`."""
        with self.assertRaises(AttributeError) as err:
            self.api.create(name='test')
        self.assertIn('requires topology', str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(topology='abc123')
        self.assertIn('requires name', str(err.exception))
| air_sdk-main | tests/node.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for demo.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import demo
class TestDemo(TestCase):
    """Unit tests for the Demo model object."""

    def setUp(self):
        self.model = demo.Demo(MagicMock())
        self.model.id = 'abc123'
        self.model.name = 'test'

    def test_init_(self):
        # Demos are flagged as neither deletable nor updatable
        for flag in (self.model._deletable, self.model._updatable):
            self.assertFalse(flag)

    def test_repr(self):
        expected = f"<Demo '{self.model.name}' {self.model.id}>"
        self.assertEqual(str(self.model), expected)

    def test_repr_deleted(self):
        self.model._deleted = True
        self.assertIn('Deleted Object', str(self.model))
class TestDemoApi(TestCase):
    # DemoApi tests against a mocked HTTP client.
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = demo.DemoApi(self.client)

    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/demo/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        # Extra kwargs become query parameters on the detail endpoint
        self.client.get.assert_called_with(f'{self.client.api_url}/demo/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, demo.Demo)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/demo/', params={'foo': 'bar'})
        # List responses are validated as list payloads
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], demo.Demo)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')
| air_sdk-main | tests/demo.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for simulation_node.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring,unused-argument
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import simulation_node
class TestSimulationNode(TestCase):
    # SimulationNode model tests; the API handle is mocked so these verify
    # the HTTP calls each helper issues and how responses are returned.
    def setUp(self):
        self.api = MagicMock()
        self.api.url = 'http://testserver/api/'
        self.model = simulation_node.SimulationNode(self.api)
        self.model.id = 'abc123'

    def test_init_(self):
        self.assertFalse(self.model._deletable)
        self.assertTrue(self.model._updatable)

    def test_repr(self):
        self.assertEqual(str(self.model), f'<SimulationNode {self.model.id}>')

    def test_repr_deleted(self):
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create_instructions(self, mock_raise):
        res = self.model.create_instructions(executor='shell', data='echo')
        self.api.client.post.assert_called_with(f'{self.api.url}abc123/instructions/',
                                                json={'executor': 'shell', 'data': 'echo'})
        # Instruction creation expects a 201 with a string payload
        mock_raise.assert_called_with(self.api.client.post.return_value, status_code=201,
                                      data_type=str)
        self.assertEqual(res, {'id': self.api.client.post.return_value.json.return_value})

    def test_create_instructions_required_kwargs(self):
        # create_instructions() requires both `executor` and `data`
        with self.assertRaises(AttributeError) as err:
            self.model.create_instructions(executor='shell')
        self.assertTrue('requires data' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.model.create_instructions(data='foo')
        self.assertTrue('requires executor' in str(err.exception))

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create_instructions_list(self, mock_raise):
        # A list of instruction lines is joined with newlines before POSTing
        self.model.create_instructions(executor='shell', data=['line1', 'line2'])
        self.api.client.post.assert_called_with(f'{self.api.url}abc123/instructions/',
                                                json={'executor': 'shell', 'data': 'line1\nline2'})

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list_instructions(self, mock_raise):
        res = self.model.list_instructions(foo='bar')
        self.api.client.get.assert_called_with(f'{self.api.url}{self.model.id}/instructions/',
                                               params={'foo': 'bar'})
        mock_raise.assert_called_with(self.api.client.get.return_value, data_type=list)
        self.assertEqual(res, self.api.client.get.return_value.json.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_delete_instructions(self, mock_raise):
        self.model.delete_instructions()
        self.api.client.delete.assert_called_with(f'{self.api.url}{self.model.id}/instructions/')
        # Deletion expects 204 No Content, so no payload is validated
        mock_raise.assert_called_with(self.api.client.delete.return_value, status_code=204,
                                      data_type=None)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_control(self, mock_raise):
        res = self.model.control(action='test')
        self.api.client.post.assert_called_with(f'{self.api.url}{self.model.id}/control/',
                                                json={'action': 'test'})
        mock_raise.assert_called_with(self.api.client.post.return_value)
        self.assertEqual(res, self.api.client.post.return_value.json.return_value)

    def test_control_required_kwargs(self):
        with self.assertRaises(AttributeError) as err:
            self.model.control()
        self.assertTrue('requires action' in str(err.exception))

    @patch('air_sdk.air_sdk.simulation_node.SimulationNode.control')
    def test_rebuild(self, mock_control):
        # rebuild() is a thin wrapper over control(action='rebuild')
        self.model.rebuild(foo='bar')
        mock_control.assert_called_with(action='rebuild', foo='bar')

    @patch('air_sdk.air_sdk.simulation_node.SimulationNode.control')
    def test_reset(self, mock_control):
        # reset() is a thin wrapper over control(action='reset')
        self.model.reset(foo='bar')
        mock_control.assert_called_with(action='reset', foo='bar')
class TestSimulationNodeApi(TestCase):
    # SimulationNodeApi tests against a mocked HTTP client.
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = simulation_node.SimulationNodeApi(self.client)

    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/simulation-node/')

    @patch('air_sdk.air_sdk.simulation_node.SimulationNodeApi.get')
    def test_update_simulation_node(self, mock_get):
        # update_simulation_node() fetches the node, then updates it with the
        # provided dict expanded to keyword arguments
        self.api.update_simulation_node('abc123', {'foo': 'bar'})
        mock_get.assert_called_with('abc123')
        mock_get.return_value.update.assert_called_with(foo='bar')

    @patch('air_sdk.air_sdk.simulation_node.SimulationNodeApi.list')
    def test_get_simulation_nodes(self, mock_list):
        # get_simulation_nodes() proxies to list()
        res = self.api.get_simulation_nodes(foo='bar')
        mock_list.assert_called_with(foo='bar')
        self.assertEqual(res, mock_list.return_value)

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/simulation-node/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, simulation_node.SimulationNode)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/simulation-node/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], simulation_node.SimulationNode)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')
| air_sdk-main | tests/simulation_node.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for image.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock, patch
from ..air_sdk import image, organization
class TestImage(TestCase):
    # Image model tests; mock_api stands in for the ImageApi handle.
    def setUp(self):
        self.mock_api = MagicMock()
        self.model = image.Image(self.mock_api)
        self.model.id = 'abc123'
        self.model.name = 'ubuntu'
        # Target organization used by the copy() test
        self.org1 = organization.Organization(self.mock_api, id='xyz456', name='NVIDIA')

    def test_init_(self):
        self.assertTrue(self.model._deletable)
        self.assertTrue(self.model._updatable)

    def test_repr(self):
        self.assertEqual(str(self.model), f'<Image {self.model.name} {self.model.id}>')

    def test_repr_deleted(self):
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_copy(self, mock_raise):
        new_id = 'def456'
        mock_post = self.mock_api.client.post
        mock_post.return_value.json.return_value = {'id': new_id, 'name': 'new-image'}
        res = self.model.copy(self.org1)
        # copy() POSTs to the image's /copy/ endpoint with the target org
        mock_post.assert_called_once_with(f'{self.mock_api.url}{self.model.id}/copy/',
                                          json={'organization': self.org1})
        mock_raise.assert_called_once_with(self.mock_api.client.post.return_value, status_code=201)
        self.assertEqual(res.id, new_id)

    @patch('builtins.open')
    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_upload(self, mock_raise, mock_open):
        # upload() PUTs the file contents, opened in binary mode
        self.model.upload('myfile')
        mock_put = self.mock_api.client.put
        mock_put.assert_called_with(f'{self.mock_api.url}{self.model.id}/upload/',
                                    data=mock_open.return_value.__enter__.return_value)
        mock_open.assert_called_with('myfile', 'rb')
        mock_raise.assert_called_with(mock_put.return_value, status_code=204, data_type=None)
class TestImageApi(TestCase):
    # ImageApi tests against a mocked HTTP client.
    def setUp(self):
        self.client = MagicMock()
        self.client.api_url = 'http://testserver/api'
        self.api = image.ImageApi(self.client)

    def test_init_(self):
        self.assertEqual(self.api.client, self.client)
        self.assertEqual(self.api.url, 'http://testserver/api/image/')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_get(self, mock_raise):
        self.client.get.return_value.json.return_value = {'test': 'success'}
        res = self.api.get('abc123', foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/image/abc123/',
                                           params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value)
        self.assertIsInstance(res, image.Image)
        self.assertEqual(res.test, 'success')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_list(self, mock_raise):
        self.client.get.return_value.json.return_value = [{'id': 'abc'}, {'id': 'xyz'}]
        res = self.api.list(foo='bar')
        self.client.get.assert_called_with(f'{self.client.api_url}/image/', params={'foo': 'bar'})
        mock_raise.assert_called_with(self.client.get.return_value, data_type=list)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[0], image.Image)
        self.assertEqual(res[0].id, 'abc')
        self.assertEqual(res[1].id, 'xyz')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    def test_create(self, mock_raise):
        org = 'acb123'
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(name='myimage', organization=org)
        self.client.post.assert_called_with(f'{self.client.api_url}/image/',
                                            json={'name': 'myimage', 'organization': org})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        self.assertIsInstance(res, image.Image)
        self.assertEqual(res.id, 'abc')

    @patch('air_sdk.air_sdk.util.raise_if_invalid_response')
    @patch('air_sdk.air_sdk.image.Image.upload')
    def test_create_upload(self, mock_upload, mock_raise):
        # Passing `filename` should trigger an upload after creation
        org = 'abc123'
        self.client.post.return_value.json.return_value = {'id': 'abc'}
        res = self.api.create(name='myimage', filename='myfile', organization=org)
        self.client.post.assert_called_with(f'{self.client.api_url}/image/',
                                            json={'name': 'myimage', 'filename': 'myfile',
                                                  'organization': org})
        mock_raise.assert_called_with(self.client.post.return_value, status_code=201)
        mock_upload.assert_called_with('myfile')
        self.assertIsInstance(res, image.Image)

    def test_create_required_kwargs(self):
        # create() requires both `name` and `organization`
        with self.assertRaises(AttributeError) as err:
            self.api.create(organization='abc123')
        self.assertTrue('requires name' in str(err.exception))
        with self.assertRaises(AttributeError) as err:
            self.api.create(name='abc123')
        self.assertTrue('requires organization' in str(err.exception))
| air_sdk-main | tests/image.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Tests for account.py
"""
#pylint: disable=missing-function-docstring,missing-class-docstring
from unittest import TestCase
from unittest.mock import MagicMock
from ..air_sdk import account
class TestAccount(TestCase):
    """Tests for the Account model."""

    def setUp(self):
        # setUp only builds the fixture; the _deletable/_updatable assertions
        # that used to live here belong in (and already exist in) test_init —
        # assertions in setUp run before every test and report as errors
        # rather than failures.
        self.model = account.Account(MagicMock())
        self.model.username = 'foo'
        self.model.id = 'abc123'

    def test_init(self):
        self.assertFalse(self.model._deletable)
        self.assertFalse(self.model._updatable)

    def test_repr(self):
        self.assertEqual(str(self.model), f'<Account {self.model.username} {self.model.id}>')

    def test_repr_deleted(self):
        self.model._deleted = True
        self.assertTrue('Deleted Object' in str(self.model))
class TestAccountApi(TestCase):
    # AccountApi tests; raise_if_invalid_response is not patched in this
    # suite, so the mocked client is primed with status_code=200 — presumably
    # to satisfy the SDK's internal response validation (verify against the
    # AccountApi implementation).
    def setUp(self):
        self.mock_client = MagicMock()
        self.mock_client.api_url = 'http://testserver/api'
        self.mock_client.get.return_value.status_code = 200
        self.url = self.mock_client.api_url + '/account/'
        self.api = account.AccountApi(self.mock_client)

    def test_init(self):
        self.assertEqual(self.api.client, self.mock_client)
        self.assertEqual(self.api.url, self.url)

    def test_get(self):
        self.mock_client.get.return_value.json.return_value = {'foo': 'bar'}
        res = self.api.get('abc123', foo='bar')
        # Extra kwargs become query parameters on the detail endpoint
        self.mock_client.get.assert_called_with(self.url + 'abc123/', params={'foo': 'bar'})
        self.assertIsInstance(res, account.Account)
        self.assertEqual(res.foo, 'bar')

    def test_list(self):
        self.mock_client.get.return_value.json.return_value = [{'foo': 'bar'}]
        res = self.api.list(foo='bar')
        self.mock_client.get.assert_called_with(self.url, params={'foo': 'bar'})
        self.assertEqual(len(res), 1)
        self.assertIsInstance(res[0], account.Account)
        self.assertEqual(res[0].foo, 'bar')
| air_sdk-main | tests/account.py |
# Copyright (c) 2014-2018, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import setup

# Read the long description with a context manager so the file handle is
# closed deterministically (the previous inline open(...).read() relied on
# garbage collection to close it).
with open('README.rst') as _readme:
    _long_description = _readme.read()

setup(
    name='pynvrtc',
    version='9.2',
    author='Sean Lee',
    author_email='[email protected]',
    packages=['pynvrtc'],
    url='https://github.com/NVIDIA/pynvrtc',
    license='MIT',
    description='Python bindings to NVRTC',
    long_description=_long_description,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Software Development :: Compilers',
        'Topic :: Software Development :: Libraries',
    ],
)
| pynvrtc-master | setup.py |
# Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import nose
import os

# The tests exercise the real NVRTC shared library; NVRTC_LIBRARY may point
# at it explicitly, otherwise it must be discoverable on the platform's
# library search path.
if 'NVRTC_LIBRARY' not in os.environ:
    print('WARNING: NVRTC_LIBRARY is not set, '
          'assuming it is in [DY]LD_LIBRARY_PATH/PATH')
# Test modules live under tests/; change into it before running nose.
os.chdir('tests')
nose.run()
| pynvrtc-master | run-tests.py |
# Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .interface import NVRTCException, NVRTCInterface
class ProgramException(Exception):
    """Raised when an NVRTC program fails to compile; carries the build log."""

    def __init__(self, msg):
        self._msg = msg

    def __str__(self):
        return self.get_message()

    def __repr__(self):
        return self.get_message()

    def get_message(self):
        """Return the message (compiler log) associated with this error."""
        return self._msg
class Program(object):
    """
    An NVRTC program object.

    This is a high-level wrapper around the NVRTC program API.
    """

    def __init__(self,
                 src, name="default_program",
                 headers=(), include_names=(),
                 lib_name=''):
        """
        Create an NVRTC program from CUDA source.

        Arguments:
            src: CUDA source code for the program.
            name: Program name, used in diagnostics.
            headers: Sequence of header source strings included by `src`.
            include_names: Sequence of include names matching `headers`.
            lib_name: Optional explicit path to the NVRTC shared library.
        """
        # Immutable tuple defaults replace the old mutable [] defaults
        # (shared-mutable-default pitfall); convert to lists for the
        # low-level interface, which builds fixed-size ctypes arrays.
        self._interface = NVRTCInterface(lib_name)
        self._program = self._interface.nvrtcCreateProgram(src, name,
                                                           list(headers),
                                                           list(include_names))

    def __del__(self):
        # Guard both attributes: if NVRTCInterface() or nvrtcCreateProgram()
        # raised in __init__, the corresponding attribute was never set and
        # the old code raised AttributeError during garbage collection.
        if hasattr(self, '_interface') and hasattr(self, '_program'):
            self._interface.nvrtcDestroyProgram(self._program)

    def compile(self, options=()):
        """
        Compiles the program object to PTX using the compiler options
        specified in `options`.

        Raises:
            ProgramException: on compilation failure; the exception message
                is the NVRTC build log.
        """
        # NOTE: no `raise ... from e` here — the package still supports
        # Python 2, where that syntax is invalid.
        try:
            self._interface.nvrtcCompileProgram(self._program, list(options))
            return self._interface.nvrtcGetPTX(self._program)
        except NVRTCException:
            # Surface the human-readable compiler log instead of the raw
            # NVRTC status code.
            log = self._interface.nvrtcGetProgramLog(self._program)
            raise ProgramException(log)
| pynvrtc-master | pynvrtc/compiler.py |
# Copyright (c) 2014-2018, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from ctypes import (
POINTER,
c_int,
c_void_p,
byref,
create_string_buffer,
c_char_p,
c_size_t,
sizeof,
cdll,
)
from platform import system
# True when running under the Python 2 interpreter; byte strings then need
# no explicit encoding before being handed to ctypes.
is_python2 = sys.version_info.major == 2

# NVRTC status codes (mirror the nvrtcResult enum values).
NVRTC_SUCCESS = 0
NVRTC_ERROR_OUT_OF_MEMORY = 1
NVRTC_ERROR_PROGRAM_CREATION_FAILURE = 2
NVRTC_ERROR_INVALID_INPUT = 3
NVRTC_ERROR_INVALID_PROGRAM = 4
NVRTC_ERROR_INVALID_OPTION = 5
NVRTC_ERROR_COMPILATION = 6
NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = 7


def encode_str(s):
    """Return *s* encoded as UTF-8 bytes on Python 3; unchanged on Python 2."""
    return s if is_python2 else s.encode("utf-8")


def encode_str_list(str_list):
    """Encode every element of *str_list* via encode_str (Python 2 passthrough)."""
    if is_python2:
        return str_list
    return [encode_str(item) for item in str_list]
class NVRTCException(Exception):
    """
    Exception wrapper for NVRTC error codes.
    """

    def __init__(self, msg):
        super(NVRTCException, self).__init__()
        self._msg = msg

    def __repr__(self):
        return str(self)

    def __str__(self):
        # Prefix the stored message so tracebacks are self-describing
        return 'NVRTC Error: %s' % self._msg
class NVRTCInterface(object):
    """
    Low-level interface to NVRTC. This class is primarily designed for
    interfacing the high-level API with the NVRTC binary, but clients
    are free to use NVRTC directly through this class.
    """
    def __init__(self, lib_path=''):
        # _lib is the ctypes handle to the NVRTC shared library
        self._lib = None
        self._load_nvrtc_lib(lib_path)

    def _load_nvrtc_lib(self, lib_path):
        """
        Loads the NVRTC shared library, with an optional search path in
        lib_path, and declares ctypes signatures for every entry point used.
        """
        # NVRTC only ships 64-bit binaries, so refuse 32-bit interpreters.
        if sizeof(c_void_p) == 8:
            # Platform-specific default library names (nvrtc64_92.dll
            # corresponds to the NVRTC 9.2 release on Windows).
            if system() == 'Windows':
                def_lib_name = 'nvrtc64_92.dll'
            elif system() == 'Darwin':
                def_lib_name = 'libnvrtc.dylib'
            else:
                def_lib_name = 'libnvrtc.so'
        else:
            raise NVRTCException('NVRTC is not supported on 32-bit platforms.')

        # An explicit lib_path overrides the platform default
        if len(lib_path) == 0:
            name = def_lib_name
        else:
            name = lib_path

        self._lib = cdll.LoadLibrary(name)

        # Declare argtypes/restype for each entry point so ctypes marshals
        # pointers and sizes correctly on all platforms.
        self._lib.nvrtcCreateProgram.argtypes = [
            POINTER(c_void_p),  # prog
            c_char_p,  # src
            c_char_p,  # name
            c_int,  # numHeaders
            POINTER(c_char_p),  # headers
            POINTER(c_char_p)  # include_names
        ]
        self._lib.nvrtcCreateProgram.restype = c_int

        self._lib.nvrtcDestroyProgram.argtypes = [
            POINTER(c_void_p)  # prog
        ]
        self._lib.nvrtcDestroyProgram.restype = c_int

        self._lib.nvrtcCompileProgram.argtypes = [
            c_void_p,  # prog
            c_int,  # numOptions
            POINTER(c_char_p)  # options
        ]
        self._lib.nvrtcCompileProgram.restype = c_int

        self._lib.nvrtcGetPTXSize.argtypes = [
            c_void_p,  # prog
            POINTER(c_size_t)  # ptxSizeRet
        ]
        self._lib.nvrtcGetPTXSize.restype = c_int

        self._lib.nvrtcGetPTX.argtypes = [
            c_void_p,  # prog
            c_char_p  # ptx
        ]
        self._lib.nvrtcGetPTX.restype = c_int

        self._lib.nvrtcGetProgramLogSize.argtypes = [
            c_void_p,  # prog
            POINTER(c_size_t)  # logSizeRet
        ]
        self._lib.nvrtcGetProgramLogSize.restype = c_int

        self._lib.nvrtcGetProgramLog.argtypes = [
            c_void_p,  # prog
            c_char_p  # log
        ]
        self._lib.nvrtcGetProgramLog.restype = c_int

        self._lib.nvrtcAddNameExpression.argtypes = [
            c_void_p,  # prog
            c_char_p  # nameExpression
        ]
        self._lib.nvrtcAddNameExpression.restype = c_int

        self._lib.nvrtcGetLoweredName.argtypes = [
            c_void_p,  # prog
            c_char_p,  # nameExpression
            POINTER(c_char_p)  # loweredName
        ]
        self._lib.nvrtcGetLoweredName.restype = c_int

        self._lib.nvrtcGetErrorString.argtypes = [
            c_int  # result
        ]
        self._lib.nvrtcGetErrorString.restype = c_char_p

        self._lib.nvrtcVersion.argtypes = [
            POINTER(c_int),  # major
            POINTER(c_int)  # minor
        ]
        self._lib.nvrtcVersion.restype = c_int

    def _throw_on_error(self, code):
        """
        Raises an NVRTCException if the given code is not NVRTC_SUCCESS.
        """
        if code == NVRTC_SUCCESS:
            return
        else:
            raise NVRTCException(self.nvrtcGetErrorString(code))

    def nvrtcCreateProgram(self, src, name, headers, include_names):
        """
        Creates and returns a new NVRTC program object.
        """
        res = c_void_p()
        # Build NULL-free ctypes arrays of encoded header sources and names
        headers_array = (c_char_p * len(headers))()
        headers_array[:] = encode_str_list(headers)
        include_names_array = (c_char_p * len(include_names))()
        include_names_array[:] = encode_str_list(include_names)
        code = self._lib.nvrtcCreateProgram(byref(res),
                                            c_char_p(encode_str(src)), c_char_p(encode_str(name)),
                                            len(headers),
                                            headers_array, include_names_array)
        self._throw_on_error(code)
        return res

    def nvrtcDestroyProgram(self, prog):
        """
        Destroys the given NVRTC program object.
        """
        code = self._lib.nvrtcDestroyProgram(byref(prog))
        self._throw_on_error(code)
        return

    def nvrtcCompileProgram(self, prog, options):
        """
        Compiles the NVRTC program object into PTX, using the provided options
        array. See the NVRTC API documentation for accepted options.
        """
        options_array = (c_char_p * len(options))()
        options_array[:] = encode_str_list(options)
        code = self._lib.nvrtcCompileProgram(prog, len(options), options_array)
        self._throw_on_error(code)
        return

    def nvrtcGetPTX(self, prog):
        """
        Returns the compiled PTX for the NVRTC program object as a string.
        """
        # Two-step query: ask for the size, then fill a buffer of that size
        size = c_size_t()
        code = self._lib.nvrtcGetPTXSize(prog, byref(size))
        self._throw_on_error(code)

        buf = create_string_buffer(size.value)
        code = self._lib.nvrtcGetPTX(prog, buf)
        self._throw_on_error(code)

        return buf.value.decode('utf-8')

    def nvrtcGetProgramLog(self, prog):
        """
        Returns the log for the NVRTC program object.

        Only useful after calls to nvrtcCompileProgram or nvrtcVerifyProgram.
        """
        size = c_size_t()
        code = self._lib.nvrtcGetProgramLogSize(prog, byref(size))
        self._throw_on_error(code)

        buf = create_string_buffer(size.value)
        code = self._lib.nvrtcGetProgramLog(prog, buf)
        self._throw_on_error(code)

        return buf.value.decode('utf-8')

    def nvrtcAddNameExpression(self, prog, name_expression):
        """
        Notes the given name expression denoting a __global__ function or
        function template instantiation.
        """
        code = self._lib.nvrtcAddNameExpression(prog,
                                                c_char_p(encode_str(name_expression)))
        self._throw_on_error(code)
        return

    def nvrtcGetLoweredName(self, prog, name_expression):
        """
        Returns the lowered (mangled) name for a name expression previously
        registered via nvrtcAddNameExpression.
        """
        lowered_name = c_char_p()
        code = self._lib.nvrtcGetLoweredName(prog,
                                             c_char_p(encode_str(name_expression)),
                                             byref(lowered_name))
        self._throw_on_error(code)
        return lowered_name.value.decode('utf-8')

    def nvrtcGetErrorString(self, code):
        """
        Returns a text identifier for the given NVRTC status code.
        """
        code_int = c_int(code)
        res = self._lib.nvrtcGetErrorString(code_int)
        return res.decode('utf-8')

    def nvrtcVersion(self):
        """
        Returns the loaded NVRTC library version as a (major, minor) tuple.
        """
        major = c_int()
        minor = c_int()
        code = self._lib.nvrtcVersion(byref(major), byref(minor))
        self._throw_on_error(code)
        return (major.value, minor.value)

    def __str__(self):
        (major, minor) = self.nvrtcVersion()
        return 'NVRTC Interface (Version: %d.%d)' % (major, minor)

    def __repr__(self):
        return str(self)
| pynvrtc-master | pynvrtc/interface.py |
# Copyright (c) 2014-2018, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Public version identifier for the pynvrtc package.
__version__ = '9.2'
| pynvrtc-master | pynvrtc/__init__.py |
# Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
def get_interface():
    """Build an NVRTCInterface, honouring the NVRTC_LIBRARY env var if set."""
    import pynvrtc.interface

    lib_path = os.environ.get('NVRTC_LIBRARY')
    if lib_path is None:
        # No override: let the interface fall back to its default library name
        return pynvrtc.interface.NVRTCInterface()
    return pynvrtc.interface.NVRTCInterface(lib_path)
| pynvrtc-master | tests/util.py |
# Copyright (c) 2014-2015, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from util import get_interface
class ImportTest(unittest.TestCase):
    # Smoke test: constructing the interface loads the NVRTC shared library.
    def test_simple_import(self):
        get_interface()
| pynvrtc-master | tests/import-test.py |
# Copyright (c) 2014-2016, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from util import get_interface
### Low-level interface tests
class CompileTests(unittest.TestCase):
    """Tests for the low-level NVRTC interface (create/compile/query/destroy)."""

    def test_create_program(self):
        """A program can be created and destroyed without compiling."""
        i = get_interface()
        prog = i.nvrtcCreateProgram('__global__ void k() {}\n', 'simple.cu',
                                    [], [])
        i.nvrtcDestroyProgram(prog)

    def test_compile_empty_program(self):
        """A program with a user-provided header compiles successfully."""
        i = get_interface()
        prog = i.nvrtcCreateProgram('#include "foo.h"\n'
                                    '__global__ void k() {}\n', 'simple.cu',
                                    ['__device__ void f() {}\n'], ['foo.h'])
        i.nvrtcCompileProgram(prog, [])
        i.nvrtcDestroyProgram(prog)

    def test_program_log(self):
        """A failed compile raises NVRTCException and leaves a non-empty log."""
        import pynvrtc.interface
        i = get_interface()
        # '__global__ int k()' is invalid CUDA: kernels must return void.
        prog = i.nvrtcCreateProgram('__global__ int k() {}\n', 'simple.cu',
                                    [], [])
        self.assertRaises(pynvrtc.interface.NVRTCException,
                          i.nvrtcCompileProgram, prog, [])
        log = i.nvrtcGetProgramLog(prog)
        self.assertTrue(len(log) > 0)
        i.nvrtcDestroyProgram(prog)

    def test_program_output(self):
        """A successful compile produces non-empty PTX."""
        i = get_interface()
        prog = i.nvrtcCreateProgram('__global__ void k() {}\n', 'simple.cu',
                                    [], [])
        i.nvrtcCompileProgram(prog, [])
        ptx = i.nvrtcGetPTX(prog)
        self.assertTrue(len(ptx) > 0)
        i.nvrtcDestroyProgram(prog)

    def test_lower_name(self):
        """Template instantiations can be looked up by their lowered names."""
        i = get_interface()
        prog = i.nvrtcCreateProgram('template<typename T>\n'
                                    '__global__ void k(T *ptr) {}\n',
                                    'simple.cu', [], [])
        i.nvrtcAddNameExpression(prog, 'k<float>')
        i.nvrtcAddNameExpression(prog, 'k<int>')
        i.nvrtcCompileProgram(prog, [])
        name = i.nvrtcGetLoweredName(prog, 'k<float>')
        self.assertTrue(name == "_Z1kIfEvPT_")
        name = i.nvrtcGetLoweredName(prog, 'k<int>')
        self.assertTrue(name == "_Z1kIiEvPT_")
        # Fix: release the program handle (it was leaked here, while every
        # other test in this class destroys its program).
        i.nvrtcDestroyProgram(prog)
| pynvrtc-master | tests/compile-tests.py |
# Copyright (c) 2014-2018, NVIDIA Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This sample illustrates a simple CUDA source to PTX compiler implemented using
NVRTC. All command-line options are passed along to NVRTC. Arguments that
start with '-' are assumed to be options and are passed along accordingly.
Otherwise, the option is treated as a file name and is read as input.
NOTE: If you get errors about not being able to load nvrtc, please make sure
your [DY]LD_LIBRARY_PATH/PATH environment variable points to the nvrtc binary
in your CUDA installation, e.g.
$ export LD_LIBRARY_PATH=/usr/local/cuda-9.2/lib64:$LD_LIBRARY_PATH
"""
import sys

from pynvrtc.compiler import Program, ProgramException

if len(sys.argv) < 2:
    print('Usage: %s [options] <cuda source file>' % sys.argv[0])
    sys.exit(1)

try:
    src = None
    options = []

    # Parse all options: anything starting with '-' is a compiler option,
    # anything else is treated as the CUDA source file to compile.
    for a in sys.argv[1:]:
        if a.startswith('-'):
            # Treat as compiler option
            options.append(a)
        else:
            # Treat as compiler input
            with open(a, 'rb') as f:
                src = f.read()

    if src is None:
        # Fix: every argument was an option, so there is nothing to compile;
        # previously Program(None) would fail with an obscure error.
        print('ERROR: no input file given')
        sys.exit(1)

    # Create program object
    p = Program(src)

    # Run the compile
    ptx = p.compile(options)

    # Dump the output to stdout
    print(ptx)
    sys.exit(0)
except ProgramException as e:
    # An error occurred, dump it to stdout
    print('ERROR:\n%s\n' % repr(e))
    # Fix: previously the script fell off the end here and exited with
    # status 0 even though compilation failed.
    sys.exit(1)
| pynvrtc-master | samples/ptxgen.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A tool to create a procedural generated volume saved in
# MHD (https://itk.org/Wiki/ITK/MetaIO/Documentation) format.
#
import os
import argparse
import math
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create a procedural generated volume and save as a MHD file.')
parser.add_argument('output')
args = parser.parse_args()
size = 128
# write the header
with open(args.output + '.mhd', 'wt') as out_header:
out_header.write(('ObjectType = Image\n'
'NDims = 3\n'
'BinaryData = True\n'
'BinaryDataByteOrderMSB = False\n'
'CompressedData = False\n'
'TransformMatrix = 1 0 0 0 1 0 0 0 1\n'
'Offset = 0 0 0\n'
'CenterOfRotation = 0 0 0\n'
'ElementSpacing = 1 1 1\n'
'DimSize = ' + str(size) + ' ' + str(size) + ' ' + str(size) + '\n'
'AnatomicalOrientation = ???\n'
'ElementType = MET_UCHAR\n'
'ElementDataFile = ' + args.output + '.raw\n'))
density = np.zeros((size, size, size), dtype=np.uint8)
for z in range(size):
for y in range(size):
for x in range(size):
density[z][y][x] = int(
math.fabs(math.sin((float(x) / size) * 5.0 * math.pi)) *
math.fabs(math.sin((float(y) / size) * 4.0 * math.pi)) *
math.fabs(math.sin((float(z) / size) * 2.0 * math.pi)) * 255.0 + 0.5)
with open(args.output + '.raw', 'w') as out_data:
density.tofile(out_data)
| clara-viz-main | src/examples/renderer/gen_volume.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import tensorflow as tf
if hasattr(tf.compat, 'v1'):
tf.compat.v1.disable_eager_execution()
from open_seq2seq.utils.utils import deco_print, get_base_config, create_model,\
create_logdir, check_logdir, \
check_base_model_logdir
from open_seq2seq.utils import train, infer, evaluate
def main():
    """Entry point: parse config, set up logging/Horovod, then dispatch.

    Reads mode and configuration from sys.argv (the OpenSeq2Seq CLI format),
    optionally restores a checkpoint and/or a base model, and runs
    train / train_eval / eval / infer.
    """
    # Parse args and create config
    args, base_config, base_model, config_module = get_base_config(sys.argv[1:])

    if args.mode == "interactive_infer":
        # Fix: this used to be ValueError("...", "...") — two arguments —
        # which raises with a 2-tuple instead of one readable message.
        raise ValueError(
            "Interactive infer is meant to be run from an IPython "
            "notebook not from run.py."
        )

    # restore_best_checkpoint = base_config.get('restore_best_checkpoint', False)
    # # Check logdir and create it if necessary
    # checkpoint = check_logdir(args, base_config, restore_best_checkpoint)

    load_model = base_config.get('load_model', None)
    restore_best_checkpoint = base_config.get('restore_best_checkpoint', False)
    base_ckpt_dir = check_base_model_logdir(load_model, args,
                                            restore_best_checkpoint)
    base_config['load_model'] = base_ckpt_dir

    # Check logdir and create it if necessary
    checkpoint = check_logdir(args, base_config, restore_best_checkpoint)

    # Initialize Horovod
    if base_config['use_horovod']:
        import horovod.tensorflow as hvd
        hvd.init()
        if hvd.rank() == 0:
            deco_print("Using horovod")
        # Barrier so every rank sees the logdir created by rank 0.
        from mpi4py import MPI
        MPI.COMM_WORLD.Barrier()
    else:
        hvd = None

    if args.enable_logs:
        # Only rank 0 redirects stdout/stderr into log files.
        if hvd is None or hvd.rank() == 0:
            old_stdout, old_stderr, stdout_log, stderr_log = create_logdir(
                args,
                base_config
            )
        base_config['logdir'] = os.path.join(base_config['logdir'], 'logs')

    if args.mode == 'train' or args.mode == 'train_eval' or args.benchmark:
        if hvd is None or hvd.rank() == 0:
            if checkpoint is None or args.benchmark:
                if base_ckpt_dir:
                    deco_print("Starting training from the base model")
                else:
                    deco_print("Starting training from scratch")
            else:
                deco_print(
                    "Restored checkpoint from {}. Resuming training".format(checkpoint),
                )
    elif args.mode == 'eval' or args.mode == 'infer':
        if hvd is None or hvd.rank() == 0:
            deco_print("Loading model from {}".format(checkpoint))

    # Create model and train/eval/infer
    with tf.Graph().as_default():
        model = create_model(
            args, base_config, config_module, base_model, hvd, checkpoint)
        hooks = None
        if ('train_params' in config_module and
            'hooks' in config_module['train_params']):
            hooks = config_module['train_params']['hooks']
        if args.mode == "train_eval":
            # create_model returns a (train_model, eval_model) pair here.
            train(
                model[0], eval_model=model[1], debug_port=args.debug_port,
                custom_hooks=hooks)
        elif args.mode == "train":
            train(
                model, eval_model=None, debug_port=args.debug_port, custom_hooks=hooks)
        elif args.mode == "eval":
            evaluate(model, checkpoint)
        elif args.mode == "infer":
            infer(model, checkpoint, args.infer_output_file)

    if args.enable_logs and (hvd is None or hvd.rank() == 0):
        # Restore the streams that create_logdir() redirected.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        stdout_log.close()
        stderr_log.close()


if __name__ == '__main__':
    main()
| OpenSeq2Seq-master | run.py |
from frame_asr import FrameASR
import numpy as np
import pyaudio as pa
import time

# Capture configuration: mono 16 kHz audio in 2-second chunks.
CHANNELS = 1
RATE = 16000
DURATION = 2.0
CHUNK_SIZE = int(DURATION * RATE)

audio = pa.PyAudio()

# Let the user pick a capture device from every device that can record.
print('Available audio input devices:')
for index in range(audio.get_device_count()):
    info = audio.get_device_info_by_index(index)
    if info.get('maxInputChannels'):
        print(index, info.get('name'))

print('Please type input device ID:')
dev_idx = int(input())

asr = FrameASR()
print('Initialization was successful')


def callback(in_data, frame_count, time_info, status):
    """PyAudio stream callback: transcribe one chunk, print non-empty text."""
    samples = np.frombuffer(in_data, dtype=np.int16)
    transcription = asr.transcribe(samples)
    if len(transcription.strip()):
        print('"{}"'.format(transcription))
    return (in_data, pa.paContinue)


stream = audio.open(format=pa.paInt16,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    input_device_index=dev_idx,
                    stream_callback=callback,
                    frames_per_buffer=CHUNK_SIZE)
stream.start_stream()

# Keep the main thread alive while the callback thread does the work.
while stream.is_active():
    time.sleep(0.1)

stream.stop_stream()
stream.close()
audio.terminate()
| OpenSeq2Seq-master | demo_streaming_asr.py |
# Copyright (c) 2019 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.io.wavfile as wave
import tensorflow as tf
from collections import defaultdict
from open_seq2seq.utils.utils import get_base_config, check_logdir,\
create_model, get_interactive_infer_results
from open_seq2seq.data.speech2text.speech_utils import get_speech_features_from_file, get_speech_features
from ctc_decoders import Scorer, BeamDecoder
# Define the command line arguments that one would pass to run.py here.
# These select interactive-infer mode with batch size 1 on a single GPU,
# no Horovod, raw logits output, and no feature padding (pad_to=0) so
# arbitrary-length audio frames can be fed in.
MODEL_PARAMS = ["--config_file=models/Jasper-Mini-for-Jetson/config_infer_stream.py",
                "--mode=interactive_infer",
                "--logdir=models/Jasper-Mini-for-Jetson/",
                "--batch_size_per_gpu=1",
                "--num_gpus=1",
                "--use_horovod=False",
                "--decoder_params/infer_logits_to_pickle=True",
                "--data_layer_params/pad_to=0"
                ]
def softmax(x):
    """Numerically stable softmax over the last axis of a NumPy array.

    Subtracts the per-row maximum before exponentiating so large logits
    cannot overflow.
    """
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=-1, keepdims=True)
class FrameASR:
    """Streaming speech-to-text over fixed-size audio frames.

    Maintains a rolling audio buffer (current frame plus overlap on both
    sides), runs it through the acoustic model (a TF session built here, or
    an external inference callback), and decodes the logits either greedily
    or with a beam-search decoder (optionally rescored by a language model).
    """

    def __init__(self, model_params=MODEL_PARAMS, scope_name='S2T',
                 sr=16000, frame_len=0.2, frame_overlap=2.4,
                 timestep_duration=0.02,
                 ext_model_infer_func=None, merge=True,
                 beam_width=1, language_model=None,
                 alpha=2.8, beta=1.0):
        '''
        Args:
          model_params: list of OpenSeq2Seq arguments (same as for run.py)
          scope_name: model's scope name
          sr: sample rate, Hz
          frame_len: frame's duration, seconds
          frame_overlap: duration of overlaps before and after current frame, seconds
                         frame_overlap should be multiple of frame_len
          timestep_duration: time per step at model's output, seconds
          ext_model_infer_func: callback for external inference engine,
                                if it is not None, then we don't build TF inference graph
          merge: whether to do merge in greedy decoder
          beam_width: beam width for beam search decoder if larger than 1
          language_model: path to LM (to use with beam search decoder)
          alpha: LM weight (trade-off between acoustic and LM scores)
          beta: word weight (added per every transcribed word in prediction)
        '''
        # Fix: store the callback unconditionally. _decode() consults
        # self.ext_model_infer_func in both paths, but the attribute used to
        # be assigned only in the external-engine branch, so the TF path
        # crashed with AttributeError on the first _decode() call.
        self.ext_model_infer_func = ext_model_infer_func
        if ext_model_infer_func is None:
            # Build TF inference graph
            self.model_S2T, checkpoint_S2T = self._get_model(model_params, scope_name)

            # Create the session and load the checkpoints
            sess_config = tf.ConfigProto(allow_soft_placement=True)
            sess_config.gpu_options.allow_growth = True
            self.sess = tf.InteractiveSession(config=sess_config)
            vars_S2T = {}
            for v in tf.get_collection(tf.GraphKeys.VARIABLES):
                if scope_name in v.name:
                    # Strip the scope prefix so names match the checkpoint.
                    vars_S2T['/'.join(v.op.name.split('/')[1:])] = v
            saver_S2T = tf.train.Saver(vars_S2T)
            saver_S2T.restore(self.sess, checkpoint_S2T)
            self.params = self.model_S2T.params
        else:
            # No TF, load pre-, post-processing parameters from config,
            # use external inference engine
            _, base_config, _, _ = get_base_config(model_params)
            self.params = base_config

        # Fix: read the vocabulary path from self.params, which is set in
        # both branches above; self.model_S2T does not exist when an
        # external inference engine is used.
        self.vocab = self._load_vocab(
            self.params['data_layer_params']['vocab_file']
        )
        self.sr = sr
        self.frame_len = frame_len
        self.n_frame_len = int(frame_len * sr)
        self.frame_overlap = frame_overlap
        self.n_frame_overlap = int(frame_overlap * sr)
        if self.n_frame_overlap % self.n_frame_len:
            raise ValueError(
                "'frame_overlap' should be multiple of 'frame_len'"
            )
        self.n_timesteps_overlap = int(frame_overlap / timestep_duration) - 2
        # Rolling buffer: [left overlap | current frame | right overlap].
        self.buffer = np.zeros(shape=2*self.n_frame_overlap + self.n_frame_len, dtype=np.float32)
        self.merge = merge
        self._beam_decoder = None
        # greedy decoder's state (unmerged transcription)
        self.text = ''
        # forerunner greedy decoder's state (unmerged transcription)
        self.forerunner_text = ''
        self.offset = 5
        # self._calibrate_offset()
        if beam_width > 1:
            if language_model is None:
                self._beam_decoder = BeamDecoder(self.vocab, beam_width)
            else:
                self._scorer = Scorer(alpha, beta, language_model, self.vocab)
                self._beam_decoder = BeamDecoder(self.vocab, beam_width, ext_scorer=self._scorer)
        self.reset()

    def _get_audio(self, wav):
        """Parses audio from wav and returns array of audio features.

        Args:
          wav: numpy array containing wav

        Returns:
          tuple: source audio features as ``np.array``, length of source
          sequence, sample id, audio duration.
        """
        source, audio_duration = get_speech_features(
            wav, 16000., self.params['data_layer_params']
        )
        return source, \
            np.int32([len(source)]), np.int32([0]), \
            np.float32([audio_duration])

    def _parse_audio_element(self, id_and_audio_filename):
        """Parses audio from file and returns array of audio features.

        Args:
          id_and_audio_filename: tuple of sample id and corresponding
            audio file name.

        Returns:
          tuple: source audio features as ``np.array``, length of source
          sequence, sample id, audio duration.
        """
        idx, audio_filename = id_and_audio_filename
        source, audio_duration = get_speech_features_from_file(
            audio_filename,
            params=self.params
        )
        return source, \
            np.int32([len(source)]), np.int32([idx]), \
            np.float32([audio_duration])

    def _preprocess_audio(self, model_in):
        """Convert a list of wav arrays / file names into padded feature batches.

        Returns:
          [audio_features, features_length] ready to feed to the model.
        """
        audio_arr = []
        audio_length_arr = []
        for line in model_in:
            if isinstance(line, str):
                features, features_length, _, _ = self._parse_audio_element([0, line])
            elif isinstance(line, np.ndarray):
                features, features_length, _, _ = self._get_audio(line)
            else:
                raise ValueError(
                    "Speech2Text's interactive inference mode only supports string or",
                    "numpy array as input. Got {}". format(type(line))
                )
            audio_arr.append(features)
            audio_length_arr.append(features_length)
        max_len = np.max(audio_length_arr)
        pad_to = self.params.get("pad_to", 8)
        if pad_to > 0 and self.params.get('backend') == 'librosa':
            # Round the time dimension up to a multiple of pad_to.
            max_len += (pad_to - max_len % pad_to) % pad_to
        for idx in range(len(audio_arr)):
            audio_arr[idx] = np.pad(
                audio_arr[idx], ((0, max_len-len(audio_arr[idx])), (0, 0)),
                "constant", constant_values=0.
            )
        audio_features = np.reshape(
            audio_arr,
            [self.params['batch_size_per_gpu'],
             -1,
             self.params['data_layer_params']['num_audio_features']]
        )
        features_length = np.reshape(audio_length_arr, [self.params['batch_size_per_gpu']])
        return [audio_features, features_length]

    def _decode(self, frame, offset=0, merge=False):
        """Shift `frame` into the rolling buffer, run inference and decode.

        Returns:
          [decoded, decoded_forerunner]: the committed transcription and the
          lower-latency "forerunner" transcription (identical in beam mode).
        """
        assert len(frame) == self.n_frame_len
        # Slide the buffer left by one frame and append the new frame.
        self.buffer[:-self.n_frame_len] = self.buffer[self.n_frame_len:]
        self.buffer[-self.n_frame_len:] = frame
        audio_features, features_length = self._preprocess_audio([self.buffer])
        if self.ext_model_infer_func is None:
            logits = get_interactive_infer_results(
                self.model_S2T, self.sess,
                model_in={'audio_features': audio_features,
                          'features_length': features_length})[0][0]
        else:
            # TODO: check ext_model_infer_func parameters and return value
            logits = self.ext_model_infer_func(audio_features, features_length)

        if self._beam_decoder is None:
            decoded_forerunner = self._greedy_decoder(
                logits[self.n_timesteps_overlap:],
                self.vocab
            )
            # Commit only the part that is far enough from the right edge.
            decoded = decoded_forerunner[:-self.n_timesteps_overlap-offset]
            forerunner_idx = max(0, len(self.forerunner_text) -
                                 (self.n_timesteps_overlap + offset))
            self.forerunner_text = self.forerunner_text[:forerunner_idx] + \
                decoded_forerunner
            self.text += decoded
            if merge:
                decoded = self.greedy_merge(self.text)
                decoded_forerunner = self.greedy_merge(self.forerunner_text)
        else:
            decoded = self._beam_decoder.decode(softmax(
                logits[self.n_timesteps_overlap:-self.n_timesteps_overlap-offset]
            ))[0][-1]
            # Fix: the beam path produces no separate forerunner transcript;
            # previously 'decoded_forerunner' was unbound here, so the
            # return below raised NameError.
            decoded_forerunner = decoded

        return [decoded, decoded_forerunner]

    def transcribe(self, frame=None):
        """Transcribe one frame of audio (zero-padded to the frame length)."""
        if frame is None:
            frame = np.zeros(shape=self.n_frame_len, dtype=np.float32)
        if len(frame) < self.n_frame_len:
            frame = np.pad(frame, [0, self.n_frame_len - len(frame)], 'constant')
        return self._decode(frame, self.offset, self.merge)

    def _calibrate_offset(self, wav_file, max_offset=10, n_calib_inter=100):
        '''
        Calibrate offset for frame-by-frame decoding
        '''
        sr, signal = wave.read(wav_file)
        # warmup
        n_warmup = 1 + int(np.ceil(2.0 * self.frame_overlap / self.frame_len))
        for i in range(n_warmup):
            decoded, _ = self._decode(signal[self.n_frame_len*i:self.n_frame_len*(i+1)], offset=0)

        i = n_warmup
        # Count, over n_calib_inter frames, how many trailing characters of
        # the previous decode reappear at the head of the next decode.
        offsets = defaultdict(lambda: 0)
        while i < n_warmup + n_calib_inter and (i+1)*self.n_frame_len < signal.shape[0]:
            decoded_prev = decoded
            decoded, _ = self._decode(signal[self.n_frame_len*i:self.n_frame_len*(i+1)], offset=0)
            for offset in range(max_offset, 0, -1):
                if decoded[:offset] == decoded_prev[-offset:] and decoded[:offset] != ''.join(['_']*offset):
                    offsets[offset] += 1
                    break
            i += 1
        # Pick the most frequently observed overlap as the offset.
        self.offset = max(offsets, key=offsets.get)

    def reset(self):
        '''
        Reset frame_history and decoder's state
        '''
        self.buffer = np.zeros(shape=self.buffer.shape, dtype=np.float32)
        if self._beam_decoder is not None:
            self._beam_decoder.reset()
        self.prev_char = ''
        self.text = ''
        self.forerunner_text = ''

    @staticmethod
    def _get_model(args, scope):
        '''
        A simpler version of what run.py does. It returns the created model
        and its saved checkpoint
        '''
        with tf.variable_scope(scope):
            args, base_config, base_model, config_module = get_base_config(args)
            checkpoint = check_logdir(args, base_config)
            model = create_model(args, base_config, config_module, base_model, None)
        return model, checkpoint

    @staticmethod
    def _load_vocab(vocab_file):
        """Load the character vocabulary (first char of each line) plus
        the CTC blank symbol '_' appended at the end."""
        vocab = []
        with open(vocab_file, 'r') as f:
            for line in f:
                vocab.append(line[0])
        vocab.append('_')
        return vocab

    @staticmethod
    def _greedy_decoder(logits, vocab):
        """Per-timestep argmax decoding (no merging, includes blanks)."""
        s = ''
        for i in range(logits.shape[0]):
            s += vocab[np.argmax(logits[i])]
        return s

    def greedy_merge(self, s, prev_char=''):
        """Collapse repeated characters and drop CTC blanks ('_')."""
        s_merged = ''
        for i in range(len(s)):
            if s[i] != prev_char:
                prev_char = s[i]
                if prev_char != '_':
                    s_merged += prev_char
        return s_merged
| OpenSeq2Seq-master | frame_asr.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
#from open_seq2seq.data.text2text.text2text import SpecialTextTokens
import argparse
import sentencepiece as spm
import os
import sys
import codecs
vocab_size = 32768
def train_tokenizer_model(args):
    """Train a BPE SentencePiece tokenizer model on a plain-text corpus."""
    print("========> Training tokenizer model")
    # Special-token ids are fixed: PAD=0, EOS=1, BOS=2, UNK=3.
    # TODO: these should not be hardcoded
    train_flags = (
        "--input={0} --model_type=bpe --model_prefix={1} --vocab_size={2} "
        "--pad_id={3} --eos_id={4} --bos_id={5} --unk_id={6} "
        "--character_coverage=1.0"
    ).format(args.text_input, args.model_prefix, args.vocab_size,
             0, 1, 2, 3)
    spm.SentencePieceTrainer.Train(train_flags)
def tokenize(args):
    """Tokenize a parallel (src, tgt) corpus with two SentencePiece models.

    Reads the two input files line by line in lockstep and writes the
    piece-tokenized lines to the two output files. A line pair is skipped
    when either side fails to encode, so the outputs stay aligned.
    """
    print("========> Using tokenizer model")
    model_prefix1 = args.model_prefix1
    model_prefix2 = args.model_prefix2
    input_file1 = args.text_input1
    input_file2 = args.text_input2
    tokenized_output1 = args.tokenized_output1
    tokenized_output2 = args.tokenized_output2

    sp1 = spm.SentencePieceProcessor()
    sp1.Load(model_prefix1 + ".model")
    sp2 = spm.SentencePieceProcessor()
    sp2.Load(model_prefix2 + ".model")

    ind = 0
    with open(input_file1, 'r') as file1, open(input_file2, 'r') as file2:
        with open(tokenized_output1, 'w') as ofile1, open(tokenized_output2, 'w') as ofile2:
            while True:
                _src_raw = file1.readline()
                _tgt_raw = file2.readline()
                # Stop at EOF of either side to keep the outputs parallel.
                if not _src_raw or not _tgt_raw:
                    break
                src_raw = _src_raw.strip()
                tgt_raw = _tgt_raw.strip()
                try:
                    encoded_src_list = sp1.EncodeAsPieces(src_raw)
                    encoded_tgt_list = sp2.EncodeAsPieces(tgt_raw)
                # Fix: narrowed from a bare 'except:' which also swallowed
                # KeyboardInterrupt/SystemExit; the best-effort skip stays.
                except Exception:
                    continue
                encoded_src = ' '.join(encoded_src_list)
                encoded_tgt = ' '.join(encoded_tgt_list)
                ofile1.write(encoded_src + "\n")
                ofile2.write(encoded_tgt + "\n")
                ind += 1
def encode(args):
    """Tokenize a single text file with a SentencePiece model.

    Lines that fail to encode are skipped (best effort).
    """
    print("========> Encoding...")
    model_prefix1 = args.model_prefix
    input_file1 = args.text_input
    tokenized_output1 = args.tokenized_output

    sp1 = spm.SentencePieceProcessor()
    sp1.Load(model_prefix1 + ".model")

    ind = 0
    with open(input_file1, 'r') as file1:
        with open(tokenized_output1, 'w') as ofile1:
            while True:
                _src_raw = file1.readline()
                if not _src_raw:
                    break
                src_raw = _src_raw.strip()
                try:
                    encoded_src_list = sp1.EncodeAsPieces(src_raw)
                # Fix: narrowed from a bare 'except:' which also swallowed
                # KeyboardInterrupt/SystemExit; the best-effort skip stays.
                except Exception:
                    continue
                # Older SentencePiece returns bytes pieces on Python 3.
                if sys.version_info < (3, 0):
                    encoded_src = ' '.join([w for w in encoded_src_list])
                else:
                    encoded_src = ' '.join([w.decode("utf-8") for w in encoded_src_list])
                ofile1.write(encoded_src + "\n")
                ind += 1
    print("========> ...Done")
def detokenize(args):
    """Decode SentencePiece pieces back into plain text, line by line."""
    print("========> Detokenizing")
    sp = spm.SentencePieceProcessor()
    sp.Load(args.model_prefix + ".model")
    with open(args.decoded_output, 'w') as otpt:
        with open(args.text_input, 'r') as inpt:
            for line in inpt:
                decoded_line = sp.DecodePieces(line.split(" "))
                # SentencePiece returns bytes on Python 2, str on Python 3.
                if sys.version_info >= (3, 0):
                    otpt.write(decoded_line)
                else:
                    otpt.write(decoded_line.decode("utf-8"))
def main():
    """Command-line driver: train / tokenize / encode / detokenize via --mode."""
    parser = argparse.ArgumentParser(description='Input Parameters')
    parser.add_argument("--text_input",
                        help="Path to text")
    parser.add_argument("--decoded_output",
                        help="Path were to save decoded output during decoding")
    parser.add_argument("--text_input1",
                        help="Path to src text when tokenizing")
    parser.add_argument("--text_input2",
                        help="Path to tgt text when tokenizing")
    parser.add_argument("--tokenized_output",
                        help="Path to tokenized src text results")
    parser.add_argument("--tokenized_output1",
                        help="Path to tokenized src text results")
    parser.add_argument("--tokenized_output2",
                        help="Path to tokenized tgt text results")
    parser.add_argument("--model_prefix",
                        help="model prefix")
    parser.add_argument("--model_prefix1",
                        help="model prefix for src when tokenizing")
    parser.add_argument("--model_prefix2",
                        help="model prefix for tgt when tokenizing")
    parser.add_argument('--vocab_size', type=int, default=vocab_size,
                        help='Vocabulary size')
    parser.add_argument('--mode', required=True,
                        help='train, tokenize, encode, or detokenize')
    args, unknown = parser.parse_known_args()

    if args.mode == "train":
        train_tokenizer_model(args)
    elif args.mode == "tokenize":
        tokenize(args)
    elif args.mode == "detokenize":
        detokenize(args)
    elif args.mode == "encode":
        encode(args)
    else:
        # Fix: was ValueError('Unknown mode: {0}', args.mode) — the
        # placeholder was never substituted and the exception carried a
        # 2-tuple instead of a formatted message.
        raise ValueError('Unknown mode: {0}'.format(args.mode))


if __name__ == '__main__':
    main()
| OpenSeq2Seq-master | tokenizer_wrapper.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# OpenSeq2Seq documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 12 14:49:40 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../../'))
sys.path.insert(0, os.path.abspath('../../../open_seq2seq'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinxcontrib.bibtex',
]
# Napoleon settings
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = True
napoleon_numpy_docstring = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'OpenSeq2Seq'
copyright = '2018, NVIDIA'
author = 'NVIDIA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'display_version': False,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
    # Inject CSS overrides on top of the sphinx_rtd_theme defaults.
    # NOTE(review): app.add_stylesheet is the legacy (pre-Sphinx 1.8) API;
    # newer Sphinx versions rename it to app.add_css_file.
    app.add_stylesheet("theme_override.css")
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Used as the base name of the generated .hhp project file.
htmlhelp_basename = 'OpenSeq2Seqdoc'
# -- Options for LaTeX output ---------------------------------------------
# All keys are left at their defaults; the commented entries document the
# available knobs.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# 'manual' produces a book-style document with chapters.
latex_documents = [
    (master_doc, 'OpenSeq2Seq.tex', 'OpenSeq2Seq Documentation',
     'Oleksii Kuchaiev, Boris Ginsburg, Vitaliy Lavrukhin, Igor Gitman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 = user commands.
man_pages = [
    (master_doc, 'openseq2seq', 'OpenSeq2Seq Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# The description field still holds the template placeholder text.
texinfo_documents = [
    (master_doc, 'OpenSeq2Seq', 'OpenSeq2Seq Documentation',
     author, 'OpenSeq2Seq', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| OpenSeq2Seq-master | docs/sources/source/conf.py |
# coding: utf-8
import sys
sys.path.append("./transformerxl")
sys.path.append("./transformerxl/utils")
import argparse
from typing import List
import torch
import random
from utils.vocabulary import Vocab
from torch.nn.utils.rnn import pad_sequence
# Command-line interface. All paths default to '' and are validated (only
# --beam_dump explicitly) in main().
parser = argparse.ArgumentParser(
    description='Process OS2S output with external LM')
parser.add_argument('--beam_dump', type=str, default='',
                    help='path to OS2S beam dump')
parser.add_argument('--beam_dump_with_lm', type=str, default='',
                    help='this is where beam dump will be augmented '
                         'with LM score')
parser.add_argument('--model', type=str, default='',
                    help='path to neural language model')
parser.add_argument('--vocab', type=str, default='',
                    # fixed typo: was "vocabluary"
                    help='path to vocabulary')
parser.add_argument('--reference', type=str, default='',
                    help='path to reference against which to compare')
args = parser.parse_args()
def levenshtein(a, b):
    """Return the Levenshtein (edit) distance between sequences *a* and *b*.

    Works on any indexable sequences (strings or token lists). Adapted from
    http://hetland.org/coding/python/levenshtein.py; only two DP rows are
    kept, so memory is O(min(len(a), len(b))).
    """
    # Keep the shorter sequence along the row dimension to minimize memory.
    if len(a) > len(b):
        a, b = b, a
    prev_row = list(range(len(a) + 1))
    for i, item_b in enumerate(b, start=1):
        cur_row = [i] + [0] * len(a)
        for j, item_a in enumerate(a, start=1):
            insert_cost = cur_row[j - 1] + 1
            delete_cost = prev_row[j] + 1
            # Substitution is free when the elements match.
            subst_cost = prev_row[j - 1] + (item_a != item_b)
            cur_row[j] = min(insert_cost, delete_cost, subst_cost)
        prev_row = cur_row
    return prev_row[-1]
def score_fun_linear(s1, s2, s3, s4):
    """Linear score combiner: external-LM score (s4) plus the first decoder
    score (s1). s2 and s3 are accepted for interface compatibility but
    intentionally unused."""
    return s1 + s4
class Scorer:
    """Rescores OS2S beam-search candidates with an external neural LM.

    ``model`` is expected to be a Transformer-XL style LM whose forward
    returns per-token negative log-likelihoods (it has a ``crit`` adaptive
    softmax with a ``keep_order`` flag). Scoring runs on CUDA (``.cuda()``
    below), so a GPU is required.
    """
    def __init__(self, model, path_2_vocab, score_fn=score_fun_linear):
        self._model = model
        self._model.eval()
        # keep_order=True makes the adaptive softmax return per-token losses
        # in input order rather than cluster order.
        self._model.crit.keep_order=True
        self._vocab = Vocab(vocab_file=path_2_vocab)
        self._vocab.build_vocab()
        self._score_fn = score_fn
        print('---->>> Testing Model.')
        # Sanity check: the natural sentences should get better LM scores
        # than their intentionally corrupted variants.
        self.test_model(candidates=['they had one night in which to prepare for deach',
                                    'they had one night in which to prepare for death',
                                    'i hate school', 'i love school',
                                    'the fox jumps on a grass',
                                    'the crox jump a la glass'])
        print('---->>> Done testing model')
    @staticmethod
    def chunks(l, n):
        # Yield successive n-sized slices of list ``l``.
        for i in range(0, len(l), n):
            yield l[i:i + n]
    def nlm_compute(self, candidates_full, batch_size=256):
        """Return a 1-D tensor with one LM score (negated summed NLL, so
        higher is better) per candidate string in ``candidates_full``."""
        results = torch.zeros(len(candidates_full))
        with torch.no_grad():
            for j, candidates in enumerate(self.chunks(candidates_full, batch_size)):
                # NOTE(review): '<S>' is used as both start and end delimiter;
                # confirm the vocabulary has no separate '</S>' token.
                sents = self._vocab.encode_sents(
                    [['<S>'] + string.strip().lower().split() + ['<S>'] for string in candidates])
                seq_lens = torch.tensor([x.shape[0] for x in sents], dtype=torch.long)
                # Build a time-major, zero-padded batch [max_len x batch].
                sents_th = torch.zeros(seq_lens.max(), seq_lens.shape[0],dtype=torch.long).cuda()
                for i, sent in enumerate(sents):
                    sents_th[:seq_lens[i], i] = sent
                mems = tuple()
                # Input is tokens [0:T-1], target is tokens [1:T];
                # ret[0] holds per-token NLL of shape [T-1 x batch].
                ret = self._model(sents_th[:-1], sents_th[1:], *mems)
                max_len = seq_lens.max()-1
                # Mask positions past each sentence's (shifted) length so
                # padding does not contribute to the score.
                mask = torch.arange(max_len).expand(seq_lens.shape[0], max_len) >= seq_lens.unsqueeze(1)-1
                result = -1 * ret[0].masked_fill(mask.transpose(0,1).to("cuda"), 0).sum(dim=0)
                results[j*batch_size:j*batch_size + len(result)] = result
        return results
    def test_model(self, candidates):
        # Debug aid: print "score ---- sentence" for each candidate.
        for item in zip(list(self.nlm_compute(candidates).cpu().detach().numpy()), candidates):
            print("{0} ---- {1}".format(item[0], item[1]))
    def chose_best_candidate(self, candidates: List) -> tuple:
        """Return ``(best_text, lm_scores)``.

        ``candidates`` is a list of ``(s1, s2, s3, text)`` tuples as parsed
        from the beam dump; ``best_text`` maximizes
        ``score_fn(s1, s2, s3, lm_score)`` and ``lm_scores`` is the tensor of
        LM scores for all candidates (in input order).
        """
        candidates_t = [c[3] for c in candidates]
        nln_scores = self.nlm_compute(candidates_t)
        candidate = candidates[0][3]
        # Effectively -inf so the first candidate always wins initially.
        score = -100000000000.0
        for i in range(len(candidates)):
            s1 = candidates[i][0]
            s2 = candidates[i][1]
            s3 = candidates[i][2]
            s4 = nln_scores[i].item()
            new_score = self._score_fn(s1, s2, s3, s4)
            if new_score > score:
                candidate = candidates[i][3]
                score = new_score
        return (candidate, nln_scores)
def main():
    """Rescore an OS2S beam dump with the external LM.

    Reads the dump (``B=>>>>>>>>`` / ``E=>>>>>>>>`` delimited candidate
    groups), writes it back out augmented with an LM-score column, and
    reports word error rate against the reference CSV.
    """
    if args.beam_dump == '':
        print("Please provide path to OS2S beam dump")
        exit(1)
    with open(args.model, 'rb') as f:
        #rnn_lm = torch.load(f)
        # after load the rnn params are not a continuous chunk of memory
        # this makes them a continuous chunk, and will speed up forward pass
        #rnn_lm.rnn.flatten_parameters()
        lm = torch.load(f)
        #lm = InferenceModel(rnn_lm)
    scorer = Scorer(lm, args.vocab)
    #scorer = Scorer(rnn_lm, args.vocab)
    reference_strings = []
    first = True
    with open(args.reference, 'r') as inpr:
        for line in inpr:
            if first: # skip header
                first = False
                continue
            # Reference CSV: transcript text is the third comma field.
            reference_strings.append(line.split(',')[2])
    print('Read {0} reference lines from {1}'.format(len(reference_strings),
                                                     args.reference))
    scores = 0   # accumulated word-level edit distance
    words = 0    # total reference words
    counter = 0  # number of samples processed
    with open(args.beam_dump, 'r') as inpf:
        with open(args.beam_dump_with_lm, 'w') as outf:
            candidate_list = []
            for line in inpf:
                sline = line.strip()
                # sample begin
                if sline == "B=>>>>>>>>":
                    candidate_list = []
                # sample end
                elif sline == "E=>>>>>>>>":
                    if counter % 100 == 0:
                        print("Processed {0} candidates".format(counter))
                    candidate, nlm_scores = scorer.chose_best_candidate(candidate_list)
                    words += len(reference_strings[counter].split())
                    scores += levenshtein(reference_strings[counter].split(),
                                          candidate.split())
                    counter += 1
                    # output augmented scores:
                    # NOTE(review): this assert is stripped under ``python -O``;
                    # consider an explicit check if the invariant matters.
                    outf.write("B=>>>>>>>>\n")
                    assert(len(nlm_scores) == len(candidate_list))
                    for i in range(len(nlm_scores)):
                        outf.write("\t".join(
                            [str(nlm_scores[i].item())] + [str(t) for t in
                                                           list(candidate_list[i])]) + "\n")
                    outf.write("E=>>>>>>>>\n")
                else:
                    # Candidate line: three float scores followed by the text.
                    sparts = sline.split()
                    s1 = float(sparts[0])
                    s2 = float(sparts[1])
                    s3 = float(sparts[2])
                    c = ' '.join(sparts[3:])
                    candidate_list.append((s1, s2, s3, c))
    # NOTE(review): raises ZeroDivisionError when the dump contains no
    # samples (words == 0) -- confirm inputs are always non-empty.
    print("WER: {0} after processing {1} predictions".format((scores*1.0)/words,
                                                             counter))
if __name__ == "__main__":
    main()
| OpenSeq2Seq-master | external_lm_rescore/process_beam_dump.py |
import sys
import math
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.append('utils')
from proj_adaptive_softmax import ProjectedAdaptiveLogSoftmax
from log_uniform_sampler import LogUniformSampler, sample_logits
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding for arbitrary (possibly descending)
    position sequences, as used by Transformer-style models."""

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Geometric frequency ladder: 10000^(-2k/demb) for k = 0..demb/2-1.
        exponents = torch.arange(0.0, demb, 2.0) / demb
        self.register_buffer('inv_freq', 1 / (10000 ** exponents))

    def forward(self, pos_seq, bsz=None):
        """Return embeddings of shape [len(pos_seq), bsz or 1, demb]."""
        # Outer product (position x frequency), then [sin | cos] concat.
        angles = torch.ger(pos_seq, self.inv_freq)
        pos_emb = torch.cat([angles.sin(), angles.cos()], dim=-1)
        pos_emb = pos_emb.unsqueeze(1)
        if bsz is None:
            return pos_emb
        return pos_emb.expand(-1, bsz, -1)
class PositionwiseFF(nn.Module):
    """Position-wise two-layer feed-forward sublayer with residual
    connection and layer normalization (pre- or post-norm variants)."""

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
        super(PositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # d_model -> d_inner -> d_model with ReLU and dropout, as in
        # "Attention Is All You Need".
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = nn.LayerNorm(d_model)
        self.pre_lnorm = pre_lnorm

    def forward(self, inp):
        if self.pre_lnorm:
            # Pre-norm: normalize first, add the residual afterwards.
            return self.CoreNet(self.layer_norm(inp)) + inp
        # Post-norm: add the residual first, then normalize.
        return self.layer_norm(inp + self.CoreNet(inp))
class MultiHeadAttn(nn.Module):
    """Multi-head attention with absolute positions; optional cached
    memory ``mems`` is prepended to the key/value context."""
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 pre_lnorm=False):
        super(MultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Queries come from the current segment only; keys/values may also
        # cover the memory, hence separate q and fused kv projections.
        self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
        self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        # 1/sqrt(d_head) attention-score scaling.
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm
    def forward(self, h, attn_mask=None, mems=None):
        ##### multihead attention
        # [hlen x bsz x n_head x d_head]
        if mems is not None:
            # Keys/values attend over memory + current segment.
            c = torch.cat([mems, h], 0)
        else:
            c = h
        if self.pre_lnorm:
            ##### layer normalization
            c = self.layer_norm(c)
        head_q = self.q_net(h)
        head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
        head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
        head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
        head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
        # [qlen x klen x bsz x n_head]
        attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
        attn_score.mul_(self.scale)
        if attn_mask is not None and attn_mask.any().item():
            # Mask may be per-position [qlen x klen] or per-sample
            # [qlen x klen x bsz]; nonzero entries are masked out.
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
        # [qlen x klen x bsz x n_head]
        # Softmax over the key dimension.
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        # [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = h + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(h + attn_out)
        return output
class RelMultiHeadAttn(nn.Module):
    """Base class for relative-position multi-head attention: holds the
    shared projections and the shifting helpers used by subclasses.
    ``forward`` must be implemented by subclasses."""
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False):
        super(RelMultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Single fused projection producing q, k and v.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        # 1/sqrt(d_head) attention-score scaling.
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm
    def _parallelogram_mask(self, h, w, left=False):
        # Byte mask selecting a parallelogram band of an h x w grid
        # (triu of the top-left square, tril of the bottom-right one).
        mask = torch.ones((h, w)).byte()
        m = min(h, w)
        mask[:m,:m] = torch.triu(mask[:m,:m])
        mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
        if left:
            return mask
        else:
            return mask.flip(0)
    def _shift(self, x, qlen, klen, mask, left=False):
        # Pad then gather through ``mask`` so each query row of ``x`` is
        # shifted into relative-position alignment.
        if qlen > 1:
            zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
                                    device=x.device, dtype=x.dtype)
        else:
            zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
        if left:
            mask = mask.flip(1)
            x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
        else:
            x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
        x = x_padded.masked_select(mask[:,:,None,None]) \
            .view(qlen, klen, x.size(2), x.size(3))
        return x
    def _rel_shift(self, x, zero_triu=False):
        # Pad one column of zeros then reshape so that row i of the
        # position-score matrix ends up shifted left by i (the standard
        # relative-attention shift trick).
        zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),
                               device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
        x = x_padded[1:].view_as(x)
        if zero_triu:
            # Optionally zero the upper triangle of the shifted scores.
            ones = torch.ones((x.size(0), x.size(1)))
            x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
        return x
    def forward(self, w, r, attn_mask=None, mems=None):
        raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Relative-position attention with sinusoidal position encodings ``r``
    and learnable global biases ``r_w_bias``/``r_r_bias`` passed in by the
    caller (attn_type == 0)."""
    def __init__(self, *args, **kwargs):
        super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
        # Projects the position encodings into per-head key space.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
    def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            # Project memory + current segment jointly for keys/values;
            # queries are taken from the current segment only.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)                # qlen x n_head x d_head
        #### compute attention score
        # AC: content term (query + content bias vs keys).
        rw_head_q = w_head_q + r_w_bias                                         # qlen x bsz x n_head x d_head
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head
        # BD: position term (query + position bias vs position encodings),
        # realigned with the relative shift.
        rr_head_q = w_head_q + r_r_bias
        BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k))              # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            # Masking is done in float32 to avoid fp16 -inf issues.
            if attn_mask.dim() == 2:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[None,:,:,None], -float('inf')).type_as(attn_score)
            elif attn_mask.dim() == 3:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[:,:,:,None], -float('inf')).type_as(attn_score)
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Relative-position attention where the position embeddings and biases
    are fully learnable per layer (attn_type == 1)."""
    def __init__(self, *args, **kwargs):
        super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
    def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
        # r_emb: [klen, n_head, d_head], used for term B
        # r_w_bias: [n_head, d_head], used for term C
        # r_bias: [klen, n_head], used for term D
        qlen, bsz = w.size(0), w.size(1)
        if mems is not None:
            # Keys/values span memory + segment; queries span the segment.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
        if klen > r_emb.size(0):
            # Context longer than the learned table: left-pad by repeating
            # the first (most distant) position entry.
            r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
            r_emb = torch.cat([r_emb_pad, r_emb], 0)
            r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
            r_bias = torch.cat([r_bias_pad, r_bias], 0)
        else:
            r_emb = r_emb[-klen:]
            r_bias = r_bias[-klen:]
        #### compute attention score
        rw_head_q = w_head_q + r_w_bias[None]                                   # qlen x bsz x n_head x d_head
        # AC: content term; B_ + D_: learned position terms, realigned
        # via the relative shift.
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head
        B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))                  # qlen x klen x bsz x n_head
        D_ = r_bias[None, :, None]                                              # 1    x klen x 1    x n_head
        BD = self._rel_shift(B_ + D_)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class DecoderLayer(nn.Module):
    """Absolute-position decoder block: multi-head self-attention followed
    by a position-wise feed-forward sublayer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(DecoderLayer, self).__init__()
        self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, dec_attn_mask=None, mems=None):
        attn_out = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class RelLearnableDecoderLayer(nn.Module):
    """Decoder block for attn_type == 1: fully-learnable relative-position
    attention followed by a position-wise feed-forward sublayer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
                                                  **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
        attn_out = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
                                 attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class RelPartialLearnableDecoderLayer(nn.Module):
    """Decoder block for attn_type == 0: partially-learnable
    relative-position attention followed by a position-wise feed-forward
    sublayer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
                                                         d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None):
        attn_out = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias,
                                 attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class AdaptiveEmbedding(nn.Module):
    """Adaptive input embedding: vocabulary clusters delimited by ``cutoffs``
    get progressively smaller embedding sizes (d_embed // div_val**i) and are
    projected up to a common d_proj.

    With div_val == 1 this degenerates to a single plain embedding table,
    projected only when d_proj != d_embed. Outputs are scaled by
    sqrt(d_proj).
    """
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 sample_softmax=False):
        super(AdaptiveEmbedding, self).__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        # Embeddings are scaled by sqrt(d_proj) before entering the model.
        self.emb_scale = d_proj ** 0.5
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            self.emb_layers.append(
                nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)
            )
            if d_proj != d_embed:
                # NOTE: the projection is created uninitialized
                # (torch.Tensor); the training script is expected to
                # initialize it.
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val ** i)
                self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))

    def forward(self, inp):
        """Look up token ids ``inp`` and return embeddings of shape
        ``inp.shape + (d_proj,)``, scaled by sqrt(d_proj)."""
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            inp_flat = inp.view(-1)
            emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
                                   dtype=param.dtype, device=param.device)
            # Embed each vocabulary cluster separately, then scatter the
            # projected vectors back into input order.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                indices_i = mask_i.nonzero().squeeze()
                if indices_i.numel() == 0:
                    continue
                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])
                emb_flat.index_copy_(0, indices_i, emb_i)
            embed = emb_flat.view(*inp.size(), self.d_proj)
        embed.mul_(self.emb_scale)
        return embed
class MemTransformerLM(nn.Module):
    """Transformer language model with segment-level recurrent memory,
    adaptive input embeddings and a projected adaptive softmax.

    ``attn_type`` selects the position scheme:
      0 - relative, partially learnable (default),
      1 - relative, fully learnable,
      2 - absolute, standard,
      3 - absolute, deeper self-attention.
    ``forward(data, target, *mems)`` returns ``[per-token loss] + new_mems``.
    """
    def __init__(self, n_token, n_layer, n_head, d_model, d_head, d_inner,
                 dropout, dropatt, tie_weight=True, d_embed=None,
                 div_val=1, tie_projs=[False], pre_lnorm=False,
                 tgt_len=None, ext_len=None, mem_len=None,
                 cutoffs=[], adapt_inp=False,
                 same_length=False, attn_type=0, clamp_len=-1,
                 sample_softmax=-1):
        # NOTE(review): mutable defaults (tie_projs=[False], cutoffs=[]) are
        # only read here, never mutated, so they are harmless in practice.
        super(MemTransformerLM, self).__init__()
        self.n_token = n_token
        # d_embed defaults to d_model (no input projection needed then).
        d_embed = d_model if d_embed is None else d_embed
        self.d_embed = d_embed
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.word_emb = AdaptiveEmbedding(n_token, d_embed, d_model, cutoffs,
                                          div_val=div_val)
        self.drop = nn.Dropout(dropout)
        self.n_layer = n_layer
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
        # Longest key context a query can see: segment + extension + memory.
        self.max_klen = tgt_len + ext_len + mem_len
        self.attn_type = attn_type
        self.layers = nn.ModuleList()
        if attn_type == 0: # the default attention
            for i in range(n_layer):
                self.layers.append(
                    RelPartialLearnableDecoderLayer(
                        n_head, d_model, d_head, d_inner, dropout,
                        tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
                        dropatt=dropatt, pre_lnorm=pre_lnorm)
                )
        elif attn_type == 1: # learnable embeddings
            for i in range(n_layer):
                self.layers.append(
                    RelLearnableDecoderLayer(
                        n_head, d_model, d_head, d_inner, dropout,
                        tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
                        dropatt=dropatt, pre_lnorm=pre_lnorm)
                )
        elif attn_type in [2, 3]: # absolute embeddings
            for i in range(n_layer):
                self.layers.append(
                    DecoderLayer(
                        n_head, d_model, d_head, d_inner, dropout,
                        dropatt=dropatt, pre_lnorm=pre_lnorm)
                )
        self.sample_softmax = sample_softmax
        # use sampled softmax
        if sample_softmax > 0:
            self.out_layer = nn.Linear(d_model, n_token)
            if tie_weight:
                # NOTE(review): AdaptiveEmbedding exposes emb_layers, not a
                # .weight attribute, so this line looks like it would raise
                # when tie_weight and sample_softmax > 0 -- confirm upstream.
                self.out_layer.weight = self.word_emb.weight
            self.tie_weight = tie_weight
            self.sampler = LogUniformSampler(n_token, sample_softmax)
        # use adaptive softmax (including standard softmax)
        else:
            self.crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_model,
                                                    cutoffs, div_val=div_val)
            if tie_weight:
                # Share input-embedding and output-softmax weights per cluster.
                for i in range(len(self.crit.out_layers)):
                    self.crit.out_layers[i].weight = self.word_emb.emb_layers[i].weight
            if tie_projs:
                for i, tie_proj in enumerate(tie_projs):
                    if tie_proj and div_val == 1 and d_model != d_embed:
                        self.crit.out_projs[i] = self.word_emb.emb_projs[0]
                    elif tie_proj and div_val != 1:
                        self.crit.out_projs[i] = self.word_emb.emb_projs[i]
        self.same_length = same_length
        self.clamp_len = clamp_len
        self._create_params()
    def backward_compatible(self):
        # Allow using checkpoints saved before sample_softmax existed.
        self.sample_softmax = -1
    def _create_params(self):
        """Create the position-related parameters for the chosen attn_type."""
        if self.attn_type == 0: # default attention
            self.pos_emb = PositionalEmbedding(self.d_model)
            # Global content/position biases shared across layers.
            self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
            self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        elif self.attn_type == 1: # learnable
            self.r_emb = nn.Parameter(torch.Tensor(
                    self.n_layer, self.max_klen, self.n_head, self.d_head))
            self.r_w_bias = nn.Parameter(torch.Tensor(
                    self.n_layer, self.n_head, self.d_head))
            self.r_bias = nn.Parameter(torch.Tensor(
                    self.n_layer, self.max_klen, self.n_head))
        elif self.attn_type == 2: # absolute standard
            self.pos_emb = PositionalEmbedding(self.d_model)
        elif self.attn_type == 3: # absolute deeper SA
            self.r_emb = nn.Parameter(torch.Tensor(
                    self.n_layer, self.max_klen, self.n_head, self.d_head))
    def reset_length(self, tgt_len, ext_len, mem_len):
        # Change segment/extension/memory lengths (e.g. for evaluation).
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
    def init_mems(self):
        """Return n_layer+1 empty memory tensors, or None if memory is off."""
        if self.mem_len > 0:
            mems = []
            param = next(self.parameters())
            for i in range(self.n_layer+1):
                empty = torch.empty(0, dtype=param.dtype, device=param.device)
                mems.append(empty)
            return mems
        else:
            return None
    def _update_mems(self, hids, mems, qlen, mlen):
        # does not deal with None
        if mems is None: return None
        # mems is not None
        assert len(hids) == len(mems), 'len(hids) != len(mems)'
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            end_idx = mlen + max(0, qlen - 0 - self.ext_len)
            beg_idx = max(0, end_idx - self.mem_len)
            for i in range(len(hids)):
                cat = torch.cat([mems[i], hids[i]], dim=0)
                new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems
    def _forward(self, dec_inp, mems=None):
        """Run the decoder stack on ``dec_inp`` [qlen x bsz]; return the
        final hidden states and the updated memories."""
        qlen, bsz = dec_inp.size()
        word_emb = self.word_emb(dec_inp)
        mlen = mems[0].size(0) if mems is not None else 0
        klen = mlen + qlen
        if self.same_length:
            # Give every query the same effective attention span.
            all_ones = word_emb.new_ones(qlen, klen)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1+mlen)
                    + torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
        else:
            # Causal mask: a query may attend to itself, earlier positions
            # and all of the memory.
            dec_attn_mask = torch.triu(
                word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
        hids = []
        if self.attn_type == 0: # default
            # Descending positions klen-1 .. 0 for relative encoding.
            pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
                                   dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb)
            pos_emb = self.drop(pos_emb)
            hids.append(core_out)
            for i, layer in enumerate(self.layers):
                mems_i = None if mems is None else mems[i]
                core_out = layer(core_out, pos_emb, self.r_w_bias,
                        self.r_r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
                hids.append(core_out)
        elif self.attn_type == 1: # learnable
            core_out = self.drop(word_emb)
            hids.append(core_out)
            for i, layer in enumerate(self.layers):
                if self.clamp_len > 0:
                    # Limit the learned relative tables to clamp_len entries.
                    r_emb = self.r_emb[i][-self.clamp_len :]
                    r_bias = self.r_bias[i][-self.clamp_len :]
                else:
                    r_emb, r_bias = self.r_emb[i], self.r_bias[i]
                mems_i = None if mems is None else mems[i]
                core_out = layer(core_out, r_emb, self.r_w_bias[i],
                        r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
                hids.append(core_out)
        elif self.attn_type == 2: # absolute
            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
                                   dtype=word_emb.dtype)
            if self.clamp_len > 0:
                pos_seq.clamp_(max=self.clamp_len)
            pos_emb = self.pos_emb(pos_seq)
            core_out = self.drop(word_emb + pos_emb[-qlen:])
            hids.append(core_out)
            for i, layer in enumerate(self.layers):
                mems_i = None if mems is None else mems[i]
                if mems_i is not None and i == 0:
                    # Memory of the first layer also gets position info.
                    mems_i += pos_emb[:mlen]
                core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
                                 mems=mems_i)
                hids.append(core_out)
        elif self.attn_type == 3:
            core_out = self.drop(word_emb)
            hids.append(core_out)
            for i, layer in enumerate(self.layers):
                mems_i = None if mems is None else mems[i]
                if mems_i is not None and mlen > 0:
                    cur_emb = self.r_emb[i][:-qlen]
                    cur_size = cur_emb.size(0)
                    if cur_size < mlen:
                        # Left-pad by repeating the most distant entry.
                        cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
                        cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
                    else:
                        cur_emb = cur_emb[-mlen:]
                    mems_i += cur_emb.view(mlen, 1, -1)
                core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
                core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
                                 mems=mems_i)
                hids.append(core_out)
        core_out = self.drop(core_out)
        # NOTE(review): _update_mems is declared as (hids, mems, qlen, mlen)
        # but is called here with (mlen, qlen). The two orders give the same
        # end_idx (mlen + qlen) only when ext_len == 0 -- confirm before
        # using ext_len > 0.
        new_mems = self._update_mems(hids, mems, mlen, qlen)
        return core_out, new_mems
    def forward(self, data, target, *mems):
        """Return ``[loss (tgt_len x bsz)] + new_mems`` for time-major
        inputs ``data``/``target``."""
        # nn.DataParallel does not allow size(0) tensors to be broadcasted.
        # So, have to initialize size(0) mems inside the model forward.
        # Moreover, have to return new_mems to allow nn.DataParallel to piece
        # them together.
        if not mems: mems = self.init_mems()
        tgt_len = target.size(0)
        hidden, new_mems = self._forward(data, mems=mems)
        pred_hid = hidden[-tgt_len:]
        if self.sample_softmax > 0 and self.training:
            # Sampled-softmax path (training only); column 0 of the logits
            # corresponds to the true target.
            assert self.tie_weight
            logit = sample_logits(self.word_emb,
                self.out_layer.bias, target, pred_hid, self.sampler)
            loss = -F.log_softmax(logit, -1)[:, :, 0]
        else:
            loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1))
            loss = loss.view(tgt_len, -1)
        if new_mems is None:
            return [loss]
        else:
            return [loss] + new_mems
# Smoke test: build small models for several (div_val, d_embed) combinations
# and run them over random data, threading the recurrent memory between
# batches. Requires the project-local ``data_utils`` module.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='unit test')
    parser.add_argument('--n_layer', type=int, default=4, help='')
    parser.add_argument('--n_rel_layer', type=int, default=4, help='')
    parser.add_argument('--n_head', type=int, default=2, help='')
    parser.add_argument('--d_head', type=int, default=2, help='')
    parser.add_argument('--d_model', type=int, default=200, help='')
    parser.add_argument('--d_embed', type=int, default=200, help='')
    parser.add_argument('--d_inner', type=int, default=200, help='')
    parser.add_argument('--dropout', type=float, default=0.0, help='')
    parser.add_argument('--cuda', action='store_true', help='')
    parser.add_argument('--seed', type=int, default=1111, help='')
    parser.add_argument('--multi_gpu', action='store_true', help='')
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")
    B = 4
    tgt_len, mem_len, ext_len = 36, 36, 0
    data_len = tgt_len * 20
    args.n_token = 10000
    import data_utils
    # Random token stream, batched into ordered LM segments.
    data = torch.LongTensor(data_len*B).random_(0, args.n_token).to(device)
    diter = data_utils.LMOrderedIterator(data, B, tgt_len, device=device, ext_len=ext_len)
    cutoffs = [args.n_token // 2]
    tie_projs = [False] + [True] * len(cutoffs)
    for div_val in [1, 2]:
        for d_embed in [200, 100]:
            model = MemTransformerLM(args.n_token, args.n_layer, args.n_head,
                            args.d_model, args.d_head, args.d_inner, args.dropout,
                            dropatt=args.dropout, tie_weight=True,
                            d_embed=d_embed, div_val=div_val,
                            tie_projs=tie_projs, pre_lnorm=True,
                            tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
                            cutoffs=cutoffs, attn_type=0).to(device)
            print(sum(p.numel() for p in model.parameters()))
            mems = tuple()
            for idx, (inp, tgt, seqlen) in enumerate(diter):
                print('batch {}'.format(idx))
                # Feed the returned memory back in for the next segment.
                out = model(inp, tgt, *mems)
                mems = out[1:]
| OpenSeq2Seq-master | external_lm_rescore/transformerxl/mem_transformer.py |
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax over a flat weight/bias passed at call time.

    The vocabulary is split into a frequent-word shortlist handled by a
    "head" softmax plus one logit per rare-word cluster; the probability of
    a tail word factorizes as P(cluster) * P(word | cluster).  See Grave et
    al., "Efficient softmax approximation for GPUs" (2017).
    """

    def __init__(self, in_features, n_classes, cutoffs, keep_order=False):
        """
        Args:
            in_features (int): size of the hidden vectors.
            n_classes (int): vocabulary size.
            cutoffs (sequence[int]): strictly increasing cluster boundaries,
                each strictly between 0 and n_classes - 1.
            keep_order (bool): if True, ``forward`` returns losses in the
                original target order instead of grouped by cluster.

        Raises:
            ValueError: if ``cutoffs`` is not a sorted sequence of unique
                integers inside the valid range.
        """
        super(AdaptiveLogSoftmax, self).__init__()

        cutoffs = list(cutoffs)
        if (cutoffs != sorted(cutoffs)) \
                or (min(cutoffs) <= 0) \
                or (max(cutoffs) >= (n_classes - 1)) \
                or (len(set(cutoffs)) != len(cutoffs)) \
                or any(int(c) != c for c in cutoffs):
            raise ValueError("cutoffs should be a sequence of unique, positive "
                             "integers sorted in an increasing order, where "
                             "each value is between 1 and n_classes-1")

        self.in_features = in_features
        self.n_classes = n_classes
        self.cutoffs = cutoffs + [n_classes]

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # head outputs: shortlist logits followed by one logit per cluster
        self.head_size = self.shortlist_size + self.n_clusters

        self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.in_features))
        self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.keep_order = keep_order

    def forward(self, hidden, target, weight, bias, keep_order=False):
        """Return the per-token negative log-likelihood of ``target``.

        Args:
            hidden: float tensor ``[n, in_features]``.
            target: long tensor ``[n]`` of class indices.
            weight: full output embedding matrix ``[n_classes, in_features]``.
            bias: full output bias ``[n_classes]``.
            keep_order (bool): see ``__init__``.

        Returns:
            Tensor ``[n]`` of negative log-likelihoods.

        Raises:
            RuntimeError: if hidden and target batch sizes differ.
        """
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

        head_weight = torch.cat(
            [weight[:self.shortlist_size], self.cluster_weight], dim=0)
        head_bias = torch.cat(
            [bias[:self.shortlist_size], self.cluster_bias], dim=0)

        head_logit = F.linear(hidden, head_weight, bias=head_bias)
        head_logprob = F.log_softmax(head_logit, dim=1)

        nll = torch.zeros_like(target,
                               dtype=hidden.dtype, device=hidden.device)

        offset = 0
        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            l_idx, h_idx = cutoff_values[i], cutoff_values[i + 1]

            mask_i = (target >= l_idx) & (target < h_idx)
            # BUG FIX: squeeze only the trailing dim so a single match still
            # yields a 1-D index tensor; plain .squeeze() produced a 0-dim
            # tensor, which index_select rejects.
            indices_i = mask_i.nonzero().squeeze(-1)

            if indices_i.numel() == 0:
                continue

            target_i = target.index_select(0, indices_i) - l_idx
            head_logprob_i = head_logprob.index_select(0, indices_i)

            if i == 0:
                # shortlist tokens are scored directly by the head
                logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
            else:
                weight_i = weight[l_idx:h_idx]
                bias_i = bias[l_idx:h_idx]

                hidden_i = hidden.index_select(0, indices_i)

                tail_logit_i = F.linear(hidden_i, weight_i, bias=bias_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                # BUG FIX: cluster i's logit lives at head column
                # shortlist_size + i - 1; the previous negative index (-i)
                # walked the clusters in reverse order whenever there was
                # more than one cluster.
                logprob_i = head_logprob_i[:, self.shortlist_size + i - 1] \
                    + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)

            if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                nll.index_copy_(0, indices_i, -logprob_i)
            else:
                nll[offset:offset + logprob_i.size(0)].copy_(-logprob_i)

            offset += logprob_i.size(0)

        return nll
| OpenSeq2Seq-master | external_lm_rescore/transformerxl/utils/adaptive_softmax.py |
from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except Exception:
                # BUG FIX: the original used a bare `except:` (which also
                # swallows KeyboardInterrupt/SystemExit) and then called
                # quit(), killing the whole process from library code.
                # Print the debugging context and re-raise instead.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                raise
        # Containers are scattered element-wise, preserving their structure.
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # non-tensor leaves are replicated by reference, once per target GPU
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter with support for kwargs dictionary"""
    scattered_args = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kw = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # Pad the shorter of the two lists so both have one entry per replica.
    diff = len(scattered_kw) - len(scattered_args)
    if diff > 0:
        scattered_args.extend(() for _ in range(diff))
    elif diff < 0:
        scattered_kw.extend({} for _ in range(-diff))
    return tuple(scattered_args), tuple(scattered_kw)
class BalancedDataParallel(DataParallel):
    """DataParallel variant that routes a custom (usually smaller) batch
    slice of size ``gpu0_bsz`` to the first device; ``gpu0_bsz == 0``
    excludes device 0 from the forward replicas entirely."""

    def __init__(self, gpu0_bsz, *args, **kwargs):
        # gpu0_bsz: number of samples assigned to device_ids[0].
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            # No devices configured: run the wrapped module directly.
            return self.module(*inputs, **kwargs)
        if self.gpu0_bsz == 0:
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids)
        if self.gpu0_bsz == 0:
            # device 0 receives no data, so drop its replica
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        # Note the argument order differs from DataParallel's helper:
        # parallel_apply(modules, inputs, kwargs_tup, devices).
        return parallel_apply(replicas, inputs, kwargs, device_ids)

    def scatter(self, inputs, kwargs, device_ids):
        # Compute uneven chunk sizes: gpu0 gets gpu0_bsz samples, the
        # remaining devices share the rest as evenly as possible.
        # NOTE(review): assumes len(self.device_ids) > 1; with a single
        # device `bsz_unit` below divides by zero — confirm callers.
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            delta = bsz - sum(chunk_sizes)
            # distribute the remainder over the non-gpu0 devices
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # gpu0's share is not smaller than the others': fall back to
            # the default even scatter
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
| OpenSeq2Seq-master | external_lm_rescore/transformerxl/utils/data_parallel.py |
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# BUG FIX: torch.version.cuda is None on CPU-only builds of PyTorch, which
# made importing this module crash with AttributeError; fall back to "0.0".
_CUDA_VERSION_STR = torch.version.cuda or '0.0'
CUDA_MAJOR = int(_CUDA_VERSION_STR.split('.')[0])
CUDA_MINOR = int(_CUDA_VERSION_STR.split('.')[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with per-cluster projections (Transformer-XL).

    As in the standard adaptive softmax, the vocabulary is split into a
    shortlist plus rare-word clusters, but each cluster may use a smaller
    embedding size (``d_embed // div_val ** i``) reached through a linear
    projection from the ``d_proj``-sized hidden state.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False):
        """
        Args:
            n_token (int): vocabulary size.
            d_embed (int): base embedding size.
            d_proj (int): hidden size fed to the softmax.
            cutoffs (list[int]): increasing cluster boundaries (n_token is
                appended automatically).
            div_val (int): factor by which the embedding size shrinks per
                cluster; 1 means a single shared output layer.
            keep_order (bool): if True, losses are returned in target order
                instead of grouped by cluster.
        """
        super(ProjectedAdaptiveLogSoftmax, self).__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # head outputs: shortlist logits followed by one logit per cluster
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            # one shared output layer, sliced per cluster in forward()
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(
                        nn.Parameter(torch.Tensor(d_proj, d_embed))
                    )
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            # separate, progressively smaller output layers per cluster
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val ** i)
                self.out_projs.append(
                    nn.Parameter(torch.Tensor(d_proj, d_emb_i))
                )
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Logits of ``hidden`` against ``weight``/``bias``, optionally
        through projection ``proj`` (two small matmuls)."""
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            proj_hid = F.linear(hidden, proj.t().contiguous())
            logit = F.linear(proj_hid, weight, bias=bias)
        return logit

    def forward(self, hidden, target, keep_order=False):
        '''
        hidden :: [len*bsz x d_proj]
        target :: [len*bsz]

        Returns the per-token negative log-likelihood, shape [len*bsz].
        '''
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

        if self.n_clusters == 0:
            # no clusters: plain (projected) full softmax
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                        self.out_layers[0].bias, self.out_projs[0])
            nll = -F.log_softmax(logit, dim=-1) \
                .gather(1, target.unsqueeze(1)).squeeze(1)
        else:
            # construct per-cluster weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    # the head also predicts the cluster logits
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)

            nll = torch.zeros_like(target,
                                   dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                mask_i = (target >= l_idx) & (target < r_idx)
                # BUG FIX: squeeze only the last dim so a single matching
                # target still yields a 1-D index tensor for index_select
                # (plain .squeeze() produced a 0-dim tensor).
                indices_i = mask_i.nonzero().squeeze(-1)

                if indices_i.numel() == 0:
                    continue

                target_i = target.index_select(0, indices_i) - l_idx
                head_logprob_i = head_logprob.index_select(0, indices_i)

                if i == 0:
                    logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    hidden_i = hidden.index_select(0, indices_i)

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                    # BUG FIX: cluster i's logit is head column
                    # shortlist_size + i - 1; the old negative index (-i)
                    # selected the clusters in reverse order whenever
                    # n_clusters > 1.
                    logprob_i = head_logprob_i[:, self.shortlist_size + i - 1] \
                        + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)

                if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                    nll.index_copy_(0, indices_i, -logprob_i)
                else:
                    nll[offset:offset + logprob_i.size(0)].copy_(-logprob_i)

                offset += logprob_i.size(0)

        return nll
| OpenSeq2Seq-master | external_lm_rescore/transformerxl/utils/proj_adaptive_softmax.py |
import torch
from torch import nn
import numpy as np
class LogUniformSampler(object):
    """Negative sampler over the log-uniform (Zipfian) distribution used by
    TensorFlow's candidate samplers:

        P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)

    Expected counts are approximated via the numerically stable
    -expm1(num_tries * log1p(-p)), with num_tries fixed at 2 * n_sample, so
    the actual number of unique samples varies from call to call.
    Reference: tensorflow/python/ops/candidate_sampling_ops.py (r1.10).
    """

    def __init__(self, range_max, n_sample):
        with torch.no_grad():
            self.range_max = range_max
            log_indices = torch.arange(1., range_max+2., 1.).log_()
            self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
            self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
        self.n_sample = n_sample

    def sample(self, labels):
        """Draw up to ``2 * n_sample`` negatives and deduplicate them.

        Args:
            labels: LongTensor ``[b1, b2]``.

        Returns:
            (true_log_probs ``[b1, b2]``, samp_log_probs ``[k]``,
             neg_samples ``[k]``) — all on ``labels``' device.
        """
        num_draws = 2 * self.n_sample
        with torch.no_grad():
            negatives = torch.multinomial(
                self.dist, num_draws, replacement=True).unique()
            dev = labels.device
            negatives = negatives.to(dev)
            return (self.log_q[labels].to(dev),
                    self.log_q[negatives].to(dev),
                    negatives)
def sample_logits(embedding, bias, labels, inputs, sampler):
    """
    embedding: an nn.Embedding layer
    bias: [n_vocab]
    labels: [b1, b2]
    inputs: [b1, b2, n_emb]
    sampler: you may use a LogUniformSampler
    Return
        logits: [b1, b2, 1 + n_sample]
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
    k = neg_samples.size(0)
    b1, b2 = labels.size(0), labels.size(1)

    # Embed the positive labels and the negatives in a single lookup.
    flat_ids = torch.cat([labels.view(-1), neg_samples])
    embedded = embedding(flat_ids)
    pos_w = embedded[:-k].view(b1, b2, -1)
    neg_w = embedded[-k:].view(k, -1)

    flat_b = bias[flat_ids]
    pos_b = flat_b[:-k].view(b1, b2)
    neg_b = flat_b[-k:]

    # Positions where a "negative" collides with the true label get masked.
    collisions = (labels[:, :, None] == neg_samples).detach()

    pos_logits = torch.einsum('ijk,ijk->ij',
        [pos_w, inputs]) + pos_b - true_log_probs
    neg_logits = torch.einsum('lk,ijk->ijl',
        [neg_w, inputs]) + neg_b - samp_log_probs
    neg_logits.masked_fill_(collisions, -1e30)

    return torch.cat([pos_logits[:, :, None], neg_logits], -1)
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
    # Smoke test: draw negatives for a random batch and score it.
    S, B = 3, 4
    n_vocab = 10000
    n_sample = 5
    H = 32

    labels = torch.LongTensor(S, B).random_(0, n_vocab)

    # BUG FIX: the old code targeted a removed API — it passed
    # `unique=True` to LogUniformSampler (whose constructor takes
    # (range_max, n_sample)), called sample_logits with six arguments
    # instead of five, and unpacked a (logits, labels) pair from a
    # function that returns only logits.  Align with the current API.
    sampler = LogUniformSampler(n_vocab, n_sample)

    embedding = nn.Embedding(n_vocab, H)
    bias = torch.zeros(n_vocab)
    inputs = torch.Tensor(S, B, H).normal_()

    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
| OpenSeq2Seq-master | external_lm_rescore/transformerxl/utils/log_uniform_sampler.py |
import functools
import os, shutil
import numpy as np
import torch
def logging(s, log_path, print_=True, log_=True):
if print_:
print(s)
if log_:
with open(log_path, 'a+') as f_log:
f_log.write(s + '\n')
def get_logger(log_path, **kwargs):
    """Return ``logging`` pre-bound to *log_path* (extra kwargs are bound too)."""
    return functools.partial(logging, log_path=log_path, **kwargs)
def create_exp_dir(dir_path, scripts_to_save=None, debug=False):
    """Create an experiment directory, optionally snapshotting scripts into
    ``<dir_path>/scripts``, and return a logger bound to ``<dir_path>/log.txt``.

    Args:
        dir_path: experiment directory to create (if missing).
        scripts_to_save: optional iterable of file paths to copy for
            reproducibility.
        debug: if True, nothing is created and a print-only logger is
            returned.

    Returns:
        A callable with the ``logging`` signature.
    """
    if debug:
        print('Debug Mode : no experiment dir created')
        return functools.partial(logging, log_path=None, log_=False)

    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    print('Experiment dir : {}'.format(dir_path))
    if scripts_to_save is not None:
        script_path = os.path.join(dir_path, 'scripts')
        if not os.path.exists(script_path):
            os.makedirs(script_path)
        for script in scripts_to_save:
            # CONSISTENCY FIX: build the destination from script_path instead
            # of re-joining the same components by hand.
            dst_file = os.path.join(script_path, os.path.basename(script))
            shutil.copyfile(script, dst_file)

    return get_logger(log_path=os.path.join(dir_path, 'log.txt'))
def save_checkpoint(model, optimizer, path, epoch):
torch.save(model, os.path.join(path, 'model_{}.pt'.format(epoch)))
torch.save(optimizer.state_dict(), os.path.join(path, 'optimizer_{}.pt'.format(epoch)))
| OpenSeq2Seq-master | external_lm_rescore/transformerxl/utils/exp_utils.py |
import os
from collections import Counter, OrderedDict
import torch
class Vocab(object):
    """Word/character vocabulary: accumulates symbol counts, builds the
    sym<->idx maps and encodes text into ``torch.LongTensor``s."""

    def __init__(self, special=None, min_freq=0, max_size=None, lower_case=True,
                 delimiter=None, vocab_file=None):
        """
        Args:
            special: symbols (e.g. '<eos>') always inserted first; default [].
                BUG FIX: this was a mutable default argument ([]) shared by
                every instance; a fresh list is now created per instance.
            min_freq: drop symbols counted fewer times when building from
                the counter.
            max_size: keep at most this many counted symbols (None = all).
            lower_case: lower-case each line before splitting.
            delimiter: token separator (None = any whitespace, '' = chars).
            vocab_file: optional file with one symbol per line; when set,
                ``build_vocab`` reads it instead of using the counter.
        """
        self.counter = Counter()
        self.special = special if special is not None else []
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file

    def tokenize(self, line, add_eos=False, add_double_eos=False):
        """Split *line* into symbols, optionally wrapping with EOS markers."""
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()

        # empty delimiter '' will evaluate False
        if self.delimiter == '':
            symbols = line
        else:
            symbols = line.split(self.delimiter)

        if add_double_eos:  # lm1b
            return ['<S>'] + symbols + ['<S>']
        elif add_eos:
            return symbols + ['<eos>']
        else:
            return symbols

    def count_file(self, path, verbose=False, add_eos=False):
        """Tokenize *path* line by line, updating the counter; returns the
        tokenized sentences."""
        if verbose: print('counting file {} ...'.format(path))
        assert os.path.exists(path)

        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print(' line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)

        return sents

    def count_sents(self, sents, verbose=False):
        """
        sents : a list of sentences, each a list of tokenized symbols
        """
        if verbose: print('counting {} sents ...'.format(len(sents)))
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print(' line {}'.format(idx))
            self.counter.update(symbols)

    def _build_from_file(self, vocab_file):
        """Populate the maps from a file with one symbol per line."""
        self.idx2sym = []
        self.sym2idx = OrderedDict()

        with open(vocab_file, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split()
                # ROBUSTNESS FIX: skip blank lines instead of raising
                # IndexError on parts[0].
                if not parts:
                    continue
                self.add_symbol(parts[0])
        # Accept either casing of the unknown-word marker; missing both
        # still raises KeyError, as before.
        if '<UNK>' not in self.sym2idx and '<unk>' in self.sym2idx:
            self.unk_idx = self.sym2idx['<unk>']
        else:
            self.unk_idx = self.sym2idx['<UNK>']

    def build_vocab(self):
        """Build idx2sym/sym2idx from ``vocab_file`` or from the counter."""
        if self.vocab_file:
            print('building vocab from {}'.format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            print('final vocab size {}'.format(len(self)))
        else:
            print('building vocab with min_freq={}, max_size={}'.format(
                self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()

            for sym in self.special:
                self.add_special(sym)

            # most frequent first; stop once counts fall below min_freq
            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq: break
                self.add_symbol(sym)

            print('final vocab size {} from {} unique tokens'.format(
                len(self), len(self.counter)))

    def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
                    add_double_eos=False):
        """Encode *path* into tensors: one per line, or a single concatenated
        tensor when *ordered* is True."""
        if verbose: print('encoding file {} ...'.format(path))
        print(path)
        assert os.path.exists(path)
        encoded = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print(' line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos,
                                        add_double_eos=add_double_eos)
                encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def encode_sents(self, sents, ordered=False, verbose=False):
        """Encode pre-tokenized sentences (list of symbol lists)."""
        if verbose: print('encoding {} sents ...'.format(len(sents)))
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print(' line {}'.format(idx))
            encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def add_special(self, sym):
        """Register a special symbol and expose e.g. ``self.eos_idx``."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])

    def add_symbol(self, sym):
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1

    def get_sym(self, idx):
        assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
        return self.idx2sym[idx]

    def get_idx(self, sym):
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        else:
            # unknown symbol: requires a vocab built from file (which sets
            # unk_idx); '<eos>' must never be unknown
            assert '<eos>' not in sym
            assert hasattr(self, 'unk_idx')
            return self.sym2idx.get(sym, self.unk_idx)

    def get_symbols(self, indices):
        return [self.get_sym(idx) for idx in indices]

    def get_indices(self, symbols):
        return [self.get_idx(sym) for sym in symbols]

    def convert_to_tensor(self, symbols):
        return torch.LongTensor(self.get_indices(symbols))

    def convert_to_sent(self, indices, exclude=None):
        """Join symbols for *indices* with spaces, optionally skipping
        indices listed in *exclude*."""
        if exclude is None:
            return ' '.join([self.get_sym(idx) for idx in indices])
        else:
            return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])

    def __len__(self):
        return len(self.idx2sym)
| OpenSeq2Seq-master | external_lm_rescore/transformerxl/utils/vocabulary.py |
import numpy as np
import pickle
import tensorflow as tf
def load_test_sample(pickle_file):
with open(pickle_file, 'rb') as f:
seq, label = pickle.load(f, encoding='bytes')
return seq, label
def load_vocab(vocab_file):
vocab = []
with open(vocab_file, 'r') as f:
for line in f:
vocab.append(line[0])
vocab.append('_')
return vocab
class CTCCustomDecoderTests(tf.test.TestCase):
  """Compares TF's stock CTC decoders against the custom LM-rescoring
  CTC decoder op shipped as libctc_decoder_with_kenlm.so."""

  def setUp(self):
    # Pre-computed logits for the phrase 'ten seconds' plus its label,
    # and the character vocabulary used to render decoder output.
    self.seq, self.label = load_test_sample('ctc_decoder_with_lm/ctc-test.pickle')
    self.vocab = load_vocab('open_seq2seq/test_utils/toy_speech_data/vocab.txt')
    self.beam_width = 16
    self.tol = 1e-3

  def test_decoders(self):
    '''
    Test all CTC decoders on a sample transcript ('ten seconds').
    Standard TF decoders should output 'then seconds'.
    Custom CTC decoder with LM rescoring should yield 'ten seconds'.
    '''
    logits = tf.constant(self.seq)
    seq_len = tf.constant([self.seq.shape[0]])

    greedy_decoded = tf.nn.ctc_greedy_decoder(logits, seq_len,
                                              merge_repeated=True)

    beam_search_decoded = tf.nn.ctc_beam_search_decoder(logits, seq_len,
                                                        beam_width=self.beam_width,
                                                        top_paths=1,
                                                        merge_repeated=False)

    # Custom op: beam search with KenLM rescoring plus a trie constraint.
    custom_op_module = tf.load_op_library('ctc_decoder_with_lm/libctc_decoder_with_kenlm.so')

    decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
        custom_op_module.ctc_beam_search_decoder_with_lm(
            logits, seq_len, beam_width=self.beam_width,
            model_path='ctc_decoder_with_lm/ctc-test-lm.binary',
            trie_path='ctc_decoder_with_lm/ctc-test-lm.trie',
            alphabet_path='open_seq2seq/test_utils/toy_speech_data/vocab.txt',
            alpha=2.0,
            beta=0.5,
            trie_weight=0.1,
            top_paths=1, merge_repeated=False
        )
    )

    with tf.Session() as sess:
      res_greedy, res_beam, res_ixs, res_vals, res_probs = sess.run([greedy_decoded,
          beam_search_decoded, decoded_ixs, decoded_vals, log_probabilities])

    # Greedy decoding: known score and the (wrong) 'then seconds' output.
    decoded_greedy, prob_greedy = res_greedy
    decoded_text = ''.join([self.vocab[c] for c in decoded_greedy[0].values])
    self.assertTrue( abs(7079.117 + prob_greedy[0][0]) < self.tol )
    self.assertTrue( decoded_text == 'then seconds' )

    # Stock beam search also yields 'then seconds'.
    decoded_beam, prob_beam = res_beam
    decoded_text = ''.join([self.vocab[c] for c in decoded_beam[0].values])
    if tf.__version__ >= '1.11':
      # works for newer versions only (with CTC decoder fix)
      self.assertTrue( abs(1.1842575 + prob_beam[0][0]) < self.tol )
      self.assertTrue( decoded_text == 'then seconds' )

    # The LM-rescored decoder recovers the reference transcript.
    self.assertTrue( abs(4.619581 + res_probs[0][0]) < self.tol )
    decoded_text = ''.join([self.vocab[c] for c in res_vals[0]])
    self.assertTrue( decoded_text == self.label )

  def test_beam_decoders(self):
    '''
    Test on random data that custom decoder outputs the same transcript
    if its parameters are equal to zero: alpha = beta = trie_weight = 0.0
    '''
    np.random.seed(1234)
    logits = tf.constant(np.random.uniform(size=self.seq.shape).astype(np.float32))
    seq_len = tf.constant([self.seq.shape[0]])

    beam_search_decoded = tf.nn.ctc_beam_search_decoder(logits, seq_len,
                                                        beam_width=self.beam_width,
                                                        top_paths=1,
                                                        merge_repeated=False)

    custom_op_module = tf.load_op_library('ctc_decoder_with_lm/libctc_decoder_with_kenlm.so')

    # With alpha = beta = trie_weight = 0 the custom op degenerates to a
    # plain beam search and should match TF's decoder.
    decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
        custom_op_module.ctc_beam_search_decoder_with_lm(
            logits, seq_len, beam_width=self.beam_width,
            model_path='ctc_decoder_with_lm/ctc-test-lm.binary',
            trie_path='ctc_decoder_with_lm/ctc-test-lm.trie',
            alphabet_path='open_seq2seq/test_utils/toy_speech_data/vocab.txt',
            alpha=0.0,
            beta=0.0,
            trie_weight=0.0,
            top_paths=1, merge_repeated=False
        )
    )

    with tf.Session() as sess:
      res_beam, res_ixs, res_vals, res_probs = sess.run([beam_search_decoded,
          decoded_ixs, decoded_vals, log_probabilities])

    decoded_beam, prob_beam = res_beam
    prob1 = prob_beam[0][0]
    decoded_text1 = ''.join([self.vocab[c] for c in decoded_beam[0].values])

    prob2 = res_probs[0][0]
    if tf.__version__ >= '1.11':
      # works for newer versions only (with CTC decoder fix)
      self.assertTrue( abs(prob1 - prob2) < self.tol )

    self.assertTrue( prob2 < 0 )
    decoded_text2 = ''.join([self.vocab[c] for c in res_vals[0]])
    self.assertTrue( decoded_text1 == decoded_text2 )
if __name__ == '__main__':
  # Delegate to the TF test runner when executed directly.
  tf.test.main()
| OpenSeq2Seq-master | ctc_decoder_with_lm/ctc-test.py |
import pandas as pd
import os
import argparse
def get_corpus(csv_files):
'''
Get text corpus from a list of CSV files
'''
SEP = '\n'
corpus = ''
for f in csv_files:
df = pd.read_csv(f)
corpus += SEP.join(df['transcript']) + SEP
return corpus
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Build N-gram LM model from CSV files')
    parser.add_argument('csv', metavar='csv', type=str, nargs='+',
                        help='CSV file with transcripts')
    parser.add_argument('--n', type=int, help='n for n-grams', default=3)
    args = parser.parse_args()

    corpus = get_corpus(args.csv)

    # All artifacts share the first CSV's basename.
    path_prefix, _ = os.path.splitext(args.csv[0])
    corpus_name = path_prefix + '.txt'
    arpa_name = path_prefix + '.arpa'
    lm_name = path_prefix + '-lm.binary'
    with open(corpus_name, 'w') as corpus_file:
        corpus_file.write(corpus)

    # Estimate the ARPA n-gram model with KenLM.
    command = 'kenlm/build/bin/lmplz --text {} --arpa {} --o {}'.format(
        corpus_name, arpa_name, args.n)
    print(command)
    os.system(command)

    # Compile the ARPA model into a binary trie.
    command = 'kenlm/build/bin/build_binary trie -q 8 -b 7 -a 256 {} {}'.format(
        arpa_name, lm_name)
    print(command)
    os.system(command)

    command = 'ctc_decoder_with_lm/generate_trie'
    if not (os.path.isfile(command) and os.access(command, os.X_OK)):
        print('INFO: Skipping trie generation, since no custom TF op based CTC decoder found.')
        print('INFO: Please use Baidu CTC decoder with this language model.')
    else:
        trie_name = path_prefix + '-lm.trie'
        command += ' open_seq2seq/test_utils/toy_speech_data/vocab.txt {} {} {}'.format(
            lm_name, corpus_name, trie_name)
        print('INFO: Generating a trie for custom TF op based CTC decoder.')
        print(command)
        os.system(command)
| OpenSeq2Seq-master | scripts/build_lm.py |
import os
import sys
import argparse
import librosa
# Command-line options for the flac -> wav conversion script.
parser = argparse.ArgumentParser(description='Conversion parameters')
parser.add_argument("--source_dir", required=False, type=str, default="calibration/sound_files/",
                    help="Path to source of flac LibriSpeech files")
parser.add_argument("--target_dir", required=False, type=str, default="calibration/sound_files_wav/",
                    help="Path to source of flac LibriSpeech files")
parser.add_argument("--sample_rate", required=False, type=int, default=16000,
                    help="Output sample rate")
args = parser.parse_args()

# Shorthands for the parsed options.
source_dir = args.source_dir
sample_rate = args.sample_rate
target_dir = args.target_dir
def getListOfFiles(dirName):
"""create a list of file and sub directories
names in the given directory
"""
listOfFile = os.listdir(dirName)
allFiles = list()
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
if fullPath[-3:] == "wav" or fullPath[-4:] == "flac":
allFiles.append(fullPath)
return allFiles
def convert_to_wav(flac_files, sample_rate, target_dir):
    """Convert each input audio file to WAV at *sample_rate* under
    *target_dir*, mirroring the last three directory levels of the source
    layout.

    Args:
        flac_files: iterable of source file paths (flac or wav).
        sample_rate: target sampling rate passed to librosa.
        target_dir: output prefix (concatenated with the mirrored subtree).
    """
    for sound_file in flac_files:
        # last 4 path components: 3 directory levels + the filename
        dir_tree = sound_file.split("/")[-4:]
        save_path = '/'.join(dir_tree[:-1])
        # BUG FIX: derive the output name with splitext; the old
        # "[:-4] + 'wav'" slicing produced names like "xwav" (no dot)
        # for .wav inputs.
        name = os.path.splitext(dir_tree[-1])[0] + ".wav"
        # BUG FIX: the original also created save_path relative to the
        # current working directory, littering stray empty folders; only
        # the real output directory is created now.
        sig, _ = librosa.load(sound_file, sample_rate)
        output_dir = target_dir + save_path
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        librosa.output.write_wav(output_dir + "/" + name, sig, sample_rate)
# Script entry: enumerate the source audio files and convert them all.
flac_files = getListOfFiles(source_dir)
convert_to_wav(flac_files, sample_rate, target_dir)
| OpenSeq2Seq-master | scripts/change_sample_rate.py |
# Copyright (c) 2018 NVIDIA Corporation
import time
import numpy as np
import tensorflow as tf
from open_seq2seq.utils.utils import get_base_config, check_logdir,\
create_model, deco_print
from open_seq2seq.models.text2speech import save_audio
if __name__ == '__main__':
  # Define the command line arguments that one would pass to run.py here
  config_file_path = "example_configs/text2speech/tacotron_gst.py"
  checkpoint_path = "result/tacotron-gst-8gpu/logs/"
  # Destination directory for the synthesized audio files.
  syn_save_dir = "/data/speech/LibriSpeech-Syn/syn"

  # CLI-style arguments forwarded to the OpenSeq2Seq config machinery.
  args_T2S = ["--config_file={}".format(config_file_path),
              "--mode=infer",
              "--logdir={}".format(checkpoint_path),
              "--batch_size_per_gpu=256",
              "--infer_output_file=",
              "--num_gpus=1",
              "--use_horovod=False"]
  # A simpler version of what run.py does. It returns the created model and
  # its saved checkpoint
  def get_model(args):
    """Build the model described by *args* and locate its checkpoint path."""
    args, base_config, base_model, config_module = get_base_config(args)
    checkpoint = check_logdir(args, base_config)
    model = create_model(args, base_config, config_module, base_model, None)
    return model, checkpoint
  # A variant of iterate_data
  def iterate_data(model, sess, verbose, num_steps=None):
    """Run inference over the model's data layer(s), saving one synthesized
    audio file per utterance; optionally reports progress and step timing."""

    # Helper function to save audio
    def infer(outputs, i):
      predicted_final_specs = outputs[1]
      sequence_lengths = outputs[4]
      for j in range(len(predicted_final_specs)):
        predicted_final_spec = predicted_final_specs[j]
        audio_length = sequence_lengths[j]

        if audio_length > 2:
          # Recover a magnitude spectrogram: taken directly for the "both"
          # output type, otherwise inverted from the mel prediction.
          if "both" in model.get_data_layer().params['output_type']:
            predicted_mag_spec = outputs[5][j][:audio_length - 1, :]
          else:
            predicted_final_spec = predicted_final_spec[:audio_length - 1, :]
            predicted_mag_spec = model.get_data_layer().get_magnitude_spec(
                predicted_final_spec, is_mel=True)
          save_audio(
              predicted_mag_spec,
              syn_save_dir,
              0,
              n_fft=model.get_data_layer().n_fft,
              sampling_rate=model.get_data_layer().sampling_rate,
              mode="syn",
              number=i * batch_size + j,
              save_format="disk",
              gl_iters=4,
              verbose=False
          )
        else:
          print("WARNING: An audio file was not saved, this will error out in"
                "future steps")

    total_time = 0.0
    bench_start = model.params.get('bench_start', 10)

    size_defined = model.get_data_layer().get_size_in_samples() is not None
    if size_defined:
      dl_sizes = []

    total_samples = []
    fetches = []

    # on horovod num_gpus is 1
    for worker_id in range(model.num_gpus):
      cur_fetches = [
          model.get_data_layer(worker_id).input_tensors,
          model.get_output_tensors(worker_id),
      ]
      if size_defined:
        dl_sizes.append(model.get_data_layer(worker_id).get_size_in_samples())
      try:
        total_objects = 0.0
        cur_fetches.append(model.get_num_objects_per_step(worker_id))
      except NotImplementedError:
        total_objects = None
        deco_print("WARNING: Can't compute number of objects per step, since "
                   "train model does not define get_num_objects_per_step method.")
      fetches.append(cur_fetches)
      total_samples.append(0.0)

    # (Re)initialize every data layer's iterator before the loop.
    sess.run([model.get_data_layer(i).iterator.initializer
              for i in range(model.num_gpus)])

    step = 0
    processed_batches = 0
    if verbose:
      if model.on_horovod:
        ending = " on worker {}".format(model.hvd.rank())
      else:
        ending = ""

    while True:
      tm = time.time()
      fetches_vals = {}
      if size_defined:
        fetches_to_run = {}
        # removing finished data layers
        for worker_id in range(model.num_gpus):
          if total_samples[worker_id] < dl_sizes[worker_id]:
            fetches_to_run[worker_id] = fetches[worker_id]
        fetches_vals = sess.run(fetches_to_run)
      else:
        # if size is not defined we have to process fetches sequentially, so not
        # to lose data when exception is thrown on one data layer
        for worker_id, one_fetch in enumerate(fetches):
          try:
            fetches_vals[worker_id] = sess.run(one_fetch)
          except tf.errors.OutOfRangeError:
            continue

      if step >= bench_start:
        total_time += time.time() - tm

      # looping over num_gpus. In Horovod case this loop is "dummy",
      # since num_gpus = 1
      for worker_id, fetches_val in fetches_vals.items():
        inputs, outputs = fetches_val[:2]

        if total_objects is not None:
          total_objects += np.sum(fetches_val[-1])

        # assuming any element of inputs["source_tensors"] .shape[0] is batch size
        batch_size = inputs["source_tensors"][0].shape[0]
        total_samples[worker_id] += batch_size

        if size_defined:
          # this data_layer is at the last batch with few more elements, cutting
          if total_samples[worker_id] > dl_sizes[worker_id]:
            last_batch_size = dl_sizes[worker_id] % batch_size
            for key, value in inputs.items():
              inputs[key] = model.clip_last_batch(value, last_batch_size)
            outputs = model.clip_last_batch(outputs, last_batch_size)

        infer(outputs, processed_batches)

        processed_batches += 1

      if verbose:
        if size_defined:
          data_size = int(np.sum(np.ceil(np.array(dl_sizes) / batch_size)))
          if step == 0 or len(fetches_vals) == 0 or \
             (data_size > 10 and processed_batches % (data_size // 10) == 0):
            deco_print("Processed {}/{} batches{}".format(
                processed_batches, data_size, ending
            ))
        else:
          deco_print("Processed {} batches{}".format(processed_batches, ending),
                     end='\r')

      # All data layers exhausted: leave the loop.
      if len(fetches_vals) == 0:
        break
      step += 1
      # break early in the case of INT8 calibration
      if num_steps is not None and step >= num_steps:
        break

    if verbose:
      if step > bench_start:
        deco_print(
            "Avg time per step{}: {:.3}s".format(
                ending, 1.0 * total_time / (step - bench_start)
            ),
        )
        if total_objects is not None:
          avg_objects = 1.0 * total_objects / total_time
          deco_print("Avg objects per second{}: {:.3f}".format(ending,
                                                               avg_objects))
      else:
        deco_print("Not enough steps for benchmarking{}".format(ending))
model_T2S, checkpoint_T2S = get_model(args_T2S)
# Create the session and load the checkpoints
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=sess_config)
saver_T2S = tf.train.Saver()
saver_T2S.restore(sess, checkpoint_T2S)
iterate_data(model_T2S, sess, True)
| OpenSeq2Seq-master | scripts/tacotron_gst_create_syn_data.py |
# Copyright (c) 2019 NVIDIA Corporation
"""This file takes given a logits output pickle and start and end shifts
words to speech and writes them in a csv file
"""
from __future__ import absolute_import, division, print_function
import pickle
import argparse
import sys
import csv
import os
sys.path.append(os.getcwd())
from open_seq2seq.utils.ctc_decoder import ctc_greedy_decoder
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description="Infer words' timestamps from logits' dumps")
parser.add_argument("--dumpfile", required=True,
                    help="Path to the dumped logits file")
parser.add_argument("--start_shift", type=float, default=None, help="Calibration start shift")
parser.add_argument("--end_shift", type=float, default=None, help="Calibration end shift")
parser.add_argument("--calibration_file", default=None, help="Calibration parameters filepath")
parser.add_argument("--save_file", default="sample.csv")
args = parser.parse_args()

# Load the pickled inference dump. Use a context manager so the file handle is
# closed deterministically (the original `pickle.load(open(...))` leaked it).
with open(args.dumpfile, "rb") as dump_file:
  dump = pickle.load(dump_file)
results = dump["logits"]       # {wav_filename: logits matrix}
vocab = dump["vocab"]          # decoder alphabet
step_size = dump["step_size"]  # presumably seconds per logits frame -- TODO confirm

start_shift = args.start_shift
end_shift = args.end_shift
save_file = args.save_file
calibration_file = args.calibration_file

# Resolve calibration shifts: explicit CLI values win; otherwise read them from
# the calibration file; otherwise fall back to zeros with a warning.
if start_shift is None and end_shift is None:
  if calibration_file is None:
    print('Warning: no calibration parameters were provided, using zeros instead')
    start_shift, end_shift = 0, 0
  else:
    with open(calibration_file) as calib:
      line = calib.readline().split()
      start_shift = float(line[0])
      end_shift = float(line[1])

# suppose CTC blank symbol is appended to the end of vocab
blank_idx = len(vocab)

with open(save_file, "w") as csv_file:
  writer = csv.writer(csv_file, delimiter=',')
  writer.writerow(["wav_filename", "transcript", "start_time", "end_time"])
  for r in results:
    # BUG FIX: pass the computed `blank_idx` instead of the magic number 28.
    # `blank_idx` was computed above and then never used; the hard-coded 28 is
    # only correct for a 28-symbol vocabulary. calibrate_model.py calls the
    # same decoder with `blank_idx`, so this also restores consistency.
    letters, starts, ends = ctc_greedy_decoder(results[r], vocab, step_size,
                                               blank_idx, start_shift, end_shift)
    writer.writerow([r, letters,
                     ' '.join(['{:.5f}'.format(f) for f in starts]),
                     ' '.join(['{:.5f}'.format(f) for f in ends])])
print("Results written to : {}".format(save_file))
| OpenSeq2Seq-master | scripts/dump_to_time.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import numpy as np
import pandas as pd
if __name__ == '__main__':
  # Root of the extracted M-AILABS US corpus; re-prompted below if wrong.
  data_root = "/data/speech/MAILABS"
  sub_dirs = ["en_US/by_book/male/elliot_miller/hunters_space",
              "en_US/by_book/male/elliot_miller/pink_fairy_book",
              "en_US/by_book/male/elliot_miller/pirates_of_ersatz",
              "en_US/by_book/male/elliot_miller/poisoned_pen",
              "en_US/by_book/male/elliot_miller/silent_bullet",
              "en_US/by_book/female/mary_ann/northandsouth",
              "en_US/by_book/female/mary_ann/midnight_passenger",
              "en_US/by_book/female/judy_bieber/dorothy_and_wizard_oz",
              "en_US/by_book/female/judy_bieber/emerald_city_of_oz",
              "en_US/by_book/female/judy_bieber/ozma_of_oz",
              "en_US/by_book/female/judy_bieber/rinkitink_in_oz",
              "en_US/by_book/female/judy_bieber/sky_island",
              "en_US/by_book/female/judy_bieber/the_master_key",
              "en_US/by_book/female/judy_bieber/the_sea_fairies"]
  # Check to make sure all the csvs can be found; keep prompting for a new
  # root directory until every book's metadata.csv exists.
  while True:
    check = 0
    for sub_dir in sub_dirs:
      csv = os.path.join(data_root, sub_dir, "metadata.csv")
      if not os.path.isfile(csv):
        print(("{} cannot be found. Please ensure that you have"
               "entered the correct directory where you extracted the MAILABS"
               "dataset").format(csv))
        break
      else:
        check += 1
    if check == len(sub_dirs):
      break
    data_root = input("Please input where you extracted the MAILABS US dataset: ")
  # Load all csvs. Columns are pipe-separated; "1" is the wav id, which is
  # rewritten into a path relative to data_root.
  names = ["1", "2", "3"]
  _files = None
  for sub_dir in sub_dirs:
    csv = os.path.join(data_root, sub_dir, "metadata.csv")
    files = pd.read_csv(
        csv, encoding='utf-8', sep='\x7c', header=None, quoting=3, names=names)
    files['1'] = sub_dir + '/wavs/' + files['1'].astype(str)
    if _files is None:
      _files = files
    else:
      # FIX: DataFrame.append was deprecated and removed in pandas >= 2.0;
      # pd.concat is the long-standing supported equivalent (same default
      # index behavior as append).
      _files = pd.concat([_files, files])
  # Optionally split data into train and validation sets
  num_files = _files.shape[0]
  np.random.shuffle(_files.values)
  # Option 1: Take x% for train and 100-x % for val
  # x = 0.8
  # train, val = np.split(_files, [int(num_files/10.*x)])
  # Option 2: Take x files for val, and rest for train
  # x = 32
  # train = _files[:-x]
  # val = _files[-x:]
  # Option 3: Don't have a validation set
  train = _files
  val = None
  # Save new csvs
  train_csv = os.path.join(data_root, "train.csv")
  val_csv = os.path.join(data_root, "val.csv")
  train.to_csv(
      train_csv, encoding='utf-8', sep='\x7c',
      header=None, quoting=3, index=False)
  # FIX: `if val:` raises "The truth value of a DataFrame is ambiguous" as
  # soon as Option 1 or 2 above is enabled (val is then a DataFrame); test
  # for None explicitly instead.
  if val is not None:
    val.to_csv(
        val_csv, encoding='utf-8', sep='\x7c',
        header=None, quoting=3, index=False)
  print("Change dataset_location in tacotron_gst.py to {}".format(data_root))
| OpenSeq2Seq-master | scripts/tacotron_gst_combine_csv.py |
# Copyright (c) 2017 NVIDIA Corporation
"""This file helps to calculate word to speech alignments for your model
Please execute get_calibration_files.sh before running this script
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import pickle
import json
import numpy as np
import tensorflow as tf
sys.path.append(os.getcwd())
from open_seq2seq.utils.utils import deco_print, get_calibration_config, create_model,\
create_logdir, check_logdir, \
check_base_model_logdir
from open_seq2seq.utils import infer
from open_seq2seq.utils.ctc_decoder import ctc_greedy_decoder
if hasattr(tf.compat, 'v1'):
tf.compat.v1.disable_eager_execution()
def run():
  """This function executes a saved checkpoint for
  50 LibriSpeech dev clean files whose alignments are stored in
  calibration/target.json
  This function saves a pickle file with logits after running
  through the model as calibration/sample.pkl
  :return: the path given by --calibration_out (where the shifts are written)
  """
  args, base_config, base_model, config_module = get_calibration_config(sys.argv[1:])
  # Force the data layer onto the fixed calibration sample set and make the
  # decoder dump raw logits (consumed by calibrate() below).
  config_module["infer_params"]["data_layer_params"]["dataset_files"] = \
    ["calibration/sample.csv"]
  config_module["base_params"]["decoder_params"]["infer_logits_to_pickle"] = True
  load_model = base_config.get('load_model', None)
  restore_best_checkpoint = base_config.get('restore_best_checkpoint',
                                            False)
  base_ckpt_dir = check_base_model_logdir(load_model, args,
                                          restore_best_checkpoint)
  base_config['load_model'] = base_ckpt_dir
  # Check logdir and create it if necessary
  checkpoint = check_logdir(args, base_config, restore_best_checkpoint)
  # Initialize Horovod (imported lazily so single-GPU runs need neither
  # horovod nor mpi4py installed)
  if base_config['use_horovod']:
    import horovod.tensorflow as hvd
    hvd.init()
    if hvd.rank() == 0:
      deco_print("Using horovod")
    # Synchronize all workers before building the model.
    from mpi4py import MPI
    MPI.COMM_WORLD.Barrier()
  else:
    hvd = None
  if args.enable_logs:
    # Only the lead worker redirects stdout/stderr into the log directory.
    if hvd is None or hvd.rank() == 0:
      old_stdout, old_stderr, stdout_log, stderr_log = create_logdir(
          args, base_config
      )
    base_config['logdir'] = os.path.join(base_config['logdir'], 'logs')
  # Calibration only makes sense in inference mode.
  if args.mode == 'infer':
    if hvd is None or hvd.rank() == 0:
      deco_print("Loading model from {}".format(checkpoint))
  else:
    print("Run in infer mode only")
    sys.exit()
  with tf.Graph().as_default():
    model = create_model(
        args, base_config, config_module, base_model, hvd, checkpoint)
    infer(model, checkpoint, args.infer_output_file)
  return args.calibration_out
def calibrate(source, target):
  """Estimate the mean start/end time shifts between CTC alignments and
  reference alignments.

  Args:
    source: path to the pickled logits dump (e.g. "calibration/sample.pkl").
    target: path to the reference word alignments JSON.

  Returns:
    Tuple (mean_start_shift, mean_end_shift).
  """
  print("calibrating {}".format(source))
  deltas_start = []
  deltas_end = []
  dump = pickle.load(open(source, "rb"))
  logits_by_file = dump["logits"]
  vocab = dump["vocab"]
  step_size = dump["step_size"]
  blank_idx = len(vocab)  # CTC blank is appended after the vocabulary
  with open(target, "r") as read_file:
    reference = json.load(read_file)
  for wave_file in logits_by_file:
    transcript, starts, ends = ctc_greedy_decoder(logits_by_file[wave_file],
                                                  vocab, step_size,
                                                  blank_idx, 0, 0)
    words = transcript.split(" ")
    print(words)
    aligned = []
    for idx, word in enumerate(words):
      aligned.append({"word": word, "start": starts[idx], "end": ends[idx]})
    ref_words = reference[wave_file]["words"]
    # Only compare utterances where both alignments have the same word count.
    if len(ref_words) != len(words):
      continue
    for idx, ref_word in enumerate(ref_words):
      if ref_word["case"] == "success" and \
          ref_word["alignedWord"] == aligned[idx]["word"]:
        deltas_start.append(ref_word["start"] - aligned[idx]["start"])
        deltas_end.append(ref_word["end"] - aligned[idx]["end"])
  return np.mean(deltas_start), np.mean(deltas_end)
if __name__ == '__main__':
  # Run inference to dump logits, then estimate the calibration shifts.
  calibration_out = run()
  start_mean, end_mean = calibrate("calibration/sample.pkl",
                                   "calibration/target.json")
  print("Mean start shift is {:.5f} seconds".format(start_mean))
  print("Mean end shift is: {:.5f} seconds".format(end_mean))
  # Persist the two shifts (space-separated) for later use, e.g. by
  # dump_to_time.py's --calibration_file option.
  with open(calibration_out, "w") as f:
    string = "{} {}".format(start_mean, end_mean)
    f.write(string)
| OpenSeq2Seq-master | scripts/calibrate_model.py |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2018 Mozilla Corporation
from __future__ import absolute_import, division, print_function
# Make sure we can import stuff from util/
# This script needs to be run from the root of the DeepSpeech repository
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import codecs
import fnmatch
import pandas
import tqdm
import subprocess
import tarfile
import unicodedata
from sox import Transformer
import urllib
from tensorflow.python.platform import gfile
def _maybe_download(fname, data_dir, data_url):
data_path = os.path.join(data_dir, fname)
if not os.path.exists(data_path):
print("Can't find '{}'. Downloading...".format(data_path))
urllib.request.urlretrieve(data_url, filename=data_path + '.tmp')
os.rename(data_path + '.tmp', data_path)
else:
print("Skipping file '{}'".format(data_path))
return data_path
def _download_and_preprocess_data(data_dir):
    """Download, extract, convert and index the full LibriSpeech corpus.

    Stages (each skipped when its output already exists on disk):
      1. download the seven tarballs into `data_dir`
      2. extract them under `data_dir/LibriSpeech`
      3. convert FLAC -> WAV and flatten transcripts, one dir per split
      4. write one CSV per split into `data_dir`
    """
    # Conditionally download data to data_dir
    print("Downloading Librivox data set (55GB) into {} if not already present...".format(data_dir))
    with tqdm.tqdm(total=7) as bar:
        TRAIN_CLEAN_100_URL = "http://www.openslr.org/resources/12/train-clean-100.tar.gz"
        TRAIN_CLEAN_360_URL = "http://www.openslr.org/resources/12/train-clean-360.tar.gz"
        TRAIN_OTHER_500_URL = "http://www.openslr.org/resources/12/train-other-500.tar.gz"
        DEV_CLEAN_URL = "http://www.openslr.org/resources/12/dev-clean.tar.gz"
        DEV_OTHER_URL = "http://www.openslr.org/resources/12/dev-other.tar.gz"
        TEST_CLEAN_URL = "http://www.openslr.org/resources/12/test-clean.tar.gz"
        TEST_OTHER_URL = "http://www.openslr.org/resources/12/test-other.tar.gz"
        # Local archive name = last component of the URL.
        def filename_of(x): return os.path.split(x)[1]
        train_clean_100 = _maybe_download(filename_of(TRAIN_CLEAN_100_URL), data_dir, TRAIN_CLEAN_100_URL)
        bar.update(1)
        train_clean_360 = _maybe_download(filename_of(TRAIN_CLEAN_360_URL), data_dir, TRAIN_CLEAN_360_URL)
        bar.update(1)
        train_other_500 = _maybe_download(filename_of(TRAIN_OTHER_500_URL), data_dir, TRAIN_OTHER_500_URL)
        bar.update(1)
        dev_clean = _maybe_download(filename_of(DEV_CLEAN_URL), data_dir, DEV_CLEAN_URL)
        bar.update(1)
        dev_other = _maybe_download(filename_of(DEV_OTHER_URL), data_dir, DEV_OTHER_URL)
        bar.update(1)
        test_clean = _maybe_download(filename_of(TEST_CLEAN_URL), data_dir, TEST_CLEAN_URL)
        bar.update(1)
        test_other = _maybe_download(filename_of(TEST_OTHER_URL), data_dir, TEST_OTHER_URL)
        bar.update(1)
    # Conditionally extract LibriSpeech data
    # We extract each archive into data_dir, but test for existence in
    # data_dir/LibriSpeech because the archives share that root.
    print("Extracting librivox data if not already extracted...")
    with tqdm.tqdm(total=7) as bar:
        LIBRIVOX_DIR = "LibriSpeech"
        work_dir = os.path.join(data_dir, LIBRIVOX_DIR)
        _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-100"), train_clean_100)
        bar.update(1)
        _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-360"), train_clean_360)
        bar.update(1)
        _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-other-500"), train_other_500)
        bar.update(1)
        _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-clean"), dev_clean)
        bar.update(1)
        _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-other"), dev_other)
        bar.update(1)
        _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-clean"), test_clean)
        bar.update(1)
        _maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-other"), test_other)
        bar.update(1)
    # Convert FLAC data to wav, from:
    #  data_dir/LibriSpeech/split/1/2/1-2-3.flac
    # to:
    #  data_dir/LibriSpeech/split-wav/1-2-3.wav
    #
    # And split LibriSpeech transcriptions, from:
    #  data_dir/LibriSpeech/split/1/2/1-2.trans.txt
    # to:
    #  data_dir/LibriSpeech/split-wav/1-2-0.txt
    #  data_dir/LibriSpeech/split-wav/1-2-1.txt
    #  data_dir/LibriSpeech/split-wav/1-2-2.txt
    #  ...
    print("Converting FLAC to WAV and splitting transcriptions...")
    with tqdm.tqdm(total=7) as bar:
        train_100 = _convert_audio_and_split_sentences(work_dir, "train-clean-100", "train-clean-100-wav")
        bar.update(1)
        train_360 = _convert_audio_and_split_sentences(work_dir, "train-clean-360", "train-clean-360-wav")
        bar.update(1)
        train_500 = _convert_audio_and_split_sentences(work_dir, "train-other-500", "train-other-500-wav")
        bar.update(1)
        dev_clean = _convert_audio_and_split_sentences(work_dir, "dev-clean", "dev-clean-wav")
        bar.update(1)
        dev_other = _convert_audio_and_split_sentences(work_dir, "dev-other", "dev-other-wav")
        bar.update(1)
        test_clean = _convert_audio_and_split_sentences(work_dir, "test-clean", "test-clean-wav")
        bar.update(1)
        test_other = _convert_audio_and_split_sentences(work_dir, "test-other", "test-other-wav")
        bar.update(1)
    # Write sets to disk as CSV files
    train_100.to_csv(os.path.join(data_dir, "librivox-train-clean-100.csv"), index=False)
    train_360.to_csv(os.path.join(data_dir, "librivox-train-clean-360.csv"), index=False)
    train_500.to_csv(os.path.join(data_dir, "librivox-train-other-500.csv"), index=False)
    dev_clean.to_csv(os.path.join(data_dir, "librivox-dev-clean.csv"), index=False)
    dev_other.to_csv(os.path.join(data_dir, "librivox-dev-other.csv"), index=False)
    test_clean.to_csv(os.path.join(data_dir, "librivox-test-clean.csv"), index=False)
    test_other.to_csv(os.path.join(data_dir, "librivox-test-other.csv"), index=False)
def _maybe_extract(data_dir, extracted_data, archive):
    """Extract `archive` into `data_dir` unless it was already extracted.

    Args:
        data_dir: directory the tarball is extracted into.
        extracted_data: path relative to data_dir whose existence marks the
            archive as already extracted.
        archive: path to the tarball.
    """
    # If data_dir/extracted_data does not exist, extract archive in data_dir
    if not gfile.Exists(os.path.join(data_dir, extracted_data)):
        # FIX: use a context manager so the tar handle is closed even if
        # extraction raises (the original leaked it on error).
        with tarfile.open(archive) as tar:
            tar.extractall(data_dir)
def _convert_audio_and_split_sentences(extracted_dir, data_set, dest_dir):
source_dir = os.path.join(extracted_dir, data_set)
target_dir = os.path.join(extracted_dir, dest_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Loop over transcription files and split each one
#
# The format for each file 1-2.trans.txt is:
# 1-2-0 transcription of 1-2-0.flac
# 1-2-1 transcription of 1-2-1.flac
# ...
#
# Each file is then split into several files:
# 1-2-0.txt (contains transcription of 1-2-0.flac)
# 1-2-1.txt (contains transcription of 1-2-1.flac)
# ...
#
# We also convert the corresponding FLACs to WAV in the same pass
files = []
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, '*.trans.txt'):
trans_filename = os.path.join(root, filename)
with codecs.open(trans_filename, "r", "utf-8") as fin:
for line in fin:
# Parse each segment line
first_space = line.find(" ")
seqid, transcript = line[:first_space], line[first_space+1:]
# We need to do the encode-decode dance here because encode
# returns a bytes() object on Python 3, and text_to_char_array
# expects a string.
transcript = unicodedata.normalize("NFKD", transcript) \
.encode("ascii", "ignore") \
.decode("ascii", "ignore")
transcript = transcript.lower().strip()
# Convert corresponding FLAC to a WAV
flac_file = os.path.join(root, seqid + ".flac")
wav_file = os.path.join(target_dir, seqid + ".wav")
if not os.path.exists(wav_file):
Transformer().build(flac_file, wav_file)
wav_filesize = os.path.getsize(wav_file)
files.append((os.path.abspath(wav_file), wav_filesize, transcript))
return pandas.DataFrame(data=files, columns=["wav_filename", "wav_filesize", "transcript"])
if __name__ == "__main__":
    # CLI usage: import_librivox.py <data_dir>
    _download_and_preprocess_data(sys.argv[1])
| OpenSeq2Seq-master | scripts/import_librivox.py |
# Replace the first box of Interactive_Infer_example.ipynb with this
import IPython
import librosa
import numpy as np
import scipy.io.wavfile as wave
import tensorflow as tf
from open_seq2seq.utils.utils import deco_print, get_base_config, check_logdir,\
create_logdir, create_model, get_interactive_infer_results
from open_seq2seq.models.text2speech_wavenet import save_audio
args_T2S = ["--config_file=Infer_T2S_Wave/config.py",
"--mode=interactive_infer",
"--logdir=Infer_T2S_Wave/",
"--batch_size_per_gpu=1",
]
# A simpler version of what run.py does. It returns the created model and its
# saved checkpoint
def get_model(args, scope):
  """Build the model described by CLI args `args` under variable scope `scope`.

  Returns:
    (model, checkpoint) where checkpoint is the path found in the logdir.
  """
  with tf.variable_scope(scope):
    # NOTE: `args` (a list of CLI strings) is rebound to the parsed namespace.
    args, base_config, base_model, config_module = get_base_config(args)
    checkpoint = check_logdir(args, base_config)
    model = create_model(args, base_config, config_module, base_model, None)
    return model, checkpoint
# Build the text-to-speech (WaveNet) model once at import time.
model_T2S, checkpoint_T2S = get_model(args_T2S, "T2S")
# Create the session and load the checkpoints
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=sess_config)
# Restore only the variables under the "T2S" scope, with the scope prefix
# stripped so the names match those stored in the checkpoint.
vars_T2S = {}
for v in tf.get_collection(tf.GraphKeys.VARIABLES):
  if "T2S" in v.name:
    vars_T2S["/".join(v.op.name.split("/")[1:])] = v
saver_T2S = tf.train.Saver(vars_T2S)
saver_T2S.restore(sess, checkpoint_T2S)
# Define the inference function
# Cache data-layer constants used by infer().
n_fft = model_T2S.get_data_layer().n_fft
sampling_rate = model_T2S.get_data_layer().sampling_rate
def infer(line):
  """
  Infers one value at a time using a sliding window with width equal to the
  receptive field.
  """
  print("Input File")
  print(line)
  GET_SPEC_FROM_WAV = False
  max_steps = 200000
  receptive_field = 6139  # 3x10
  # Sliding window of the most recent samples fed back into the model.
  source = np.zeros([1, receptive_field])
  src_length = np.full([1], receptive_field)
  audio = []
  if GET_SPEC_FROM_WAV:
    # Derive the conditioning spectrogram directly from the .wav file.
    file_name = str.encode(line)
    spec, spec_length = model_T2S.get_data_layer(). \
        _parse_spectrogram_element(file_name)
  else:
    # Load a precomputed spectrogram from a .npy file and upsample it.
    spec = np.load(line + ".npy").T
    spec = np.repeat(spec, 256, axis=0)
    spec_length = np.reshape(spec.shape[0], [1])
    spec = np.expand_dims(spec, axis=0)
  for spec_offset in range(max_steps):
    output = get_interactive_infer_results(
        model_T2S, sess,
        model_in=(source, src_length, spec, spec_length, spec_offset)
    )
    predicted = output[-1][0]
    audio.append(predicted)
    # Push the new sample into the window: write slot 0, then rotate it to the
    # end so the oldest sample falls off the front.
    source[0][0] = predicted
    source[0] = np.roll(source[0], -1)
    # Periodically checkpoint the audio generated so far.
    if spec_offset % 1000 == 0:
      print("Saving audio for step {}".format(spec_offset))
      wav = save_audio(
          np.array(audio), "result", 0,
          sampling_rate=sampling_rate, mode="infer"
      )
| OpenSeq2Seq-master | scripts/wavenet_naive_infer.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import os
import numpy as np
import pandas as pd
if __name__ == '__main__':
  # Roots of the two corpora being mixed; re-prompted below if wrong.
  MAILABS_data_root = "/data/speech/MAILABS"
  libri_data_root = "/data/speech/librispeech"
  libri_csvs = ["librivox-train-clean-100.csv",
                "librivox-train-clean-360.csv",
                "librivox-train-other-500.csv"]
  sub_dirs = ["en_US/by_book/male/elliot_miller/hunters_space",
              "en_US/by_book/male/elliot_miller/pink_fairy_book",
              "en_US/by_book/male/elliot_miller/pirates_of_ersatz",
              "en_US/by_book/male/elliot_miller/poisoned_pen",
              "en_US/by_book/male/elliot_miller/silent_bullet",
              "en_US/by_book/female/mary_ann/northandsouth",
              "en_US/by_book/female/mary_ann/midnight_passenger",
              "en_US/by_book/female/judy_bieber/dorothy_and_wizard_oz",
              "en_US/by_book/female/judy_bieber/emerald_city_of_oz",
              "en_US/by_book/female/judy_bieber/ozma_of_oz",
              "en_US/by_book/female/judy_bieber/rinkitink_in_oz",
              "en_US/by_book/female/judy_bieber/sky_island",
              "en_US/by_book/female/judy_bieber/the_master_key",
              "en_US/by_book/female/judy_bieber/the_sea_fairies"]
  # Check to make sure all the csvs can be found; re-prompt until they are.
  while True:
    check = 0
    for sub_dir in sub_dirs:
      csv = os.path.join(MAILABS_data_root, sub_dir, "metadata.csv")
      if not os.path.isfile(csv):
        print(("{} cannot be found. Please ensure that you have"
               "entered the correct directory where you extracted the MAILABS"
               "dataset").format(csv))
        break
      else:
        check += 1
    if check == len(sub_dirs):
      break
    MAILABS_data_root = input("Please input where you extracted the MAILABS US"
                              " dataset: ")
  # Same existence check for the three librispeech csvs.
  while True:
    check = 0
    for csv_file in libri_csvs:
      csv = os.path.join(libri_data_root, csv_file)
      if not os.path.isfile(csv):
        print(("{} cannot be found. Please ensure that you have"
               "entered the correct directory where you extracted the"
               "librispeech dataset").format(csv))
        break
      else:
        check += 1
    if check == len(libri_csvs):
      break
    libri_data_root = input("Please input where you extracted the librispeech"
                            " dataset: ")
  # Load libri csvs
  libri_files = None
  for csv in libri_csvs:
    csv = os.path.join(libri_data_root, csv)
    file = pd.read_csv(csv, encoding='utf-8', quoting=3)
    if libri_files is None:
      libri_files = file
    else:
      # NOTE(review): DataFrame.append was removed in pandas 2.0; this script
      # needs an older pandas (or porting to pd.concat).
      libri_files = libri_files.append(file)
  # Load MAILABS csvs (pipe-separated, no header)
  MAILABS_files = None
  names = ["1", "2", "3"]
  for sub_dir in sub_dirs:
    csv = os.path.join(MAILABS_data_root, sub_dir, "metadata.csv")
    files = pd.read_csv(
        csv, encoding='utf-8', sep='\x7c', header=None, quoting=3, names=names)
    files['1'] = sub_dir + '/wavs/' + files['1'].astype(str)
    if MAILABS_files is None:
      MAILABS_files = files
    else:
      MAILABS_files = MAILABS_files.append(files)
  num_M_files = MAILABS_files.shape[0]
  np.random.shuffle(MAILABS_files.values)
  curr_M_i = 0
  num_libri = libri_files.shape[0]
  # Mix MAILABS wavs with libri transcripts: each libri row gets the next
  # (cycled) MAILABS wav path; -1 marks the filesize column as unused.
  for i, row in enumerate(libri_files.itertuples()):
    libri_files.iat[i, 0] = MAILABS_files.iloc[curr_M_i, 0]
    libri_files.iat[i, 1] = -1
    curr_M_i += 1
    if curr_M_i >= num_M_files:
      curr_M_i = 0
    # Progress report roughly every 1%.
    if i % int(num_libri/100) == 0:
      print("Processed {} out of {}".format(i, num_libri))
  libri_files.to_csv(
      "generate.csv", encoding='utf-8', sep='\x7c',
      header=None, quoting=3, index=False)
| OpenSeq2Seq-master | scripts/tacotron_gst_create_infer_csv.py |
'''
Interface to Baidu's CTC decoders
from https://github.com/PaddlePaddle/DeepSpeech/decoders/swig
'''
import argparse
import pickle
import numpy as np
from ctc_decoders import Scorer
from ctc_decoders import ctc_greedy_decoder
from ctc_decoders import ctc_beam_search_decoder_batch, ctc_beam_search_decoder
from collections import defaultdict
import multiprocessing
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(
    description='CTC decoding and tuning with LM rescoring'
)
parser.add_argument('--mode',
                    help='either \'eval\' (default) or \'infer\'',
                    default='eval'
                    )
parser.add_argument('--infer_output_file',
                    help='output CSV file for \'infer\' mode',
                    required=False
                    )
parser.add_argument('--logits',
                    help='pickle file with CTC logits',
                    required=True
                    )
parser.add_argument('--labels',
                    help='CSV file with audio filenames \
                    (and ground truth transcriptions for \'eval\' mode)',
                    required=True
                    )
parser.add_argument('--lm',
                    help='KenLM binary file',
                    required=True
                    )
parser.add_argument('--vocab',
                    help='vocab file with characters (alphabet)',
                    required=True
                    )
parser.add_argument('--alpha', type=float,
                    help='value of LM weight',
                    required=True
                    )
parser.add_argument('--alpha_max', type=float,
                    help='maximum value of LM weight (for a grid search in \'eval\' mode)',
                    required=False
                    )
parser.add_argument('--alpha_step', type=float,
                    help='step for LM weight\'s tuning in \'eval\' mode',
                    required=False, default=0.1
                    )
parser.add_argument('--beta', type=float,
                    help='value of word count weight',
                    required=True
                    )
parser.add_argument('--beta_max', type=float,
                    help='maximum value of word count weight (for a grid search in \
                    \'eval\' mode',
                    required=False
                    )
parser.add_argument('--beta_step', type=float,
                    help='step for word count weight\'s tuning in \'eval\' mode',
                    required=False, default=0.1
                    )
parser.add_argument('--beam_width', type=int,
                    help='beam width for beam search decoder',
                    required=False, default=128
                    )
parser.add_argument('--dump_all_beams_to',
                    help='filename to dump all beams in eval mode for debug purposes',
                    required=False, default='')
args = parser.parse_args()
# When no *_max is given, tune over only the provided value; the step/10
# epsilon keeps the upper bound inside np.arange's half-open interval.
if args.alpha_max is None:
  args.alpha_max = args.alpha
# include alpha_max in tuning range
args.alpha_max += args.alpha_step/10.0
if args.beta_max is None:
  args.beta_max = args.beta
# include beta_max in tuning range
args.beta_max += args.beta_step/10.0
# Beam search is parallelized across all available CPU cores.
num_cpus = multiprocessing.cpu_count()
def levenshtein(a, b):
  """Calculates the Levenshtein distance between a and b.
  The code was taken from: http://hetland.org/coding/python/levenshtein.py
  """
  n, m = len(a), len(b)
  if n > m:
    # Keep the shorter sequence in `a` so only O(min(n, m)) cells are stored.
    a, b, n, m = b, a, m, n
  row = list(range(n + 1))
  for i in range(1, m + 1):
    prev, row = row, [i] + [0] * n
    for j in range(1, n + 1):
      insert_cost = prev[j] + 1
      delete_cost = row[j - 1] + 1
      # bool adds as 0/1: substitution costs 1 only on a mismatch.
      subst_cost = prev[j - 1] + (a[j - 1] != b[i - 1])
      row[j] = min(insert_cost, delete_cost, subst_cost)
  return row[n]
def load_dump(pickle_file):
  """Deserialize a pickled logits dump from `pickle_file`."""
  with open(pickle_file, 'rb') as fin:
    return pickle.load(fin, encoding='bytes')
def get_logits(data, labels):
  '''
  Get logits from pickled data.
  Two pickle layouts are supported:
  1. raw logits NumPy array (rows parallel to `labels`)
  2. dictionary with logits and additional meta information
  '''
  if not isinstance(data, np.ndarray):
    return data['logits']
  # Re-key the raw array by audio filename (first column of each label row).
  return {row[0]: data[idx] for idx, row in enumerate(labels)}
def load_labels(csv_file):
  """Load the labels CSV (header row skipped) as a string ndarray."""
  return np.loadtxt(csv_file, skiprows=1, delimiter=',', dtype=str)
def load_vocab(vocab_file):
  """Read the alphabet: first character of each line, plus a trailing CTC
  blank symbol '_'."""
  with open(vocab_file, 'r') as fin:
    vocab = [line[0] for line in fin]
  vocab.append('_')
  return vocab
def greedy_decoder(logits, vocab, merge=True):
  """Frame-wise argmax CTC decoding.

  With merge=True, consecutive repeats are collapsed and the blank '_' is
  removed; with merge=False the raw per-frame characters are returned.
  """
  chars = []
  last = ''
  for frame in range(logits.shape[0]):
    best = vocab[np.argmax(logits[frame])]
    if merge and best == last:
      continue
    chars.append(best)
    last = best
  decoded = ''.join(chars)
  return decoded.replace('_', '') if merge else decoded
def softmax(x):
  """Numerically stable softmax over the last axis (row max subtracted)."""
  shifted = x - np.expand_dims(np.max(x, axis=-1), -1)
  exp = np.exp(shifted)
  return exp / np.expand_dims(exp.sum(axis=-1), -1)
def evaluate_wer(logits, labels, vocab, decoder):
  """Word error rate of `decoder` over a labeled set.

  Args:
    logits: dict mapping audio filename -> logits matrix.
    labels: rows whose first element is the filename, last the transcript.
    vocab: alphabet forwarded to `decoder`.
    decoder: callable (logits, vocab) -> predicted transcript string.

  Returns:
    Tuple (corpus_wer, per-sample WER ndarray).
  """
  dist_sum = 0.0
  word_sum = 0.0
  wer_per_sample = np.empty(shape=len(labels))
  empty_preds = 0
  for idx, row in enumerate(labels):
    filename, reference = row[0], row[-1]
    hypothesis = decoder(logits[filename], vocab)
    if hypothesis == '':
      empty_preds += 1
    errors = levenshtein(reference.lower().split(), hypothesis.lower().split())
    ref_len = len(reference.split())
    dist_sum += errors
    word_sum += ref_len
    wer_per_sample[idx] = errors / ref_len
  print('# empty preds: {}'.format(empty_preds))
  return dist_sum / word_sum, wer_per_sample
# Load everything once up front: logits dump, labels, alphabet.
data = load_dump(args.logits)
labels = load_labels(args.labels)
logits = get_logits(data, labels)
vocab = load_vocab(args.vocab)
# Force the last symbol to be the CTC blank regardless of the vocab file.
vocab[-1] = '_'
# Beam search consumes per-frame probability distributions, not raw logits.
probs_batch = []
for line in labels:
  audio_filename = line[0]
  probs_batch.append(softmax(logits[audio_filename]))
if args.mode == 'eval':
  # Baseline: greedy (no-LM) WER for reference.
  wer, _ = evaluate_wer(logits, labels, vocab, greedy_decoder)
  print('Greedy WER = {:.4f}'.format(wer))
  # Grid search over LM weight (alpha) and word-count weight (beta).
  best_result = {'wer': 1e6, 'alpha': 0.0, 'beta': 0.0, 'beams': None}
  for alpha in np.arange(args.alpha, args.alpha_max, args.alpha_step):
    for beta in np.arange(args.beta, args.beta_max, args.beta_step):
      # vocab[:-1] excludes the blank: the scorer/decoder append it themselves.
      scorer = Scorer(alpha, beta, model_path=args.lm, vocabulary=vocab[:-1])
      res = ctc_beam_search_decoder_batch(probs_batch, vocab[:-1],
                                          beam_size=args.beam_width,
                                          num_processes=num_cpus,
                                          ext_scoring_func=scorer)
      total_dist = 0.0
      total_count = 0.0
      for idx, line in enumerate(labels):
        label = line[-1]
        # res[idx] is a list of (score, transcript); take the best beam.
        score, text = [v for v in zip(*res[idx])]
        pred = text[0]
        dist = levenshtein(label.lower().split(), pred.lower().split())
        total_dist += dist
        total_count += len(label.split())
      wer = total_dist / total_count
      if wer < best_result['wer']:
        best_result['wer'] = wer
        best_result['alpha'] = alpha
        best_result['beta'] = beta
        best_result['beams'] = res
      print('alpha={:.2f}, beta={:.2f}: WER={:.4f}'.format(alpha, beta, wer))
  print('BEST: alpha={:.2f}, beta={:.2f}, WER={:.4f}'.format(
      best_result['alpha'], best_result['beta'], best_result['wer']))
  # Optional debug dump of every beam of the best (alpha, beta) pair.
  if args.dump_all_beams_to:
    with open(args.dump_all_beams_to, 'w') as f:
      for beam in best_result['beams']:
        f.write('B=>>>>>>>>\n')
        for pred in beam:
          f.write('{} 0.0 0.0 {}\n'.format(pred[0], pred[1]))
        f.write('E=>>>>>>>>\n')
elif args.mode == 'infer':
  # Single decoding pass at the given alpha/beta; write a CSV of transcripts.
  scorer = Scorer(args.alpha, args.beta, model_path=args.lm, vocabulary=vocab[:-1])
  res = ctc_beam_search_decoder_batch(probs_batch, vocab[:-1],
                                      beam_size=args.beam_width,
                                      num_processes=num_cpus,
                                      ext_scoring_func=scorer)
  infer_preds = np.empty(shape=(len(labels), 2), dtype=object)
  for idx, line in enumerate(labels):
    filename = line[0]
    score, text = [v for v in zip(*res[idx])]
    infer_preds[idx, 0] = filename
    infer_preds[idx, 1] = text[0]
  np.savetxt(args.infer_output_file, infer_preds, fmt='%s', delimiter=',',
             header='wav_filename,transcript')
| OpenSeq2Seq-master | scripts/decode.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import string
import os
import pandas as pd
if __name__ == '__main__':
  synthetic_data_root = "/data/speech/librispeech-syn/"
  # NOTE(review): the doubled braces survive .format() as a literal "{}" in
  # the emitted path, and only the second placeholder receives the row index.
  # This looks deliberate (a placeholder to be filled later by the consumer of
  # synthetic_data.csv), but confirm with that consumer.
  synthetic_data_sample = synthetic_data_root + "{{}}/sample_step0_{}_syn.wav"
  # Normalization tables: fancy quotes/accented chars -> ASCII, and all
  # punctuation except apostrophes stripped.
  in_char = "\"'’“”àâèéêü"
  out_char = "'''''aaeeeu"
  punctuation = string.punctuation.replace("'", "")
  table = str.maketrans(in_char, out_char, punctuation)
  def _normalize_transcript(text):
    """Parses the transcript to remove punctation, lowercase all characters, and
    all non-ascii characters
    Args:
      text: the string to parse
    Returns:
      text: the normalized text
    """
    text = text.translate(table)
    text = text.lower()
    text = text.strip()
    return text
  names = ["wav_filename", "wav_filesize", "transcript"]
  generated_files = pd.read_csv(
      "generate.csv", encoding='utf-8', sep='\x7c',
      header=None, quoting=3, names=names)
  num_files = len(generated_files)
  # Point each row at its synthesized wav; -1 marks the filesize as unknown.
  for i, row in enumerate(generated_files.itertuples()):
    generated_files.iat[i, 0] = synthetic_data_sample.format(i)
    line = _normalize_transcript(generated_files.iat[i, 2])
    generated_files.iat[i, 1] = -1
    generated_files.iat[i, 2] = line
    # NOTE(review): int(num_files/10) is 0 for fewer than 10 rows, which would
    # raise ZeroDivisionError here.
    if i % int(num_files/10) == 0:
      print("Processed {} out of {}".format(i, num_files))
  generated_files.to_csv(
      os.path.join(synthetic_data_root, "synthetic_data.csv"), encoding='utf-8',
      sep=',', quoting=3, index=False)
| OpenSeq2Seq-master | scripts/nsr_create_syn_train_csv.py |
%matplotlib inline
# Replace the first box of Interactive_Infer_example.ipynb with this
import IPython
import librosa
import numpy as np
import scipy.io.wavfile as wave
import tensorflow as tf
import matplotlib.pyplot as plt
from open_seq2seq.utils.utils import deco_print, get_base_config, check_logdir,\
create_logdir, create_model, get_interactive_infer_results
from open_seq2seq.models.text2speech import save_audio
args_T2S = ["--config_file=Infer_T2S/config.py",
"--mode=interactive_infer",
"--logdir=Infer_T2S/",
"--batch_size_per_gpu=1",
]
# A simpler version of what run.py does. It returns the created model and its saved checkpoint
def get_model(args, scope):
  """Build the model described by CLI args `args` under variable scope `scope`;
  return (model, checkpoint)."""
  with tf.variable_scope(scope):
    # NOTE: `args` (a list of CLI strings) is rebound to the parsed namespace.
    args, base_config, base_model, config_module = get_base_config(args)
    checkpoint = check_logdir(args, base_config)
    model = create_model(args, base_config, config_module, base_model, None)
    return model, checkpoint
# Build the text-to-speech (Tacotron) model once at import time.
model_T2S, checkpoint_T2S = get_model(args_T2S, "T2S")
# Create the session and load the checkpoints
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=sess_config)
# Restore only the variables under the "T2S" scope, with the scope prefix
# stripped so the names match those stored in the checkpoint.
vars_T2S = {}
for v in tf.get_collection(tf.GraphKeys.VARIABLES):
  if "T2S" in v.name:
    vars_T2S["/".join(v.op.name.split("/")[1:])] = v
saver_T2S = tf.train.Saver(vars_T2S)
saver_T2S.restore(sess, checkpoint_T2S)
# line = "I was trained using Nvidia's Open Sequence to Sequence framework."
# Define the inference function
n_fft = model_T2S.get_data_layer().n_fft
sampling_rate = model_T2S.get_data_layer().sampling_rate
def infer(line):
  """Synthesize speech for one line of English text, save and display the
  mel spectrogram, and play the generated audio inline.

  Relies on the module-level ``model_T2S``, ``sess``, ``n_fft`` and
  ``sampling_rate`` set up above; intended for notebook use.
  """
  print("Input English")
  print(line)
  # Generate speech
  results = get_interactive_infer_results(model_T2S, sess, model_in=[line])
  # NOTE(review): the indices below mirror the text2speech model's output
  # packing — results[1][4] is the predicted audio length, results[1][1] the
  # mel prediction and results[1][5] the magnitude prediction when the data
  # layer outputs both. Confirm against the model if its outputs change.
  audio_length = results[1][4][0]
  if model_T2S.get_data_layer()._both:
    prediction = results[1][5][0]
  else:
    prediction = results[1][1][0]
  # Drop frames past the predicted end of the utterance.
  prediction = prediction[:audio_length-1,:]
  mag_prediction = model_T2S.get_data_layer().get_magnitude_spec(prediction)
  # Sharpen the magnitude spectrogram (clip, **1.5, square) before projecting
  # it onto an 80-band mel scale; the result is saved for external use
  # (e.g. as vocoder conditioning).
  mag_prediction_squared = np.clip(mag_prediction, a_min=0, a_max=255)
  mag_prediction_squared = mag_prediction_squared**1.5
  mag_prediction_squared = np.square(mag_prediction_squared)
  mel_basis = librosa.filters.mel(sr=22050, n_fft=1024, n_mels=80, htk=True, norm=None)
  mel = np.dot(mel_basis, mag_prediction_squared.T)
  mel = np.log(np.clip(mel, a_min=1e-5, a_max=None))
  np.save("spec2", mel)
  plt.imshow(mel)
  plt.gca().invert_yaxis()
  plt.show()
  # Reconstruct a time-domain waveform from the magnitude spectrogram and
  # play it in the notebook.
  wav = save_audio(mag_prediction, "unused", "unused", sampling_rate=sampling_rate, save_format="np.array", n_fft=n_fft)
  audio = IPython.display.Audio(wav, rate=sampling_rate)
  print("Generated Audio")
  IPython.display.display(audio)
| OpenSeq2Seq-master | scripts/tacotron_save_spec.py |
import numpy as np
import pickle
import tensorflow as tf
from ctc_decoders import Scorer, ctc_beam_search_decoder
def load_test_sample(pickle_file):
  """Read a pickled (logits, transcript) pair used by the CTC decoder tests.

  ``encoding='bytes'`` keeps Python-2-era pickles loadable under Python 3.
  """
  with open(pickle_file, 'rb') as handle:
    sample = pickle.load(handle, encoding='bytes')
  seq, label = sample
  return seq, label
def load_vocab(vocab_file):
  """Load a character vocabulary: the first character of every line in
  ``vocab_file``, with the CTC blank symbol ``'_'`` appended last."""
  with open(vocab_file, 'r') as handle:
    symbols = [row[0] for row in handle]
  symbols.append('_')
  return symbols
def softmax(x):
  """Numerically stable softmax over the last axis of ``x``.

  The row-wise max is subtracted before exponentiation so large logits do
  not overflow.
  """
  shifted = x - np.max(x, axis=-1, keepdims=True)
  exps = np.exp(shifted)
  return exps / np.sum(exps, axis=-1, keepdims=True)
class CTCCustomDecoderTests(tf.test.TestCase):
  # Compares TensorFlow's built-in CTC decoders with the Baidu-style custom
  # decoder (with optional KenLM rescoring) on a fixed test utterance.

  def setUp(self):
    # Pre-computed logits and reference transcript for 'ten seconds'.
    self.seq, self.label = load_test_sample('ctc_decoder_with_lm/ctc-test.pickle')
    self.vocab = load_vocab('open_seq2seq/test_utils/toy_speech_data/vocab.txt')
    self.beam_width = 16
    self.tol = 1e-3

  def test_decoders(self):
    '''
    Test all CTC decoders on a sample transcript ('ten seconds').
    Standard TF decoders should output 'then seconds'.
    Custom CTC decoder with LM rescoring should yield 'ten seconds'.
    '''
    logits = tf.constant(self.seq)
    seq_len = tf.constant([self.seq.shape[0]])
    # Built-in TF greedy (best-path) decoder.
    greedy_decoded = tf.nn.ctc_greedy_decoder(logits, seq_len,
                                              merge_repeated=True)
    # Built-in TF beam-search decoder, no language model.
    beam_search_decoded = tf.nn.ctc_beam_search_decoder(logits, seq_len,
                                                        beam_width=self.beam_width,
                                                        top_paths=1,
                                                        merge_repeated=False)
    with tf.Session() as sess:
      res_greedy, res_beam = sess.run([greedy_decoded,
                                       beam_search_decoded])
      decoded_greedy, prob_greedy = res_greedy
      decoded_text = ''.join([self.vocab[c] for c in decoded_greedy[0].values])
      # Reference negative log-likelihood and transcript of the greedy path.
      self.assertTrue( abs(7079.117 + prob_greedy[0][0]) < self.tol )
      self.assertTrue( decoded_text == 'then seconds' )
      decoded_beam, prob_beam = res_beam
      decoded_text = ''.join([self.vocab[c] for c in decoded_beam[0].values])
      # NOTE(review): lexicographic string compare — '1.9' >= '1.11' is True,
      # so old 1.x versions may wrongly take this branch; a tuple compare
      # would be safer if old TF must be supported.
      if tf.__version__ >= '1.11':
        # works for newer versions only (with CTC decoder fix)
        self.assertTrue( abs(1.1842 + prob_beam[0][0]) < self.tol )
        self.assertTrue( decoded_text == 'then seconds' )
      # Custom decoder with KenLM rescoring recovers the true transcript.
      scorer = Scorer(alpha=2.0, beta=0.5,
                      model_path='ctc_decoder_with_lm/ctc-test-lm.binary',
                      vocabulary=self.vocab[:-1])
      res = ctc_beam_search_decoder(softmax(self.seq.squeeze()), self.vocab[:-1],
                                    beam_size=self.beam_width,
                                    ext_scoring_func=scorer)
      res_prob, decoded_text = res[0]
      self.assertTrue( abs(4.0845 + res_prob) < self.tol )
      self.assertTrue( decoded_text == self.label )

  def test_beam_decoders(self):
    '''
    Test on random data that custom decoder outputs the same transcript
    as standard TF beam search decoder
    '''
    seq = np.random.uniform(size=self.seq.shape).astype(np.float32)
    logits = tf.constant(seq)
    seq_len = tf.constant([self.seq.shape[0]])
    beam_search_decoded = tf.nn.ctc_beam_search_decoder(logits, seq_len,
                                                        beam_width=self.beam_width,
                                                        top_paths=1,
                                                        merge_repeated=False)
    with tf.Session() as sess:
      res_beam = sess.run(beam_search_decoded)
      decoded_beam, prob_beam = res_beam
      prob1 = prob_beam[0][0]
      decoded_text1 = ''.join([self.vocab[c] for c in decoded_beam[0].values])
      # Custom decoder on the same (softmax-ed) scores, no external scorer.
      res = ctc_beam_search_decoder(softmax(seq.squeeze()), self.vocab[:-1],
                                    beam_size=self.beam_width)
      prob2, decoded_text2 = res[0]
      # NOTE(review): same lexicographic version-compare caveat as above.
      if tf.__version__ >= '1.11':
        # works for newer versions only (with CTC decoder fix)
        self.assertTrue( abs(prob1 - prob2) < self.tol )
        self.assertTrue( prob2 < 0 )
        self.assertTrue( decoded_text1 == decoded_text2 )
if __name__ == '__main__':
  tf.test.main()  # run the decoder comparison tests above
| OpenSeq2Seq-master | scripts/ctc_decoders_test.py |
import pandas as pd
import os
import argparse
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Build N-gram LM model from text file')
  parser.add_argument('text', metavar='text', type=str,
                      help='text file')
  parser.add_argument('--n', type=int, help='n for n-grams', default=3)
  args = parser.parse_args()
  # Derived output names: <corpus>.arpa (text LM) and <corpus>-lm.binary.
  path_prefix, _ = os.path.splitext(args.text)
  corpus_name = args.text
  arpa_name = path_prefix + '.arpa'
  lm_name = path_prefix + '-lm.binary'
  # Step 1: count n-grams and write an ARPA file with KenLM's lmplz.
  command = 'kenlm/build/bin/lmplz --text {} --arpa {} --o {}'.format(
      corpus_name, arpa_name, args.n)
  print(command)
  os.system(command)
  # Step 2: convert the ARPA file to KenLM's compact binary trie format.
  command = 'kenlm/build/bin/build_binary trie -q 8 -b 7 -a 256 {} {}'.format(
      arpa_name, lm_name)
  print(command)
  os.system(command)
  # Step 3 (optional): build a trie for the custom TF-op CTC decoder, but
  # only if its generate_trie helper was compiled and is executable.
  command = 'ctc_decoder_with_lm/generate_trie'
  if os.path.isfile(command) and os.access(command, os.X_OK):
    trie_name = path_prefix + '-lm.trie'
    command += ' open_seq2seq/test_utils/toy_speech_data/vocab.txt {} {} {}'.format(
        lm_name, corpus_name, trie_name)
    print('INFO: Generating a trie for custom TF op based CTC decoder.')
    print(command)
    os.system(command)
  else:
    print('INFO: Skipping trie generation, since no custom TF op based CTC decoder found.')
    print('INFO: Please use Baidu CTC decoder with this language model.')
| OpenSeq2Seq-master | scripts/build_lm_text.py |
'''
Return the best evaluation accuracy from a file
output-ed by the sentiment analysis model
'''
import sys
def get_best_accuracy(output_file):
  """Scan a training log and print the best ``*** EVAL Accuracy`` entry.

  The log is expected to contain blocks of the form::

      *** Validation loss: <loss>
      *** EVAL Accuracy: <acc>
      <stat line>
      <step line>

  For the highest accuracy found, this prints the accuracy, the line that
  preceded it (the validation loss) and the two lines that followed it.

  Args:
    output_file (str): path to a log file produced by the sentiment
      analysis model.
  """
  keyword = "*** EVAL Accuracy: "
  best_acc = 0.0
  loss, stat, step = '', '', ''
  get_stat = False
  n = len(keyword)
  last = ''
  get_step = False
  # Use a context manager so the file handle is always closed (the original
  # implementation opened the file and never closed it).
  with open(output_file, 'r') as output:
    for line in output:
      line = line.strip()
      if get_stat:
        # Line right after a new best accuracy: the stat line.
        stat = line
        get_stat = False
        get_step = True
      elif get_step:
        # Second line after a new best accuracy: the step line.
        step = line
        get_step = False
      else:
        idx = line.find(keyword)
        if idx != -1:
          # NOTE: assumes the keyword starts the stripped line
          # (slices with n, not idx + n).
          acc = float(line[n:])
          if acc > best_acc:
            best_acc = acc
            loss = last  # the line preceding the accuracy line
            get_stat = True
      last = line
  print("*** Best accuracy:", str(best_acc))
  print(loss)
  print(stat)
  print(step)
if __name__ == '__main__':
  # Expect exactly one positional argument: the path to the log file.
  if len(sys.argv) < 2:
    raise ValueError('No output file provided to analyze')
  output_file = sys.argv[1]
get_best_accuracy(output_file) | OpenSeq2Seq-master | scripts/get_best_accuracy.py |
"""Set up paths for DS2"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
def add_path(path):
  """Prepend ``path`` to ``sys.path`` unless it is already present."""
  if path in sys.path:
    return
  sys.path.insert(0, path)
# Directory containing this file; the project root is its parent.
this_dir = os.path.dirname(__file__)
# Add project path to PYTHONPATH so project-level imports resolve.
proj_path = os.path.join(this_dir, '..')
add_path(proj_path)
| OpenSeq2Seq-master | decoders/_init_paths.py |
OpenSeq2Seq-master | decoders/__init__.py |
|
"""Script to build and install decoder package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup, Extension, distutils
import glob
import platform
import os, sys
import multiprocessing.pool
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--num_processes",
    default=1,
    type=int,
    help="Number of cpu processes to build package. (default: %(default)d)")
# parse_known_args() returns (namespace, leftover-args); args[0] is read
# later by parallelCCompile, args[1] is handed to setuptools below.
args = parser.parse_known_args()
# reconstruct sys.argv to pass to setup below
sys.argv = [sys.argv[0]] + args[1]
# monkey-patch for parallel compilation
# See: https://stackoverflow.com/a/13176803
def parallelCCompile(self,
                     sources,
                     output_dir=None,
                     macros=None,
                     include_dirs=None,
                     debug=0,
                     extra_preargs=None,
                     extra_postargs=None,
                     depends=None):
    """Drop-in replacement for ``distutils.ccompiler.CCompiler.compile`` that
    compiles object files on a thread pool of ``--num_processes`` workers.

    The signature must match the distutils original exactly; it is installed
    via monkey-patch below.
    """
    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    # parallel code
    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            # object not in the build map -- nothing to compile for it
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
    # convert to list, imap is evaluated on-demand
    # (args[0] is the argparse Namespace from parse_known_args above)
    thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)
    list(thread_pool.imap(_single_compile, objects))
    return objects
def compile_test(header, library):
    """Return True if a trivial C++ program that includes ``header`` links
    against ``library`` on this machine (probe for optional dependencies)."""
    scratch = os.path.join(os.path.dirname(__file__), "dummy")
    probe = (
        "bash -c \"g++ -include {hdr} -l{lib} -x c++ - <<<'int main() {{}}' "
        "-o {out} >/dev/null 2>/dev/null && rm {out} 2>/dev/null\""
    ).format(hdr=header, lib=library, out=scratch)
    return os.system(probe) == 0
# hack compile to support parallel compiling
distutils.ccompiler.CCompiler.compile = parallelCCompile
# KenLM and OpenFST sources are compiled straight into the extension.
FILES = glob.glob('kenlm/util/*.cc') \
    + glob.glob('kenlm/lm/*.cc') \
    + glob.glob('kenlm/util/double-conversion/*.cc')
FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')
# Drop the upstream entry points and unit tests.
FILES = [
    fn for fn in FILES
    if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(
        'unittest.cc'))
]
LIBS = ['stdc++']
if platform.system() != 'Darwin':
    LIBS.append('rt')
ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']
# Enable optional compressed-ARPA support only for libraries that link here.
if compile_test('zlib.h', 'z'):
    ARGS.append('-DHAVE_ZLIB')
    LIBS.append('z')
if compile_test('bzlib.h', 'bz2'):
    ARGS.append('-DHAVE_BZLIB')
    LIBS.append('bz2')
if compile_test('lzma.h', 'lzma'):
    ARGS.append('-DHAVE_XZLIB')
    LIBS.append('lzma')
# Generate the SWIG Python bindings before building the extension.
os.system('swig -python -c++ ./decoders.i')
decoders_module = [
    Extension(
        name='_swig_decoders',
        sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),
        language='c++',
        include_dirs=[
            '.',
            'kenlm',
            'openfst-1.6.3/src/include',
            'ThreadPool',
        ],
        libraries=LIBS,
        extra_compile_args=ARGS)
]
setup(
    name='ctc_decoders',
    version='1.1',
    description="""CTC decoders""",
    ext_modules=decoders_module,
    py_modules=['ctc_decoders', 'swig_decoders'], )
| OpenSeq2Seq-master | decoders/setup.py |
"""Wrapper for various CTC decoders in SWIG."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import swig_decoders
class Scorer(swig_decoders.Scorer):
    """External scorer for beam search: a KenLM language model combined with
    a word-count bonus.

    :param alpha: Parameter associated with language model. Don't use
                  language model when alpha = 0.
    :type alpha: float
    :param beta: Parameter associated with word count. Don't use word
                 count when beta = 0.
    :type beta: float
    :param model_path: Path to load language model.
    :type model_path: basestring
    """

    def __init__(self, alpha, beta, model_path, vocabulary):
        super(Scorer, self).__init__(alpha, beta, model_path, vocabulary)
class BeamDecoder(swig_decoders.BeamDecoder):
    """Stateful wrapper around the SWIG beam-search decoder.

    Construct once with the vocabulary/beam settings, then call
    :meth:`decode` per utterance.
    """

    def __init__(self, vocabulary, beam_size,
                 cutoff_prob=1.0,
                 cutoff_top_n=40,
                 ext_scorer=None):
        super(BeamDecoder, self).__init__(vocabulary, beam_size,
                                          cutoff_prob,
                                          cutoff_top_n,
                                          ext_scorer)

    def decode(self, probs_seq):
        """Decode one [time, vocab] probability matrix.

        Returns a list of (log probability, sentence) tuples in descending
        order of probability.
        """
        raw = swig_decoders.BeamDecoder.decode(self, probs_seq.tolist())
        # Materialize the SWIG result objects as plain Python tuples.
        return [(hyp[0], hyp[1]) for hyp in raw]
def ctc_greedy_decoder(probs_seq, vocabulary):
    """Wrapper for the CTC best-path (greedy) decoder in SWIG.

    :param probs_seq: 2-D list of probability distributions over each time
                      step, with each element being a list of normalized
                      probabilities over vocabulary and blank.
    :type probs_seq: 2-D list
    :param vocabulary: Vocabulary list.
    :type vocabulary: list
    :return: Decoding result string.
    :rtype: basestring
    """
    return swig_decoders.ctc_greedy_decoder(probs_seq.tolist(), vocabulary)
def ctc_beam_search_decoder(probs_seq,
                            vocabulary,
                            beam_size,
                            cutoff_prob=1.0,
                            cutoff_top_n=40,
                            ext_scoring_func=None):
    """Wrapper for the CTC Beam Search Decoder.

    :param probs_seq: 2-D list of probability distributions over each time
                      step, with each element being a list of normalized
                      probabilities over vocabulary and blank.
    :type probs_seq: 2-D list
    :param vocabulary: Vocabulary list.
    :type vocabulary: list
    :param beam_size: Width for beam search.
    :type beam_size: int
    :param cutoff_prob: Cutoff probability in pruning,
                        default 1.0, no pruning.
    :type cutoff_prob: float
    :param cutoff_top_n: Cutoff number in pruning: only the cutoff_top_n
                         characters with the highest probabilities are kept
                         per step, default 40.
    :type cutoff_top_n: int
    :param ext_scoring_func: External scoring function for
                             partially decoded sentence, e.g. word count
                             or language model.
    :type ext_scoring_func: callable
    :return: List of tuples of log probability and sentence as decoding
             results, in descending order of the probability.
    :rtype: list
    """
    raw_results = swig_decoders.ctc_beam_search_decoder(
        probs_seq.tolist(), vocabulary, beam_size, cutoff_prob, cutoff_top_n,
        ext_scoring_func)
    # Materialize the SWIG result objects as plain (log-prob, text) tuples.
    return [(hyp[0], hyp[1]) for hyp in raw_results]
def ctc_beam_search_decoder_batch(probs_split,
                                  vocabulary,
                                  beam_size,
                                  num_processes,
                                  cutoff_prob=1.0,
                                  cutoff_top_n=40,
                                  ext_scoring_func=None):
    """Wrapper for the batched CTC beam search decoder.

    :param probs_split: 3-D list with each element being a 2-D list of
                        per-time-step probabilities, as used by
                        ctc_beam_search_decoder().
    :type probs_split: 3-D list
    :param vocabulary: Vocabulary list.
    :type vocabulary: list
    :param beam_size: Width for beam search.
    :type beam_size: int
    :param num_processes: Number of parallel processes.
    :type num_processes: int
    :param cutoff_prob: Cutoff probability in vocabulary pruning,
                        default 1.0, no pruning.
    :type cutoff_prob: float
    :param cutoff_top_n: Cutoff number in pruning: only the cutoff_top_n
                         characters with the highest probabilities are kept
                         per step, default 40.
    :type cutoff_top_n: int
    :param ext_scoring_func: External scoring function for
                             partially decoded sentence, e.g. word count
                             or language model.
    :type ext_scoring_func: callable
    :return: Per-utterance lists of (log probability, sentence) tuples,
             in descending order of the probability.
    :rtype: list
    """
    as_lists = [probs_seq.tolist() for probs_seq in probs_split]
    raw_batches = swig_decoders.ctc_beam_search_decoder_batch(
        as_lists, vocabulary, beam_size, num_processes, cutoff_prob,
        cutoff_top_n, ext_scoring_func)
    # Materialize SWIG result objects as plain tuples, per utterance.
    return [[(hyp[0], hyp[1]) for hyp in batch] for batch in raw_batches]
| OpenSeq2Seq-master | decoders/ctc_decoders.py |
# Copyright (c) 2017 NVIDIA Corporation
"""
This package provides multi-node, multi-GPU sequence to sequence learning
""" | OpenSeq2Seq-master | open_seq2seq/__init__.py |
# Copyright (c) 2019 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .loss import Loss
class Text2SpeechLoss(Loss):
  """
  Default text-to-speech loss.

  Combines: a decoder mel-spectrogram loss, a post-net mel loss, a
  stop-token sigmoid cross-entropy, and (when the data layer outputs
  "both") a magnitude-spectrogram loss.
  """
  @staticmethod
  def get_optional_params():
    return {
        "use_mask": bool,
        "scale": float,
        "stop_token_weight": float,
        "mel_weight": float,
        "mag_weight": float,
        "l1_norm": bool
    }

  def __init__(self, params, model, name="text2speech_loss"):
    super(Text2SpeechLoss, self).__init__(params, model, name)
    # Number of audio features; a dict with "mel"/"magnitude" keys when the
    # data layer produces both spectrogram types.
    self._n_feats = self._model.get_data_layer().params["num_audio_features"]
    if "both" in self._model.get_data_layer().params["output_type"]:
      self._both = True
    else:
      self._both = False

  def _compute_loss(self, input_dict):
    """
    Computes loss for text-to-speech model.

    Args:
      input_dict (dict):
        * "decoder_output": dictionary containing:
            "outputs": array containing:

              * mel: mel-spectrogram predicted by the decoder [batch, time, n_mel]
              * post_net_mel: spectrogram after adding the residual
                corrections from the post net of shape [batch, time, feats]
              * mag: mag-spectrogram predicted by the decoder [batch, time, n_mag]

            "stop_token_predictions": stop_token predictions of shape [batch, time, 1]
        * "target_tensors": array containing:

            * spec: the true spectrogram of shape [batch, time, feats]
            * stop_token: the stop_token of shape [batch, time]
            * spec_length: the length of specs [batch]

    Returns:
      Singleton loss tensor
    """
    decoder_predictions = input_dict["decoder_output"]["outputs"][0]
    post_net_predictions = input_dict["decoder_output"]["outputs"][1]
    stop_token_predictions = input_dict["decoder_output"]["stop_token_prediction"]
    if self._both:
      mag_pred = input_dict["decoder_output"]["outputs"][5]
      mag_pred = tf.cast(mag_pred, dtype=tf.float32)
    spec = input_dict["target_tensors"][0]
    stop_token = input_dict["target_tensors"][1]
    stop_token = tf.expand_dims(stop_token, -1)
    spec_lengths = input_dict["target_tensors"][2]
    batch_size = tf.shape(spec)[0]
    num_feats = tf.shape(spec)[2]
    # The loss is always computed in float32, even for mixed-precision runs.
    decoder_predictions = tf.cast(decoder_predictions, dtype=tf.float32)
    post_net_predictions = tf.cast(post_net_predictions, dtype=tf.float32)
    stop_token_predictions = tf.cast(stop_token_predictions, dtype=tf.float32)
    spec = tf.cast(spec, dtype=tf.float32)
    stop_token = tf.cast(stop_token, dtype=tf.float32)
    # Predictions and targets can differ in length; pad both up to the
    # longer of the two so the element-wise losses line up.
    max_length = tf.cast(
        tf.maximum(
            tf.shape(spec)[1],
            tf.shape(decoder_predictions)[1],
        ), tf.int32
    )
    decoder_pad = tf.zeros(
        [
            batch_size,
            max_length - tf.shape(decoder_predictions)[1],
            tf.shape(decoder_predictions)[2]
        ]
    )
    stop_token_pred_pad = tf.zeros(
        [batch_size, max_length - tf.shape(decoder_predictions)[1], 1]
    )
    spec_pad = tf.zeros([batch_size, max_length - tf.shape(spec)[1], num_feats])
    # Stop-token targets past the real end of the utterance are "stop" (1).
    stop_token_pad = tf.ones([batch_size, max_length - tf.shape(spec)[1], 1])
    decoder_predictions = tf.concat(
        [decoder_predictions, decoder_pad],
        axis=1
    )
    post_net_predictions = tf.concat(
        [post_net_predictions, decoder_pad],
        axis=1
    )
    stop_token_predictions = tf.concat(
        [stop_token_predictions, stop_token_pred_pad],
        axis=1
    )
    spec = tf.concat([spec, spec_pad], axis=1)
    stop_token = tf.concat([stop_token, stop_token_pad], axis=1)
    # Spectrogram regression uses either L1 or L2 distance.
    if self.params.get("l1_norm", False):
      loss_f = tf.losses.absolute_difference
    else:
      loss_f = tf.losses.mean_squared_error
    if self._both:
      mag_pad = tf.zeros(
          [
              batch_size,
              max_length - tf.shape(mag_pred)[1],
              tf.shape(mag_pred)[2]
          ]
      )
      mag_pred = tf.concat(
          [mag_pred, mag_pad],
          axis=1
      )
      # The target spec stacks mel and magnitude features; split them apart.
      spec, mag_target = tf.split(
          spec,
          [self._n_feats["mel"], self._n_feats["magnitude"]],
          axis=2
      )
    decoder_target = spec
    post_net_target = spec
    if self.params.get("use_mask", True):
      # Mask out the padded time steps so they do not contribute to the loss.
      mask = tf.sequence_mask(
          lengths=spec_lengths,
          maxlen=max_length,
          dtype=tf.float32
      )
      mask = tf.expand_dims(mask, axis=-1)
      decoder_loss = loss_f(
          labels=decoder_target,
          predictions=decoder_predictions,
          weights=mask
      )
      post_net_loss = loss_f(
          labels=post_net_target,
          predictions=post_net_predictions,
          weights=mask
      )
      if self._both:
        mag_loss = loss_f(
            labels=mag_target,
            predictions=mag_pred,
            weights=mask
        )
      # Stop-token loss on logits; normalized by the number of real frames.
      stop_token_loss = tf.nn.sigmoid_cross_entropy_with_logits(
          labels=stop_token,
          logits=stop_token_predictions
      )
      stop_token_loss = stop_token_loss * mask
      stop_token_loss = tf.reduce_sum(stop_token_loss) / tf.reduce_sum(mask)
    else:
      decoder_loss = loss_f(
          labels=decoder_target,
          predictions=decoder_predictions
      )
      post_net_loss = loss_f(
          labels=post_net_target,
          predictions=post_net_predictions
      )
      if self._both:
        mag_loss = loss_f(
            labels=mag_target,
            predictions=mag_pred
        )
      stop_token_loss = tf.nn.sigmoid_cross_entropy_with_logits(
          labels=stop_token,
          logits=stop_token_predictions
      )
      stop_token_loss = tf.reduce_mean(stop_token_loss)
    # Combine the terms with their configured weights.
    mel_weight = self.params.get("mel_weight", 1.0)
    decoder_loss = mel_weight * decoder_loss
    post_net_loss = mel_weight * post_net_loss
    stop_token_weight = self.params.get("stop_token_weight", 1.0)
    stop_token_loss = stop_token_weight * stop_token_loss
    loss = decoder_loss + post_net_loss + stop_token_loss
    if self._both:
      mag_weight = self.params.get("mag_weight", 1.0)
      loss += mag_weight * mag_loss
    if self.params.get("scale", None):
      loss = loss * self.params["scale"]
    return loss
| OpenSeq2Seq-master | open_seq2seq/losses/text2speech_loss.py |
# Copyright (c) 2018 NVIDIA Corporation
import tensorflow as tf
from .loss import Loss
class WavenetLoss(Loss):
  """Cross-entropy loss for WaveNet's categorical (mu-law encoded) output."""

  def __init__(self, params, model, name="wavenet_loss"):
    super(WavenetLoss, self).__init__(params, model, name)
    # Kept for parity with the other audio losses; not read in _compute_loss.
    self._n_feats = self._model.get_data_layer().params["num_audio_features"]

  # Declared as @staticmethod for consistency with the Loss base class and
  # sibling losses (e.g. CTCLoss). Instance-level calls keep working, and
  # class-level calls (WavenetLoss.get_required_params()) now work too.
  @staticmethod
  def get_required_params():
    return {}

  @staticmethod
  def get_optional_params():
    return {}

  def _compute_loss(self, input_dict):
    """
    Computes the cross-entropy loss for WaveNet.

    Args:
      input_dict (dict):
        * "decoder_output": dict containing:
          * "logits": predicted output signal as logits
          * "outputs": array containing: [
            * ground truth signal as encoded labels
            * mu-law decoded audio
          ]

    Returns:
      Singleton loss tensor: mean sparse softmax cross-entropy between the
      predicted logits and the encoded ground-truth labels.
    """
    # Always compute the loss in float32, even for mixed-precision models.
    prediction = tf.cast(input_dict["decoder_output"]["logits"], tf.float32)
    target_output = input_dict["decoder_output"]["outputs"][0]
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=prediction,
        labels=target_output
    )
    return tf.reduce_mean(loss)
| OpenSeq2Seq-master | open_seq2seq/losses/wavenet_loss.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from open_seq2seq.utils.utils import mask_nans, deco_print
from .loss import Loss
def dense_to_sparse(dense_tensor, sequence_length):
  """Convert a padded dense tensor into a ``tf.SparseTensor``, keeping only
  the first ``sequence_length[i]`` entries of each row ``i`` (as required by
  ``tf.nn.ctc_loss``)."""
  valid_mask = tf.sequence_mask(sequence_length)
  idx = tf.where(valid_mask)
  vals = tf.gather_nd(dense_tensor, idx)
  dense_shape = tf.shape(dense_tensor, out_type=tf.int64)
  return tf.SparseTensor(idx, vals, dense_shape)
class CTCLoss(Loss):
  """Implementation of the CTC loss."""
  @staticmethod
  def get_optional_params():
    return dict(Loss.get_optional_params(), **{
        'mask_nan': bool,
    })

  def __init__(self, params, model, name="ctc_loss"):
    """CTC loss constructor.

    See parent class for arguments description.

    Config parameters:

    * **mask_nan** (bool) --- whether to mask nans in the loss output. Defaults
      to True.
    """
    super(CTCLoss, self).__init__(params, model, name)
    self._mask_nan = self.params.get("mask_nan", True)
    # this loss can only operate in full precision
    # if self.params['dtype'] != tf.float32:
    #   deco_print("Warning: defaulting CTC loss to work in float32")
    self.params['dtype'] = tf.float32

  def _compute_loss(self, input_dict):
    """CTC loss graph construction.

    Expects the following inputs::

      input_dict = {

      }

    Args:
      input_dict (dict): input dictionary that has to contain
          the following fields::
            input_dict = {
              "decoder_output": {
                "logits": tensor, shape [batch_size, time length, tgt_vocab_size]
                "src_length": tensor, shape [batch_size]
              },
              "target_tensors": [
                tgt_sequence (shape=[batch_size, time length, num features]),
                tgt_length (shape=[batch_size])
              ]
            }

    Returns:
      averaged CTC loss.
    """
    logits = input_dict['decoder_output']['logits']
    tgt_sequence, tgt_length = input_dict['target_tensors']
    # this loss needs an access to src_length since they
    # might get changed in the encoder
    src_length = input_dict['decoder_output']['src_length']
    # Compute the CTC loss
    # (targets must be sparse; ignore_longer_outputs_than_inputs avoids a
    # hard failure when a target is longer than the encoded input)
    total_loss = tf.nn.ctc_loss(
        labels=dense_to_sparse(tgt_sequence, tgt_length),
        inputs=logits,
        sequence_length=src_length,
        ignore_longer_outputs_than_inputs=True,
    )
    if self._mask_nan:
      total_loss = mask_nans(total_loss)
    # Calculate the average loss across the batch
    avg_loss = tf.reduce_mean(total_loss)
    return avg_loss
| OpenSeq2Seq-master | open_seq2seq/losses/ctc_loss.py |
# Copyright (c) 2018 NVIDIA Corporation
"""
Losses to be used in seq2seq models
"""
from .sequence_loss import BasicSequenceLoss, CrossEntropyWithSmoothing, \
PaddedCrossEntropyLossWithSmoothing, BasicSampledSequenceLoss
from .ctc_loss import CTCLoss
from .cross_entropy_loss import CrossEntropyLoss
from .wavenet_loss import WavenetLoss
from .jca_loss import MultiTaskCTCEntropyLoss
from .text2speech_loss import Text2SpeechLoss | OpenSeq2Seq-master | open_seq2seq/losses/__init__.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .loss import Loss
from .ctc_loss import CTCLoss
from .sequence_loss import BasicSequenceLoss
# To-Do Replace this with a generic multi-task loss.
class MultiTaskCTCEntropyLoss(Loss):
  """
  MultiTask CTC and cross entropy loss.
  """
  @staticmethod
  def get_required_params():
    return dict(Loss.get_required_params(), **{
        'ctc_loss_params': dict,
        'seq_loss_params': dict,
        'lambda_value': float,
        'tgt_vocab_size': int,
        'batch_size': int,
    })

  @staticmethod
  def get_optional_params():
    return dict(Loss.get_optional_params(), **{
    })

  # NOTE(review): the default scope name "basic_sequence_loss" looks
  # copy-pasted from BasicSequenceLoss; changing it would rename the
  # variable scope (and could break checkpoint loading), so it is left as is.
  def __init__(self, params, model, name="basic_sequence_loss"):
    """Constructor.

    Args:
      params (dict): dictionary with loss parameters.
        Should contain the following:
        * ctc_loss_params: Parameters required for CTC loss.
        * seq_loss_params: Parameters required for Sequence loss.
        * lambda_value: lambda value used to combine the two losses.
        * tgt_vocab_size: Target vocabulary size.
        * batch_size: Size of the per-worker batch.
    """
    super(MultiTaskCTCEntropyLoss, self).__init__(params, model, name)
    self.ctc_loss_params = self.params["ctc_loss_params"]
    self.seq_loss_params = self.params["seq_loss_params"]
    self.lambda_value = self.params["lambda_value"]
    # The sequence loss needs the shared batch/vocab sizes; forward them.
    self.seq_loss_params["batch_size"] = self.params["batch_size"]
    self.seq_loss_params["tgt_vocab_size"] = self.params["tgt_vocab_size"]
    self.ctc_loss = CTCLoss(self.ctc_loss_params, model)
    self.seq_loss = BasicSequenceLoss(self.seq_loss_params, model)

  def _compute_loss(self, input_dict):
    """Computes multi-task ctc and cross entropy loss.

    Args:
      input_dict (dict): inputs to compute loss::
        {
          "logits": logits tensor of shape [batch_size, T, dim]
          "target_sequence": tensor of shape [batch_size, T]
          "tgt_lengths": tensor of shape [batch_size] or None
        }

    Returns:
      Singleton loss tensor
    """
    # The decoder produces two heads; route each to its own loss.
    ctc_loss_input_dict = {
        "decoder_output": input_dict['decoder_output']['ctc_outputs'],
        "target_tensors": input_dict['target_tensors'],
    }
    seq_loss_input_dict = {
        "decoder_output": input_dict['decoder_output']['seq_outputs'],
        "target_tensors": input_dict['target_tensors'],
    }
    ctc_loss_value = self.ctc_loss.compute_loss(ctc_loss_input_dict)
    sequence_loss_value = self.seq_loss.compute_loss(seq_loss_input_dict)
    # Convex combination: lambda weights CTC, (1 - lambda) weights entropy.
    return (1 - self.lambda_value) * sequence_loss_value + self.lambda_value * ctc_loss_value
| OpenSeq2Seq-master | open_seq2seq/losses/jca_loss.py |
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from open_seq2seq.losses.sequence_loss import CrossEntropyWithSmoothing, \
BasicSequenceLoss
class CrossEntropyWithSmoothingEqualsBasicSequenceLossTest(tf.test.TestCase):
  # Verifies that the dense cross-entropy loss (smoothing disabled) and the
  # sparse BasicSequenceLoss compute the same value on random data.

  def setUp(self):
    print("Setting Up CrossEntropyWithSmoothingEqualsBasicSequenceLoss Test")

  def tearDown(self):
    print("Tear down CrossEntropyWithSmoothingEqualsBasicSequenceLoss Test")

  def test_compute_loss(self):
    seq_length = 13
    tgt_vocab_size = 12
    # Sweep target-length offsets, batch sizes and both boolean loss flags
    # (offset_target_by_one and do_mask).
    for offset in [0, 3, 4]:
      for batch_size in [1, 4, 8]:
        for _o in [True, False]:
          for _m in [True, False]:
            loss_params = {
                "do_mask": _m,
                "tgt_vocab_size": tgt_vocab_size,
                "batch_size": batch_size,
                "offset_target_by_one": _o,
            }
            targets = tf.placeholder(dtype=tf.int32, shape=[batch_size,
                                                            seq_length])
            logits = tf.placeholder(dtype=tf.float32, shape=[batch_size,
                                                             seq_length,
                                                             tgt_vocab_size])
            tgt_lengths = tf.placeholder(dtype=tf.int32, shape=[batch_size])
            xentropy = CrossEntropyWithSmoothing(params=loss_params, model=None)
            sparse_xentropy = BasicSequenceLoss(params=loss_params, model=None)
            loss_input_dict = {
                "decoder_output": {"logits": logits},
                "target_tensors": [targets, tgt_lengths],
            }
            l1 = sparse_xentropy.compute_loss(input_dict=loss_input_dict)
            l2 = xentropy.compute_loss(input_dict=loss_input_dict)
            with self.test_session(use_gpu=True) as sess:
              # Random targets/logits; lengths shortened by `offset` to
              # exercise the masking path.
              tgts = np.random.randint(tgt_vocab_size,
                                       size=(batch_size, seq_length))
              lgts = np.random.random(size=[batch_size,
                                            seq_length, tgt_vocab_size])
              feed_dict = {
                  targets: tgts,
                  logits: lgts,
                  tgt_lengths: np.array([seq_length-offset]*batch_size)
              }
              loss1 = sess.run(l1, feed_dict=feed_dict)
              loss2 = sess.run(l2, feed_dict=feed_dict)
              self.assertAlmostEqual(loss1, loss2, 4)
              print("Loss: {}".format(loss1))
if __name__ == '__main__':
  tf.test.main()  # run the loss-equality test above
| OpenSeq2Seq-master | open_seq2seq/losses/sequence_loss_test.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import abc
import copy
import six
import tensorflow as tf
from open_seq2seq.utils.utils import check_params, cast_types
@six.add_metaclass(abc.ABCMeta)
class Loss:
  """Abstract class from which all losses must inherit.
  """
  @staticmethod
  def get_required_params():
    """Static method with description of required parameters.

      Returns:
        dict:
            Dictionary containing all the parameters that **have to** be
            included into the ``params`` parameter of the
            class :meth:`__init__` method.
    """
    return {}

  @staticmethod
  def get_optional_params():
    """Static method with description of optional parameters.

      Returns:
        dict:
            Dictionary containing all the parameters that **can** be
            included into the ``params`` parameter of the
            class :meth:`__init__` method.
    """
    return {
        'dtype': [tf.float16, tf.float32],
    }

  def __init__(self, params, model, name="loss"):
    """Loss constructor.
    Note that loss constructors should not modify TensorFlow graph, all
    graph construction should happen in the
    :meth:`self._compute_loss() <_compute_loss>` method.

    Args:
      params (dict): parameters describing the loss.
          All supported parameters are listed in :meth:`get_required_params`,
          :meth:`get_optional_params` functions.
      model (instance of a class derived from :class:`Model<models.model.Model>`):
          parent model that created this loss.
          Could be None if no model access is required for the use case.
      name (str): name for loss variable scope.

    Config parameters:

    * **dtype** --- data dtype. Could be either ``tf.float16`` or ``tf.float32``.
    """
    check_params(params, self.get_required_params(), self.get_optional_params())
    self._params = copy.deepcopy(params)
    self._model = model
    # Default dtype: inherit from the parent model, or float32 without one.
    if 'dtype' not in self._params:
      if self._model:
        self._params['dtype'] = self._model.get_tf_dtype()
      else:
        self._params['dtype'] = tf.float32
    self._name = name

  def compute_loss(self, input_dict):
    """Wrapper around :meth:`self._compute_loss() <_compute_loss>` method.
    Here name and dtype are set in the variable scope and then
    :meth:`self._compute_loss() <_compute_loss>` method is called.

    Args:
      input_dict (dict): see :meth:`self._compute_loss() <_compute_loss>` docs.

    Returns:
      see :meth:`self._compute_loss() <_compute_loss>` docs.
    """
    with tf.variable_scope(self._name, dtype=self.params['dtype']):
      return self._compute_loss(self._cast_types(input_dict))

  def _cast_types(self, input_dict):
    """This function performs automatic cast of all inputs to the loss dtype.

    Args:
      input_dict (dict): dictionary passed to
          :meth:`self._compute_loss() <_compute_loss>` method.

    Returns:
      dict: same as input_dict, but with all Tensors cast to the loss dtype.
    """
    return cast_types(input_dict, self.params['dtype'])

  @abc.abstractmethod
  def _compute_loss(self, input_dict):
    """This is the main function which should construct loss graph.
    Typically, loss will take decoder-produced logits as an input and
    return a singleton loss tensor.

    Args:
      input_dict (dict): dictionary containing loss inputs.
          If the loss is used with :class:`models.encoder_decoder` class,
          ``input_dict`` will have the following content::
            {
              "decoder_output": dictionary returned from decoder.decode() method
              "target_tensors": data_layer.input_tensors['target_tensors']
            }

    Returns:
      singleton loss tensor. This tensor will be computed independently
      for each GPU batch and then averaged
      (``reduce_mean``) over the number of GPUs (or Horovod workers).
    """
    pass

  @property
  def params(self):
    """Parameters used to construct the loss (dictionary)."""
    return self._params

  @property
  def name(self):
    """Loss name."""
    return self._name
| OpenSeq2Seq-master | open_seq2seq/losses/loss.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .loss import Loss
class CrossEntropyLoss(Loss):
    """Implementation of the usual cross_entropy loss with softmax.

    Expects one-hot labels in ``target_tensors[0]`` and decoder logits in
    ``decoder_output['logits']``.
    """

    def __init__(self, params, model, name="cross_entropy_loss"):
        super(CrossEntropyLoss, self).__init__(params, model, name)

    def _compute_loss(self, input_dict):
        """Return scalar softmax cross-entropy over the batch."""
        decoder_logits = input_dict['decoder_output']['logits']
        onehot_labels = input_dict['target_tensors'][0]
        return tf.losses.softmax_cross_entropy(
            logits=decoder_logits,
            onehot_labels=onehot_labels,
        )
| OpenSeq2Seq-master | open_seq2seq/losses/cross_entropy_loss.py |
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import tensorflow as tf
from .loss import Loss
class BasicSequenceLoss(Loss):
    """
    Basic sequence-to-sequence loss. This one does not use one-hot encodings
    (targets are integer token ids).
    """
    @staticmethod
    def get_required_params():
        return dict(Loss.get_required_params(), **{
            'tgt_vocab_size': int,
            'batch_size': int,
        })

    @staticmethod
    def get_optional_params():
        return dict(Loss.get_optional_params(), **{
            'offset_target_by_one': bool,
            'average_across_timestep': bool,
            'do_mask': bool,
        })

    def __init__(self, params, model, name="basic_sequence_loss"):
        """Constructor.

        Args:
            params (dict): dictionary with loss parameters.
                Should contain the following:

                * tgt_vocab_size: Target vocabulary size
                * batch_size: Size of the per-worker batch
                * offset_target_by_one: (default: True). Keep it true for
                  auto-regressive models
                * average_across_timestep: (default: False). If True, will average
                  loss across timesteps, else it will sum across timesteps
                * do_mask: (default: True) whether to mask based on tgt_lengths
                  (which is passed as part of loss_input_dict to compute_loss
                  and has to be not None then)
        """
        super(BasicSequenceLoss, self).__init__(params, model, name)
        self._tgt_vocab_size = self.params["tgt_vocab_size"]
        self._batch_size = self.params["batch_size"]
        self._offset_target_by_one = self.params.get("offset_target_by_one", True)
        self._average_across_timestep = self.params.get("average_across_timestep",
                                                        False)
        self._do_mask = self.params.get("do_mask", True)

    def _compute_loss(self, input_dict):
        """Computes cross entropy based sequence-to-sequence loss.

        Args:
            input_dict (dict): inputs to compute loss::
                {
                    "decoder_output": {"logits": tensor [batch_size, T, dim]},
                    "target_tensors": [target_sequence [batch_size, T],
                                       tgt_lengths [batch_size] or None]
                }

        Returns:
            Singleton loss tensor
        """
        logits = input_dict["decoder_output"]["logits"]
        target_sequence = input_dict['target_tensors'][0]
        tgt_lengths = input_dict['target_tensors'][1]

        if self._offset_target_by_one:
            # this is necessary for auto-regressive models:
            # align logits[t] with target[t + 1], truncated to the shorter
            # of the two time dimensions.
            current_ts = tf.cast(tf.minimum(
                tf.shape(target_sequence)[1],
                tf.shape(logits)[1],
            ), tf.int32) - 1
            logits = tf.slice(
                logits,
                begin=[0, 0, 0],
                size=[-1, current_ts, -1],
            )
            target_sequence = tf.slice(target_sequence,
                                       begin=[0, 1],
                                       size=[-1, current_ts])
        else:
            current_ts = tf.cast(tf.minimum(
                tf.shape(target_sequence)[1],
                tf.shape(logits)[1],
            ), tf.int32)

        # Cast logits after potential slice
        if logits.dtype.base_dtype != tf.float32:
            logits = tf.cast(logits, tf.float32)

        if self._do_mask:
            if tgt_lengths is None:
                raise ValueError("If you are masking loss, tgt_lengths can't be None")
            # lengths - 1: one target position is dropped by the offset above.
            # NOTE(review): this assumes offset_target_by_one=True -- confirm
            # the masking is still correct for the non-offset path.
            mask = tf.sequence_mask(lengths=tgt_lengths - 1,
                                    maxlen=current_ts,
                                    dtype=logits.dtype)
        else:
            mask = tf.cast(tf.ones_like(target_sequence), logits.dtype)

        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.reshape(target_sequence, shape=[-1]),
            logits=tf.reshape(logits, shape=[-1, self._tgt_vocab_size]),
        )
        if self._average_across_timestep:
            loss = tf.reduce_mean(crossent * tf.reshape(mask, shape=[-1]))
        else:
            # Sum over all (masked) positions, then normalize per sentence.
            loss = tf.reduce_sum(crossent * tf.reshape(mask, shape=[-1]))
            loss /= self._batch_size
        return loss
class CrossEntropyWithSmoothing(Loss):
    """Softmax cross entropy loss with label smoothing.

    This one uses one-hot encodings for labels.
    """
    @staticmethod
    def get_required_params():
        return dict(Loss.get_required_params(), **{
            'tgt_vocab_size': int,
            'batch_size': int,
        })

    @staticmethod
    def get_optional_params():
        return dict(Loss.get_optional_params(), **{
            'offset_target_by_one': bool,
            'average_across_timestep': bool,
            'do_mask': bool,
            'label_smoothing': float,
        })

    def __init__(self, params, model, name="cross_entropy_with_smoothing"):
        """Constructor.

        Args:
            params (dict): dictionary with loss parameters.
                Should contain the following:

                * tgt_vocab_size: Target vocabulary size
                * batch_size: Size of the per-worker batch
                * offset_target_by_one: (default: True). Keep it true for
                  auto-regressive models
                * do_mask: (default: True) whether to mask based on tgt_lengths
                  (which is passed as part of loss_input_dict to compute_loss
                  and has to be not None then)
                * label_smoothing: (default: 0.0) label smoothing factor
                * average_across_timestep: (default: False). If True, normalize
                  by the number of unmasked positions, else by batch size
        """
        super(CrossEntropyWithSmoothing, self).__init__(params, model, name)
        self._tgt_vocab_size = self.params["tgt_vocab_size"]
        self._batch_size = self.params["batch_size"]
        self._offset_target_by_one = self.params.get("offset_target_by_one", True)
        self._do_mask = self.params.get("do_mask", True)
        self._label_smoothing = self.params.get("label_smoothing", 0.0)
        self._average_across_timestep = self.params.get("average_across_timestep",
                                                        False)

    def _compute_loss(self, input_dict):
        """Computes cross entropy based sequence-to-sequence loss
        with label smoothing.

        Args:
            input_dict (dict): inputs to compute loss::
                {
                    "decoder_output": {"logits": tensor [batch_size, T, dim]},
                    "target_tensors": [target_sequence [batch_size, T],
                                       tgt_lengths [batch_size] or None]
                }

        Returns:
            Singleton loss tensor
        """
        logits = input_dict["decoder_output"]["logits"]
        target_sequence = input_dict["target_tensors"][0]
        tgt_lengths = input_dict["target_tensors"][1]

        if self._offset_target_by_one:
            # this is necessary for auto-regressive models:
            # align logits[t] with target[t + 1].
            current_ts = tf.cast(tf.minimum(
                tf.shape(target_sequence)[1],
                tf.shape(logits)[1],
            ), tf.int32) - 1
            logits = tf.slice(
                logits,
                begin=[0, 0, 0],
                size=[-1, current_ts, -1],
            )
            target_sequence = tf.slice(target_sequence,
                                       begin=[0, 1],
                                       size=[-1, current_ts])
        else:
            current_ts = tf.cast(tf.minimum(
                tf.shape(target_sequence)[1],
                tf.shape(logits)[1],
            ), tf.int32)

        # Cast logits after potential slice
        if logits.dtype.base_dtype != tf.float32:
            logits = tf.cast(logits, tf.float32)

        if self._do_mask:
            if tgt_lengths is None:
                raise ValueError("If you are masking loss, tgt_lengths can't be None")
            mask = tf.sequence_mask(lengths=tgt_lengths - 1,
                                    maxlen=current_ts,
                                    dtype=tf.float32)
        else:
            mask = tf.cast(tf.ones_like(target_sequence), logits.dtype)

        labels = tf.one_hot(indices=tf.reshape(target_sequence, shape=[-1]),
                            depth=self._tgt_vocab_size)
        logits = tf.reshape(logits, shape=[-1, self._tgt_vocab_size])
        mask = tf.reshape(mask, shape=[-1])

        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=labels,
            logits=logits,
            weights=mask,
            label_smoothing=self._label_smoothing,
            reduction=tf.losses.Reduction.NONE,
        )
        # NOTE(review): the mask is applied twice -- once via `weights` above
        # and once in the multiplication below. Since the mask is binary (0/1)
        # this is redundant but numerically harmless; kept to preserve behavior.
        loss = tf.reduce_sum(loss * tf.reshape(mask, shape=[-1]))
        if self._average_across_timestep:
            loss /= tf.reduce_sum(mask)
        else:
            loss /= self._batch_size
        return loss
class PaddedCrossEntropyLossWithSmoothing(Loss):
    """Label-smoothed cross entropy for variable-length (padded) sequences.

    Logits and labels are padded to a common time dimension; positions whose
    label id is 0 (the pad id) are excluded from the loss. The minimum
    attainable smoothed cross entropy is subtracted so a perfect prediction
    scores 0.
    """
    @staticmethod
    def get_optional_params():
        return dict(Loss.get_optional_params(), **{
            'batch_size': int,
            'tgt_vocab_size': int,
            'label_smoothing': float,
            'pad_embeddings_2_eight': bool,
        })

    def __init__(self, params, model, name="padded_cross_entropy_with_smoothing"):
        super(PaddedCrossEntropyLossWithSmoothing, self).__init__(params, model,
                                                                  name)
        # Optionally round vocab size up to a multiple of 8 so it matches
        # embedding matrices padded for tensor-core efficiency.
        self._tgt_vocab_size = self.params["tgt_vocab_size"]
        if self.params.get('pad_embeddings_2_eight', False):
            remainder = self._tgt_vocab_size % 8
            if remainder != 0:
                self._tgt_vocab_size += 8 - remainder
        self._label_smoothing = self.params.get("label_smoothing", 0.0)

    def _compute_loss(self, input_dict):
        """Compute the masked, label-smoothed cross-entropy loss.

        Args:
            input_dict (dict): inputs to compute loss::
                {
                    "decoder_output": {"logits": tensor [batch_size, T, dim]
                                                 or None},
                    "target_tensors": [labels [batch_size, T'], ...]
                }

        Returns:
            Singleton loss tensor (0.0 if logits are absent).
        """
        logits = input_dict["decoder_output"]["logits"]
        # Bug fix: check for missing logits *before* casting. The original
        # called tf.cast(logits, ...) first, so the `logits is None` guard
        # could never trigger (the cast would raise on None).
        if logits is None:
            return 0.0
        logits = tf.cast(logits, dtype=tf.float32)
        labels = input_dict["target_tensors"][0]

        def _pad_tensors_to_same_length(x, y):
            """ Pad x and y so that the results have the
            same length (second dimension).
            """
            with tf.name_scope("pad_to_same_length"):
                x_length = tf.shape(x)[1]
                y_length = tf.shape(y)[1]
                max_length = tf.maximum(x_length, y_length)
                x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
                y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
                return x, y

        with tf.name_scope(name="loss", values=[logits, labels]):
            logits, labels = _pad_tensors_to_same_length(logits, labels)

            # Calculate smoothing cross entropy
            with tf.name_scope(name="smoothing_cross_entropy",
                               values=[logits, labels]):
                confidence = 1.0 - self._label_smoothing
                low_confidence = (1.0 - confidence) / \
                    tf.cast(self._tgt_vocab_size - 1, dtype=tf.float32)
                soft_targets = tf.one_hot(
                    tf.cast(labels, tf.int32),
                    depth=self._tgt_vocab_size,
                    on_value=confidence,
                    off_value=low_confidence,
                )
                xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=logits, labels=soft_targets,
                )
                # Calculate the best (lowest) possible value of cross entropy,
                # and subtract from the cross entropy loss.
                normalizing_constant = -(
                    confidence * tf.log(confidence) +
                    tf.cast(self._tgt_vocab_size - 1, dtype=tf.float32) *
                    low_confidence * tf.log(low_confidence + 1e-20)
                )
                xentropy -= normalizing_constant

            # Mask out padded positions (label id 0). The original multiplied
            # xentropy by `weights` twice; the mask is binary, so applying it
            # once is numerically identical and cheaper.
            weights = tf.cast(tf.not_equal(labels, 0), dtype=tf.float32)
            loss = tf.reduce_sum(xentropy * weights) / tf.reduce_sum(weights)
        return loss
class BasicSampledSequenceLoss(Loss):
    """
    Basic sampled sequence-to-sequence loss. This is used when the full softmax
    is computational prohibitive.
    This one does not use one-hot encodings.
    """
    @staticmethod
    def get_required_params():
        return dict(Loss.get_required_params(), **{
            'tgt_vocab_size': int,
            'batch_size': int,
        })

    @staticmethod
    def get_optional_params():
        return dict(Loss.get_optional_params(), **{
            'offset_target_by_one': bool,
            'average_across_timestep': bool,
            'do_mask': bool,
            'hid_dim': int,
        })

    def __init__(self, params, model, name="basic_sampled_sequence_loss"):
        """Constructor.

        Args:
            params (dict): dictionary with loss parameters.
                Should contain the following:

                * tgt_vocab_size: Target vocabulary size
                * batch_size: Size of the per-worker batch
                * offset_target_by_one: (default: True). Keep it true for
                  auto-regressive models
                * average_across_timestep: (default: False). If True, will average
                  loss across timesteps, else it will sum across timesteps
                * do_mask: (default: True) whether to mask based on tgt_lengths
                  (which is passed as part of loss_input_dict to compute_loss
                  and has to be not None then)
        """
        super(BasicSampledSequenceLoss, self).__init__(params, model, name)
        self._tgt_vocab_size = self.params["tgt_vocab_size"]
        self._batch_size = self.params["batch_size"]
        self._offset_target_by_one = self.params.get("offset_target_by_one", True)
        self._average_across_timestep = self.params.get(
            "average_across_timestep", False)
        self._do_mask = self.params.get("do_mask", True)

    def _compute_loss(self, input_dict):
        """Computes cross entropy based sequence-to-sequence loss.

        Uses sampled softmax when the decoder exposes its output projection
        (``'weights'``/``'bias'`` keys), otherwise falls back to the full
        sparse softmax.

        Args:
            input_dict (dict): inputs to compute loss::
                {
                    "decoder_output": {"logits": tensor [batch_size, T, dim],
                                       ... optionally "weights", "bias",
                                       "inputs", "num_sampled"},
                    "target_tensors": [target_sequence [batch_size, T],
                                       tgt_lengths [batch_size] or None]
                }

        Returns:
            Singleton loss tensor
        """
        target_sequence = input_dict['target_tensors'][0]
        tgt_lengths = input_dict['target_tensors'][1]

        if 'weights' in input_dict['decoder_output']:
            # Sampled-softmax path: use pre-projection activations ("inputs")
            # together with the output projection ("weights"/"bias").
            print("Because 'weights' is in the input_dict, we are using sampled softmax loss.")
            inputs = input_dict["decoder_output"]['inputs']
            # NOTE(review): side effect -- caches the hidden dimension on the
            # instance during graph construction.
            self._hid_dim = inputs.get_shape().as_list()[-1]
            inputs = tf.reshape(inputs, (-1, self._hid_dim))
            targets = tf.reshape(target_sequence, (-1, 1))
            weights = input_dict["decoder_output"]['weights']
            biases = input_dict["decoder_output"]['bias']

            # tf.nn.sampled_softmax_loss expects float32 operands.
            if inputs.dtype.base_dtype != tf.float32:
                inputs = tf.cast(inputs, tf.float32)
            if weights.dtype.base_dtype != tf.float32:
                weights = tf.cast(weights, tf.float32)
            if biases.dtype.base_dtype != tf.float32:
                biases = tf.cast(biases, tf.float32)

            crossent = tf.nn.sampled_softmax_loss(weights,
                                                  biases,
                                                  targets,
                                                  inputs,
                                                  input_dict['decoder_output']['num_sampled'],
                                                  self._tgt_vocab_size)
            if self._average_across_timestep:
                loss = tf.reduce_mean(crossent)
            else:
                loss = tf.reduce_sum(crossent)
                loss /= self._batch_size
        else:
            # Full-softmax fallback, same computation as BasicSequenceLoss.
            print("Because 'weights' is not in the input_dict, we are using normal softmax loss.")
            logits = input_dict["decoder_output"]["logits"]

            if self._offset_target_by_one:
                # this is necessary for auto-regressive models
                current_ts = tf.cast(tf.minimum(
                    tf.shape(target_sequence)[1],
                    tf.shape(logits)[1],
                ), tf.int32) - 1
                logits = tf.slice(
                    logits,
                    begin=[0, 0, 0],
                    size=[-1, current_ts, -1],
                )
                target_sequence = tf.slice(target_sequence,
                                           begin=[0, 1],
                                           size=[-1, current_ts])
            else:
                current_ts = tf.cast(tf.minimum(
                    tf.shape(target_sequence)[1],
                    tf.shape(logits)[1],
                ), tf.int32)

            # Cast logits after potential slice
            if logits.dtype.base_dtype != tf.float32:
                logits = tf.cast(logits, tf.float32)

            if self._do_mask:
                if tgt_lengths is None:
                    raise ValueError(
                        "If you are masking loss, tgt_lengths can't be None")
                mask = tf.sequence_mask(lengths=tgt_lengths - 1,
                                        maxlen=current_ts,
                                        dtype=logits.dtype)
            else:
                mask = tf.cast(tf.ones_like(target_sequence), logits.dtype)

            crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=tf.reshape(target_sequence, shape=[-1]),
                logits=tf.reshape(logits, shape=[-1, self._tgt_vocab_size]),
            )
            if self._average_across_timestep:
                loss = tf.reduce_mean(crossent * tf.reshape(mask, shape=[-1]))
            else:
                loss = tf.reduce_sum(crossent * tf.reshape(mask, shape=[-1]))
                loss /= self._batch_size
        return loss
| OpenSeq2Seq-master | open_seq2seq/losses/sequence_loss.py |
OpenSeq2Seq-master | open_seq2seq/test_utils/__init__.py |
|
# Copyright (c) 2017 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from six.moves import range
import numpy as np
import os
import errno
import io
import shutil
def create_source(size, source_vocab, vocab_map):
    """Generate ``size`` random symbol sequences and tally symbol frequencies.

    Args:
        size (int): number of sequences to generate.
        source_vocab (dict): symbol-index -> occurrence count; updated in place.
        vocab_map (dict): symbol-index -> character used in the output rows.

    Returns:
        list: ``size`` lists of characters, each 5-50 symbols long.
    """
    source = []
    for _ in range(size):
        new_row = []
        # Each sequence is 5 to 50 symbols long (randint's high is exclusive).
        for _ in range(np.random.randint(low=5, high=51)):
            new_dig = np.random.randint(low=0, high=len(vocab_map))
            new_row.append(vocab_map[new_dig])
            # Count every occurrence. The original initialized a new symbol's
            # count to 0, undercounting each symbol's frequency by one.
            source_vocab[new_dig] = source_vocab.get(new_dig, 0) + 1
        source.append(new_row)
    return source
def create_target(size, source):
    """Return the first ``size`` source rows, each reversed.

    Args:
        size (int): number of rows to take from ``source``.
        source (list): list of rows (lists of characters).

    Returns:
        list: reversed copies of the first ``size`` source rows.
    """
    return [source[idx][::-1] for idx in range(size)]
def write_to_file(path, data):
    """Write one space-separated row per line to ``path`` (UTF-8).

    Args:
        path (str): output file path.
        data (list): iterable of rows, each an iterable of string tokens.
    """
    # The context manager closes the file; the explicit f.close() the
    # original carried was redundant.
    with io.open(path, 'w', encoding='utf-8') as f:
        for row in data:
            f.write(' '.join(row) + '\n')
def write_vocab_to_file(path, data, vocab_map):
    """Write ``char<TAB>count`` vocabulary lines to ``path`` (UTF-8).

    Args:
        path (str): output file path.
        data (dict): symbol-index -> occurrence count.
        vocab_map (dict): symbol-index -> character.
    """
    # The context manager closes the file; the explicit f.close() the
    # original carried was redundant.
    with io.open(path, 'w', encoding='utf-8') as f:
        for key, value in data.items():
            f.write(vocab_map[key] + '\t' + str(value) + '\n')
def create_directory(path):
    """Create ``path`` (with parents), tolerating an already-existing directory.

    The errno check (rather than os.makedirs(..., exist_ok=True)) is kept for
    Python 2/3 compatibility, matching this module's __future__ imports.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Ignore "already exists"; re-raise any other failure.
        if exc.errno != errno.EEXIST:
            raise
def create_data(train_corpus_size=10000, dev_corpus_size=1000,
                test_corpus_size=2000, data_path="./toy_text_data"):
    """Generate the toy "reversed sequences" dataset.

    Creates train/dev/test splits (source = random Greek-letter sequences,
    target = the same sequences reversed) plus shared vocabulary files,
    all under ``data_path``.

    Args:
        train_corpus_size (int): number of training sequence pairs.
        dev_corpus_size (int): number of dev sequence pairs.
        test_corpus_size (int): number of test sequence pairs.
        data_path (str): root directory for the generated dataset.
    """
    # Greek lowercase alpha..kappa occupy contiguous code points, so the
    # map {0: alpha, ..., 9: kappa} can be built arithmetically.
    vocab_map = {i: chr(0x03B1 + i) for i in range(10)}
    source_vocab = {}

    vocab_dir = os.path.join(data_path, "vocab")
    create_directory(vocab_dir)

    # Keep the generation order (train, dev, test) stable: create_source
    # consumes the global numpy RNG stream sequentially.
    for split, size in (("train", train_corpus_size),
                        ("dev", dev_corpus_size),
                        ("test", test_corpus_size)):
        split_dir = os.path.join(data_path, split)
        create_directory(split_dir)
        source = create_source(size, source_vocab, vocab_map)
        write_to_file(os.path.join(split_dir, "source.txt"), source)
        write_to_file(os.path.join(split_dir, "target.txt"),
                      create_target(size, source))

    # in our case, source and target vocabs are the same
    write_vocab_to_file(os.path.join(vocab_dir, "source.txt"),
                        source_vocab, vocab_map)
    write_vocab_to_file(os.path.join(vocab_dir, "target.txt"),
                        source_vocab, vocab_map)
def remove_data(data_path="./toy_text_data"):
    """Delete the generated toy dataset directory tree at ``data_path``."""
    shutil.rmtree(data_path)
if __name__ == '__main__':
    # Generate the toy dataset in the default location when run as a script.
    create_data(data_path='./toy_text_data')
| OpenSeq2Seq-master | open_seq2seq/test_utils/create_reversed_examples.py |
OpenSeq2Seq-master | open_seq2seq/test_utils/test_speech_configs/__init__.py |
|
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import DeepSpeech2Encoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text

# Small DeepSpeech2 configuration used by the unit tests (toy speech data,
# single GPU, short training run).
base_params = {
    "use_horovod": False,
    "num_epochs": 150,
    "num_gpus": 1,
    "batch_size_per_gpu": 10,
    "save_summaries_steps": 10,
    "print_loss_steps": 10,
    "print_samples_steps": 20,
    "eval_steps": 50,
    "save_checkpoint_steps": 50,
    "logdir": "tmp_log_folder",
    "optimizer": "Momentum",
    "optimizer_params": {
        "momentum": 0.90,
    },
    "lr_policy": poly_decay,
    "lr_policy_params": {
        "learning_rate": 0.01,
        "power": 2,
    },
    "larc_params": {
        "larc_eta": 0.001,
    },
    "dtype": tf.float32,
    "summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
                  'variable_norm', 'gradient_norm', 'global_gradient_norm'],

    "encoder": DeepSpeech2Encoder,
    "encoder_params": {
        # Two 2-D conv layers over (time, frequency) before the RNN stack.
        "conv_layers": [
            {
                "kernel_size": [5, 11], "stride": [2, 2],
                "num_channels": 32, "padding": "SAME"
            },
            {
                "kernel_size": [5, 11], "stride": [1, 2],
                "num_channels": 64, "padding": "SAME"
            },
        ],
        "n_hidden": 128,
        "rnn_cell_dim": 128,
        "rnn_type": "gru",
        "num_rnn_layers": 1,
        "rnn_unidirectional": False,
        "row_conv": True,
        "row_conv_width": 8,
        "use_cudnn_rnn": True,
        "dropout_keep_prob": 0.9,
        "initializer": tf.contrib.layers.xavier_initializer,
        "initializer_params": {
            'uniform': False,
        },
        # Clipped ReLU: ReLU capped at 20.
        "activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
        "data_format": "channels_first",
        "bn_momentum": 0.001,
    },
    "decoder": FullyConnectedCTCDecoder,
    "decoder_params": {
        "initializer": tf.contrib.layers.xavier_initializer,
        "use_language_model": False,
    },
    "loss": CTCLoss,
    "loss_params": {},
}

# Train and eval use the same toy CSV; only shuffling differs.
train_params = {
    "data_layer": Speech2TextDataLayer,
    "data_layer_params": {
        "num_audio_features": 160,
        "input_type": "spectrogram",
        "vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
        "dataset_files": [
            "open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
        ],
        "shuffle": True,
    },
}

eval_params = {
    "data_layer": Speech2TextDataLayer,
    "data_layer_params": {
        "num_audio_features": 160,
        "input_type": "spectrogram",
        "vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
        "dataset_files": [
            "open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
        ],
        "shuffle": False,
    },
}
| OpenSeq2Seq-master | open_seq2seq/test_utils/test_speech_configs/ds2_test_config.py |
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
base_model = Speech2Text

# Small Wav2Letter-style (TDNN) configuration used by the unit tests.
base_params = {
    "use_horovod": False,
    "num_epochs": 500,
    "num_gpus": 1,
    "batch_size_per_gpu": 10,
    "save_summaries_steps": 10,
    "print_loss_steps": 10,
    "print_samples_steps": 20,
    "eval_steps": 50,
    "save_checkpoint_steps": 50,
    "logdir": "tmp_log_folder",
    "optimizer": "Momentum",
    "optimizer_params": {
        "momentum": 0.90,
    },
    "lr_policy": poly_decay,
    "lr_policy_params": {
        "learning_rate": 0.01,
        "power": 2,
    },
    "larc_params": {
        "larc_eta": 0.001,
    },
    "dtype": tf.float32,
    "summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
                  'variable_norm', 'gradient_norm', 'global_gradient_norm'],

    "encoder": TDNNEncoder,
    "encoder_params": {
        # Stack of 1-D convolutions over time.
        "convnet_layers": [
            {
                "type": "conv1d", "repeat": 3,
                "kernel_size": [7], "stride": [1],
                "num_channels": 200, "padding": "SAME",
                "dilation": [1]
            },
            {
                "type": "conv1d", "repeat": 1,
                "kernel_size": [1], "stride": [1],
                "num_channels": 400, "padding": "SAME",
                "dilation": [1]
            },
        ],
        "dropout_keep_prob": 0.9,
        "initializer": tf.contrib.layers.xavier_initializer,
        "initializer_params": {
            'uniform': False,
        },
        # Clipped ReLU: ReLU capped at 20.
        "activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
        "data_format": "channels_last",
        "bn_momentum": 0.001,
    },
    "decoder": FullyConnectedCTCDecoder,
    "decoder_params": {
        "initializer": tf.contrib.layers.xavier_initializer,
        "use_language_model": False,
    },
    "loss": CTCLoss,
    "loss_params": {},
}

# Train and eval use the same toy CSV; only shuffling differs.
train_params = {
    "data_layer": Speech2TextDataLayer,
    "data_layer_params": {
        "num_audio_features": 40,
        "input_type": "logfbank",
        "vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
        "dataset_files": [
            "open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
        ],
        "shuffle": True,
    },
}

eval_params = {
    "data_layer": Speech2TextDataLayer,
    "data_layer_params": {
        "num_audio_features": 40,
        "input_type": "logfbank",
        "vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
        "dataset_files": [
            "open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
        ],
        "shuffle": False,
    },
}
| OpenSeq2Seq-master | open_seq2seq/test_utils/test_speech_configs/w2l_test_config.py |
# pylint: skip-file
import tensorflow as tf
from open_seq2seq.models import Speech2Text
from open_seq2seq.encoders import TDNNEncoder
from open_seq2seq.decoders import FullyConnectedCTCDecoder
from open_seq2seq.data.speech2text.speech2text import Speech2TextDataLayer
from open_seq2seq.losses import CTCLoss
from open_seq2seq.optimizers.lr_policies import poly_decay
### If training with synthetic data, don't forget to add your synthetic csv
### to dataset files
base_model = Speech2Text

# Small Jasper-style (residual TDNN with blockout) configuration used by
# the unit tests.
base_params = {
    "use_horovod": False,
    "num_epochs": 500,
    "num_gpus": 1,
    "batch_size_per_gpu": 10,
    "save_summaries_steps": 10,
    "print_loss_steps": 10,
    "print_samples_steps": 20,
    "eval_steps": 50,
    "save_checkpoint_steps": 50,
    "logdir": "tmp_log_folder",
    "optimizer": "Momentum",
    "optimizer_params": {
        "momentum": 0.90,
    },
    "lr_policy": poly_decay,
    "lr_policy_params": {
        "learning_rate": 0.01,
        "power": 2,
    },
    "larc_params": {
        "larc_eta": 0.001,
    },
    "dtype": tf.float32,
    "summaries": ['learning_rate', 'variables', 'gradients', 'larc_summaries',
                  'variable_norm', 'gradient_norm', 'global_gradient_norm'],

    "encoder": TDNNEncoder,
    "encoder_params": {
        # 1-D conv stack; the repeated blocks use residual connections.
        "convnet_layers": [
            {
                "type": "conv1d", "repeat": 1,
                "kernel_size": [7], "stride": [1],
                "num_channels": 128, "padding": "SAME",
                "dilation": [1]
            },
            {
                "type": "conv1d", "repeat": 2,
                "kernel_size": [7], "stride": [1],
                "num_channels": 256, "padding": "SAME",
                "dilation": [1],
                "residual": True
            },
            {
                "type": "conv1d", "repeat": 2,
                "kernel_size": [1], "stride": [1],
                "num_channels": 256, "padding": "SAME",
                "dilation": [1],
                "residual": True
            },
        ],
        "dropout_keep_prob": 0.9,
        # Blockout: randomly drop whole conv blocks during training.
        "drop_block_prob": 0.2,
        "drop_block_index": -1,
        "initializer": tf.contrib.layers.xavier_initializer,
        "initializer_params": {
            'uniform': False,
        },
        "normalization": "batch_norm",
        # Clipped ReLU: ReLU capped at 20.
        "activation_fn": lambda x: tf.minimum(tf.nn.relu(x), 20.0),
        "data_format": "channels_last",
    },
    "decoder": FullyConnectedCTCDecoder,
    "decoder_params": {
        "initializer": tf.contrib.layers.xavier_initializer,
        "use_language_model": False,
    },
    "loss": CTCLoss,
    "loss_params": {},
}

# NOTE(review): this train_params (LibriSpeech setup) is dead code -- it is
# immediately overwritten by the toy-data train_params defined below.
train_params = {
    "data_layer": Speech2TextDataLayer,
    "data_layer_params": {
        "num_audio_features": 64,
        "input_type": "logfbank",
        "vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
        "dataset_files": [
            "/data/librispeech/librivox-train-clean-100.csv",
            "/data/librispeech/librivox-train-clean-360.csv",
            "/data/librispeech/librivox-train-other-500.csv",
            # Add synthetic csv here
        ],
        "syn_enable": False,  # Change to True if using synthetic data
        "syn_subdirs": [],  # Add subdirs of synthetic data
        "max_duration": 16.7,
        "shuffle": True,
    },
}

# This definition shadows the LibriSpeech train_params above; the tests run
# on the toy dataset.
train_params = {
    "data_layer": Speech2TextDataLayer,
    "data_layer_params": {
        "num_audio_features": 40,
        "input_type": "logfbank",
        "vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
        "dataset_files": [
            "open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
        ],
        "shuffle": True,
    },
}

eval_params = {
    "data_layer": Speech2TextDataLayer,
    "data_layer_params": {
        "num_audio_features": 40,
        "input_type": "logfbank",
        "vocab_file": "open_seq2seq/test_utils/toy_speech_data/vocab.txt",
        "dataset_files": [
            "open_seq2seq/test_utils/toy_speech_data/toy_data.csv",
        ],
        "shuffle": False,
    },
}
| OpenSeq2Seq-master | open_seq2seq/test_utils/test_speech_configs/jasper_res_blockout_test_config.py |
# Copyright (c) 2019 NVIDIA Corporation
"""This file has a CTC Greedy decoder"""
import numpy as np
def ctc_greedy_decoder(logits, wordmap, step_size,
                       blank_idx, start_shift, end_shift):
    """Greedy CTC decoding (collapse repeats, drop blanks) with word timings.

    Args:
        logits: time x chars array of log probabilities.
        wordmap: vocab mapping index -> character(s); " " separates words.
        step_size: time-domain steps per input frame.
        blank_idx: index of the CTC blank symbol.
        start_shift: shift applied to each word's start time.
        end_shift: shift applied to each word's end time.

    Returns:
        tuple: (decoded string, per-word start times, per-word end times).
    """
    chars = []          # emitted characters (before joining/stripping)
    word_starts = []
    word_ends = []
    previous = -1       # last argmax index, for collapsing repeats
    last_emit = -1      # frame index of the most recent emission
    for frame_idx, frame in enumerate(logits):
        best = np.argmax(frame)
        # Emit only when the symbol is not blank and differs from the
        # previous frame's symbol (standard greedy CTC collapse).
        if best != blank_idx and best != previous:
            frame_start = step_size * frame_idx + start_shift
            if not chars:
                # First emission opens the first word.
                word_starts.append(frame_start)
            elif chars[-1] == " ":
                # A new word begins; it cannot start before the previous
                # word's recorded end.
                word_starts.append(max(frame_start, word_ends[-1]))
            chars += wordmap[best]
            if chars[-1] == " ":
                # A space closes the preceding word at its last emission.
                word_ends.append(step_size * last_emit + end_shift)
            last_emit = frame_idx
        previous = best
    # Close the final word.
    word_ends.append(step_size * last_emit + end_shift)
    return "".join(chars).strip(" "), word_starts, word_ends
| OpenSeq2Seq-master | open_seq2seq/utils/ctc_decoder.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.