```python
async def send_to_device_messages(
self,
) -> List[Union[ToDeviceResponse, ToDeviceError]]:
"""Send out outgoing to-device messages."""
if not self.outgoing_to_device_messages:
return []
tasks = []
for message in self.outgoing_to_device_messages:
task = asyncio.ensure_future(self.to_device(message))
tasks.append(task)
    return await asyncio.gather(*tasks)
```

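Since `asyncio.gather` preserves task order, the caller gets back a mixed list of `ToDeviceResponse` and `ToDeviceError` objects, one per queued message. A minimal caller sketch, assuming this method lives on matrix-nio's `AsyncClient` (the `client` instance below is hypothetical):

```python
from nio import ToDeviceError

async def flush_to_device_queue(client):
    # One result per queued message, in queue order.
    results = await client.send_to_device_messages()
    for result in results:
        if isinstance(result, ToDeviceError):
            print("to-device send failed:", result)
```
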
```python
async def sync_forever(
self,
timeout: Optional[int] = None,
sync_filter: _FilterT = None,
since: Optional[str] = None,
full_state: Optional[bool] = None,
loop_sleep_time: Optional[int] = None,
first_sync_filter: _FilterT = None,
):
"""Continuously sync with the configured homeserver.
    This method calls the sync method in a loop. To react to events,
    event callbacks should be configured.
    The loop also makes sure to handle other required requests between
    syncs. To react to the responses, a response callback should be added.
Args:
        timeout (int, optional): The maximum time that the server should
            wait for new events before it returns the request anyway,
            in milliseconds.
            If the server fails to return within 5 seconds after the
            expected timeout, the client will time out by itself.
        sync_filter (Union[None, str, Dict[Any, Any]]):
A filter ID or dict that should be used for sync requests.
full_state (bool, optional): Controls whether to include the full
state for all rooms the user is a member of. If this is set to
true, then all state events will be returned, even if since is
non-empty. The timeline will still be limited by the since
parameter. This argument will be used only for the first sync
request.
since (str, optional): A token specifying a point in time where to
continue the sync from. Defaults to the last sync token we
received from the server using this API call. This argument
will be used only for the first sync request, the subsequent
sync requests will use the token from the last sync response.
loop_sleep_time (int, optional): The sleep time, if any, between
successful sync loop iterations in milliseconds.
        first_sync_filter (Union[None, str, Dict[Any, Any]]):
A filter ID or dict to use for the first sync request only.
If `None` (default), the `sync_filter` parameter's value
is used.
To have no filtering for the first sync regardless of
`sync_filter`'s value, pass `{}`.
"""
first_sync = True
while True:
try:
used_filter = first_sync_filter if first_sync else sync_filter
tasks = [
asyncio.ensure_future(coro)
for coro in (
self.sync(timeout, used_filter, since, full_state),
self.send_to_device_messages(),
)
]
if self.should_upload_keys:
tasks.append(asyncio.ensure_future(self.keys_upload()))
if self.should_query_keys:
tasks.append(asyncio.ensure_future(self.keys_query()))
if self.should_claim_keys:
tasks.append(
asyncio.ensure_future(
self.keys_claim(self.get_users_for_key_claiming()),
)
)
for response in asyncio.as_completed(tasks):
await self.run_response_callbacks([await response])
first_sync = False
full_state = None
since = None
if loop_sleep_time:
await asyncio.sleep(loop_sleep_time / 1000)
except asyncio.CancelledError:
for task in tasks:
task.cancel()
            break
```

```python
async def room_send(
self,
room_id: str,
message_type: str,
content: Dict[Any, Any],
tx_id: Optional[str] = None,
ignore_unverified_devices: bool = False,
):
"""Send a message to a room.
Args:
room_id(str): The room id of the room where the message should be
sent to.
message_type(str): A string identifying the type of the message.
content(Dict[Any, Any]): A dictionary containing the content of the
message.
tx_id(str, optional): The transaction ID of this event used to
uniquely identify this message.
ignore_unverified_devices(bool): If the room is encrypted and
contains unverified devices, the devices can be marked as
ignored here. Ignored devices will still receive encryption
keys for messages but they won't be marked as verified.
If the room where the message should be sent is encrypted the message
will be encrypted before sending.
This method also makes sure that the room members are fully synced and
that keys are queried before sending messages to an encrypted room.
If the method can't sync the state fully to send out an encrypted
message after a couple of retries it raises `SendRetryError`.
Raises `LocalProtocolError` if the client isn't logged in.
"""
async def send(room_id, message_type, content, tx_id):
if self.olm:
try:
room = self.rooms[room_id]
except KeyError:
raise LocalProtocolError(
"No such room with id {} found.".format(room_id)
)
# Reactions as of yet don't support encryption.
# Relevant spec proposal https://github.com/matrix-org/matrix-doc/pull/1849
if room.encrypted and message_type != "m.reaction":
message_type, content = self.encrypt(
room_id, message_type, content
)
method, path, data = Api.room_send(
self.access_token, room_id, message_type, content, tx_id
)
return await self._send(
RoomSendResponse, method, path, data, (room_id,)
)
retries = 10
uuid = tx_id or uuid4()
    for _ in range(retries):
try:
return await send(room_id, message_type, content, uuid)
except GroupEncryptionError:
sharing_event = self.sharing_session.get(room_id, None)
if sharing_event:
await sharing_event.wait()
else:
share = await self.share_group_session(
room_id,
ignore_unverified_devices=ignore_unverified_devices,
)
await self.run_response_callbacks([share])
except MembersSyncError:
responses = []
responses.append(await self.joined_members(room_id))
if self.should_query_keys:
responses.append(await self.keys_query())
await self.run_response_callbacks(responses)
    raise SendRetryError(
        "Max retries exceeded while trying to send the message"
    )
```

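Callers never see the retry loop; sending a plain text event reduces to a single call. A hedged usage sketch in matrix-nio style, with `client` assumed to be a logged-in `AsyncClient`:

```python
# Sketch: send an m.room.message event; encryption happens transparently.
async def say_hello(client, room_id):
    await client.room_send(
        room_id,
        message_type="m.room.message",
        content={"msgtype": "m.text", "body": "Hello, world!"},
        ignore_unverified_devices=True,  # don't block on unverified devices
    )
```
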
```python
async def share_group_session(
self,
room_id: str,
tx_id: Optional[str] = None,
ignore_unverified_devices: bool = False,
) -> Union[ShareGroupSessionResponse, ShareGroupSessionError]:
"""Share a group session with a room.
This method sends a group session to members of a room.
Args:
room_id(str): The room id of the room where the message should be
sent to.
tx_id(str, optional): The transaction ID of this event used to
uniquely identify this message.
ignore_unverified_devices(bool): Mark unverified devices as
ignored. Ignored devices will still receive encryption
keys for messages but they won't be marked as verified.
    Raises LocalProtocolError if the client isn't logged in, if the
    session store isn't loaded, if no room with the given room id exists,
    if the room isn't encrypted, or if a key sharing request is already
    in flight for this room.
"""
assert self.olm
try:
room = self.rooms[room_id]
except KeyError:
raise LocalProtocolError("No such room with id {}".format(room_id))
if not room.encrypted:
raise LocalProtocolError(
"Room with id {} is not encrypted".format(room_id)
)
if room_id in self.sharing_session:
raise LocalProtocolError(
"Already sharing a group session for {}".format(room_id)
)
self.sharing_session[room_id] = AsyncioEvent()
shared_with = set()
missing_sessions = self.get_missing_sessions(room_id)
if missing_sessions:
await self.keys_claim(missing_sessions)
try:
while True:
user_set, to_device_dict = self.olm.share_group_session(
room_id,
list(room.users.keys()),
ignore_missing_sessions=True,
ignore_unverified_devices=ignore_unverified_devices,
)
uuid = tx_id or uuid4()
method, path, data = Api.to_device(
self.access_token, "m.room.encrypted", to_device_dict, uuid
)
response = await self._send(
ShareGroupSessionResponse,
method,
path,
data,
(room_id, user_set),
)
if isinstance(response, ShareGroupSessionResponse):
shared_with.update(response.users_shared_with)
except LocalProtocolError:
return ShareGroupSessionResponse(room_id, shared_with)
except ClientConnectionError:
raise
finally:
event = self.sharing_session.pop(room_id)
        event.set()
```

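`room_send` triggers this implicitly when it hits a `GroupEncryptionError`, but it can also be called up front to pre-share a group session before a burst of messages. A short sketch under the same matrix-nio assumption:

```python
from nio import ShareGroupSessionError

async def preshare_keys(client, room_id):
    # Pre-share the megolm session so the first room_send doesn't pay the cost.
    response = await client.share_group_session(
        room_id, ignore_unverified_devices=True
    )
    if isinstance(response, ShareGroupSessionError):
        print("key sharing failed:", response)
```
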
```python
async def room_messages(
self,
room_id: str,
start: str,
end: Optional[str] = None,
direction: MessageDirection = MessageDirection.back,
limit: int = 10,
message_filter: _FilterT = None,
) -> Union[RoomMessagesResponse, RoomMessagesError]:
"""Fetch a list of message and state events for a room.
It uses pagination query parameters to paginate history in the room.
    Returns either a `RoomMessagesResponse` if the request was successful
    or a `RoomMessagesError` if there was an error with the request.
Args:
room_id (str): The room id of the room for which we would like to
fetch the messages.
start (str): The token to start returning events from. This token
can be obtained from a prev_batch token returned for each room
by the sync API, or from a start or end token returned by a
previous request to this endpoint.
end (str, optional): The token to stop returning events at. This
token can be obtained from a prev_batch token returned for
each room by the sync endpoint, or from a start or end token
returned by a previous request to this endpoint.
direction (MessageDirection, optional): The direction to return
events from. Defaults to MessageDirection.back.
limit (int, optional): The maximum number of events to return.
Defaults to 10.
message_filter (Union[None, str, Dict[Any, Any]]):
A filter ID or dict that should be used for this room messages
request.
Example:
>>> response = await client.room_messages(room_id, previous_batch)
>>> next_response = await client.room_messages(room_id,
... response.end)
"""
method, path = Api.room_messages(
self.access_token,
room_id,
start,
end=end,
direction=direction,
limit=limit,
message_filter=message_filter,
)
return await self._send(
RoomMessagesResponse, method, path, response_data=(room_id,)
    )
```

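The `start`/`end` tokens make back-pagination a simple loop: feed each response's `end` token back in as the next `start` until no token comes back. A sketch, assuming `client` is the async client above and `first_token` is a `prev_batch` token from a sync response:

```python
from nio import RoomMessagesResponse

async def fetch_history(client, room_id, first_token, pages=5):
    """Walk backwards through a room's timeline, one page per request."""
    token = first_token
    events = []
    for _ in range(pages):
        response = await client.room_messages(room_id, token, limit=50)
        if not isinstance(response, RoomMessagesResponse):
            break  # a RoomMessagesError ends the walk
        events.extend(response.chunk)
        if not response.end:
            break  # the server signalled the start of history
        token = response.end
    return events
```
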
```python
def sync(
access_token: str,
since: Optional[str] = None,
timeout: Optional[int] = None,
filter: _FilterT = None,
full_state: Optional[bool] = None,
):
# type: (...) -> Tuple[str, str]
"""Synchronise the client's state with the latest state on the server.
Returns the HTTP method and HTTP path for the request.
Args:
access_token (str): The access token to be used with the request.
        since (str): A token specifying a point in time where to
            continue the sync from.
timeout (int): The maximum time to wait, in milliseconds, before
returning this request.
        filter (Union[None, str, Dict[Any, Any]]):
A filter ID or dict that should be used for this sync request.
full_state (bool, optional): Controls whether to include the full
state for all rooms the user is a member of. If this is set to
true, then all state events will be returned, even if since is
non-empty. The timeline will still be limited by the since
parameter.
"""
query_parameters = {"access_token": access_token}
if since:
query_parameters["since"] = since
if full_state is not None:
query_parameters["full_state"] = str(full_state).lower()
if timeout is not None:
query_parameters["timeout"] = str(timeout)
if isinstance(filter, dict):
filter_json = json.dumps(filter, separators=(",", ":"))
query_parameters["filter"] = filter_json
elif isinstance(filter, str):
query_parameters["filter"] = filter
return "GET", Api._build_path("sync", query_parameters) | def sync(
access_token: str,
since: Optional[str] = None,
timeout: Optional[int] = None,
filter: _FilterT = None,
full_state: Optional[bool] = None,
):
# type: (...) -> Tuple[str, str]
"""Synchronise the client's state with the latest state on the server.
Returns the HTTP method and HTTP path for the request.
Args:
access_token (str): The access token to be used with the request.
since (str): The room id of the room where the event will be sent
to.
timeout (int): The maximum time to wait, in milliseconds, before
returning this request.
sync_filter (Union[None, str, Dict[Any, Any]):
A filter ID or dict that should be used for this sync request.
full_state (bool, optional): Controls whether to include the full
state for all rooms the user is a member of. If this is set to
true, then all state events will be returned, even if since is
non-empty. The timeline will still be limited by the since
parameter.
"""
query_parameters = {"access_token": access_token}
if since:
query_parameters["since"] = since
if full_state is not None:
query_parameters["full_state"] = str(full_state).lower()
if timeout is not None:
query_parameters["timeout"] = str(timeout)
if isinstance(filter, dict):
filter_json = json.dumps(filter, separators=(",", ":"))
query_parameters["filter"] = filter_json
elif isinstance(filter, str):
query_parameters["filter"] = filter
return "GET", Api._build_path("sync", query_parameters) |
```python
def room_messages(
access_token: str,
room_id: str,
start: str,
end: Optional[str] = None,
direction: MessageDirection = MessageDirection.back,
limit: int = 10,
message_filter: _FilterT = None,
):
    # type: (...) -> Tuple[str, str]
"""Get room messages.
Returns the HTTP method and HTTP path for the request.
Args:
access_token (str): The access token to be used with the request.
        room_id (str): The room id of the room for which to download
            the messages.
start (str): The token to start returning events from.
end (str): The token to stop returning events at.
direction (MessageDirection): The direction to return events from.
limit (int): The maximum number of events to return.
message_filter (Union[None, str, Dict[Any, Any]]):
A filter ID or dict that should be used for this room messages
request.
"""
query_parameters = {
"access_token": access_token,
"from": start,
"limit": limit,
}
if end:
query_parameters["to"] = end
if isinstance(direction, str):
if direction in ("b", "back"):
direction = MessageDirection.back
elif direction in ("f", "fron"):
direction = MessageDirection.front
else:
raise ValueError("Invalid direction")
if direction is MessageDirection.front:
query_parameters["dir"] = "f"
else:
query_parameters["dir"] = "b"
if isinstance(message_filter, dict):
filter_json = json.dumps(message_filter, separators=(",", ":"))
query_parameters["filter"] = filter_json
elif isinstance(message_filter, str):
query_parameters["filter"] = message_filter
path = "rooms/{room}/messages".format(room=room_id)
return "GET", Api._build_path(path, query_parameters) | def room_messages(
access_token: str,
room_id: str,
start: str,
end: Optional[str] = None,
direction: MessageDirection = MessageDirection.back,
limit: int = 10,
message_filter: _FilterT = None,
):
# type (...) -> Tuple[str, str]
"""Get room messages.
Returns the HTTP method and HTTP path for the request.
Args:
access_token (str): The access token to be used with the request.
room_id (str): room id of the room for which to download the
messages
start (str): The token to start returning events from.
end (str): The token to stop returning events at.
direction (MessageDirection): The direction to return events from.
limit (int): The maximum number of events to return.
message_filter (Union[None, str, Dict[Any, Any]]):
A filter ID or dict that should be used for this room messages
request.
"""
query_parameters = {
"access_token": access_token,
"from": start,
"limit": limit,
}
if end:
query_parameters["to"] = end
if isinstance(direction, str):
if direction in ("b", "back"):
direction = MessageDirection.back
elif direction in ("f", "fron"):
direction = MessageDirection.front
else:
raise ValueError("Invalid direction")
if direction is MessageDirection.front:
query_parameters["dir"] = "f"
else:
query_parameters["dir"] = "b"
if isinstance(message_filter, dict):
filter_json = json.dumps(message_filter, separators=(",", ":"))
query_parameters["filter"] = filter_json
elif isinstance(message_filter, str):
query_parameters["filter"] = message_filter
path = "rooms/{room}/messages".format(room=room_id)
return "GET", Api._build_path(path, query_parameters) |
```python
def update_device(access_token, device_id, content):
    # type: (str, str, Dict[str, str]) -> Tuple[str, str, str]
"""Update the metadata of the given device.
Returns the HTTP method, HTTP path and data for the request.
Args:
access_token (str): The access token to be used with the request.
device_id (str): The device for which the metadata will be updated.
content (Dict): A dictionary of metadata values that will be
updated for the device.
"""
query_parameters = {"access_token": access_token}
path = "devices/{}".format(quote(device_id))
return (
"PUT",
Api._build_path(path, query_parameters),
Api.to_json(content)
    )
```

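The returned triple plugs straight into an HTTP client: method, path with query string, and JSON body. Renaming a device might look like this (all values are placeholders):

```python
method, path, data = Api.update_device(
    "my_access_token",
    "ABCDEFGH",                    # device_id is percent-quoted into the path
    {"display_name": "my phone"},  # metadata fields to update
)
# method == "PUT"; path ends in /devices/ABCDEFGH?access_token=...;
# data is the JSON-encoded content dict.
```
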
```python
def perspective_projection(self, R_tl_at_car, t_tl_at_car, point_at_tl_list):
    '''
    Project points expressed in the traffic light's local frame onto the
    image plane.
    '''
_R_tl_at_camera = self.R_car_at_camera.dot(R_tl_at_car)
_t_tl_at_camera = self.R_car_at_camera.dot(t_tl_at_car)
projection_list = list()
for _p_3D_at_tl in point_at_tl_list:
        # Transform the point from the traffic light frame into the camera frame.
_point_3D_at_tl = np.array(_p_3D_at_tl).reshape((3,1))
_point_3D_at_camera = _R_tl_at_camera.dot(_point_3D_at_tl) + _t_tl_at_camera
_ray = self.np_K_camera_est.dot( _point_3D_at_camera )
_projection = (_ray / abs(_ray[2,0]))[:2,0]
print("_projection = \n%s" % _projection)
projection_list.append(_projection)
    return projection_list
```

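The method chains two rigid transforms (traffic-light frame to car frame to camera frame) and then applies the pinhole intrinsics. The same math as a standalone function, runnable without the surrounding class; the intrinsics and pose below are fabricated:

```python
import numpy as np

def project_points(K, R_obj_to_cam, t_obj_to_cam, points_obj):
    """Pinhole-project 3-D points given in an object's local frame."""
    pixels = []
    for p in points_obj:
        p_cam = R_obj_to_cam @ np.asarray(p, dtype=float).reshape(3, 1) + t_obj_to_cam
        ray = K @ p_cam                               # homogeneous image coordinates
        pixels.append((ray / abs(ray[2, 0]))[:2, 0])  # divide by |depth|, as above
    return pixels

# Toy check: identity pose, a point 10 m straight ahead projects to the
# principal point (320, 240).
K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])
print(project_points(K, np.eye(3), np.zeros((3, 1)), [[0.0, 0.0, 10.0]]))
```
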
```python
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
    if self.pose:
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# Get stop line waypoint index
line = stop_line_positions[i] # Note: this is loaded from config
tmp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# Find closest stop line waypoint index
d = tmp_wp_idx - car_wp_idx
if d >= 0 and d < diff:
# Found a closer frontal light (stop line)
diff = d
closest_light = light
line_wp_idx = tmp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
# self.waypoints = None
    return -1, TrafficLight.UNKNOWN
```

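The selection logic reduces to: among all stop lines whose waypoint index lies at or ahead of the car's index, keep the one with the smallest non-negative index difference (like the original, it does not wrap around the track). Isolated as a tiny function with fabricated indices:

```python
def closest_stop_line_ahead(car_wp_idx, stop_line_wp_idxs, num_waypoints):
    """Return (list_index, waypoint_idx) of the nearest stop line ahead, or None."""
    best = None
    diff = num_waypoints  # sentinel: farther than any real stop line
    for i, wp_idx in enumerate(stop_line_wp_idxs):
        d = wp_idx - car_wp_idx
        if 0 <= d < diff:  # ahead of the car and closer than the best so far
            diff = d
            best = (i, wp_idx)
    return best

print(closest_stop_line_ahead(120, [40, 150, 600], 1000))  # -> (1, 150)
```
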
```python
def verification_email_body(
case_name,
url,
display_name,
category,
subcategory,
breakpoint_1,
breakpoint_2,
hgnc_symbol,
panels,
gtcalls,
tx_changes,
name,
comment,
):
"""
Builds the html code for the variant verification emails (order verification and cancel verification)
Args:
case_name(str): case display name
url(str): the complete url to the variant, accessible when clicking on the email link
display_name(str): a display name for the variant
category(str): category of the variant
subcategory(str): sub-category of the variant
breakpoint_1(str): breakpoint 1 (format is 'chr:start')
breakpoint_2(str): breakpoint 2 (format is 'chr:stop')
        hgnc_symbol(str): a gene or a list of genes separated by commas
        panels(str): a gene panel or a list of panels separated by commas
        gtcalls(str): genotyping calls of any sample in the family
        tx_changes(str): amino acid changes caused by the variant, only for SNVs, otherwise 'Not available'
        name(str): user_obj['name'], utf-8 encoded
comment(str): sender's comment from form
Returns:
html(str): the html body of the variant verification email
"""
html = """
<ul>
<li>
<strong>Case {case_name}</strong>: <a href="{url}">{display_name}</a>
</li>
    <li><strong>Variant type</strong>: {category} ({subcategory})</li>
<li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>
<li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>
<li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>
<li><strong>Gene panels</strong>: {panels}</li>
<li><strong>GT call</strong></li>
{gtcalls}
<li><strong>Amino acid changes</strong></li>
{tx_changes}
<li><strong>Comment</strong>: {comment}</li>
<li><strong>Ordered by</strong>: {name}</li>
</ul>
""".format(
case_name=case_name,
url=url,
display_name=display_name,
category=category,
subcategory=subcategory,
breakpoint_1=breakpoint_1,
breakpoint_2=breakpoint_2,
hgnc_symbol=hgnc_symbol,
panels=panels,
gtcalls=gtcalls,
tx_changes=tx_changes,
name=name,
comment=comment,
)
    return html
```

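Since this is plain `str.format` templating, callers pass pre-rendered fragments; in particular `gtcalls` and `tx_changes` are expected to already be HTML `<li>` rows. A minimal, entirely made-up invocation:

```python
html = verification_email_body(
    case_name="internal_id_1",
    url="https://scout.example.com/variant/abc123",  # placeholder URL
    display_name="1_880086_T_C",
    category="snv",
    subcategory="snv",
    breakpoint_1="1:880086",
    breakpoint_2="1:880086",
    hgnc_symbol="NOC2L",
    panels="panel1, panel2",
    gtcalls="<li>NA12878: 0/1</li>",  # pre-rendered list items
    tx_changes="<li>c.1041T>C</li>",
    name="Jane Doe",
    comment="Please verify this variant.",
)
```
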
```python
def parse_clnsig(variant, transcripts=None):
"""Get the clnsig information
The clinvar format has changed several times and this function will try to parse all of them.
The first format represented the clinical significance terms with numbers. This was then
replaced by strings and the separator changed. At this stage the possibility to connect review
stats to a certain significance term was taken away. So now we can only annotate each
significance term with all review stats.
    Also, the clinvar accession number is in some cases annotated with the
    info key CLNACC and sometimes with CLNVID.
Args:
variant(cyvcf2.Variant)
acc(str): The clnsig accession number, raw from vcf
sig(str): The clnsig significance term, raw from vcf
revstat(str): The clnsig revstat, raw from vcf
transcripts(iterable(dict))
Returns:
        clnsig_accessions(list(dict)): A list with clnsig accessions
"""
transcripts = transcripts or []
acc = variant.INFO.get("CLNACC", variant.INFO.get("CLNVID", ""))
sig = variant.INFO.get("CLNSIG", "").lower()
revstat = variant.INFO.get("CLNREVSTAT", "").lower()
    clnsig_accessions = []
if not acc:
if transcripts:
clnsig = set()
for transcript in transcripts:
for annotation in transcript.get("clnsig", []):
clnsig.add(annotation)
for annotation in clnsig:
clnsig_accsessions.append({"value": annotation})
return clnsig_accsessions
# There are some versions where clinvar uses integers to represent terms
if isinstance(acc, int) or acc.isdigit():
revstat_groups = []
if revstat:
revstat_groups = [rev.lstrip("_") for rev in revstat.split(",")]
sig_groups = []
for significance in sig.split(","):
for term in significance.lstrip("_").split("/"):
sig_groups.append("_".join(term.split(" ")))
for sig_term in sig_groups:
            clnsig_accessions.append(
{
"value": sig_term,
"accession": int(acc),
"revstat": ",".join(revstat_groups),
}
)
# Test to parse the older format
    if not clnsig_accessions:
acc_groups = acc.split("|")
sig_groups = sig.split("|")
revstat_groups = revstat.split("|")
for acc_group, sig_group, revstat_group in zip(
acc_groups, sig_groups, revstat_groups
):
accessions = acc_group.split(",")
significances = sig_group.split(",")
revstats = revstat_group.split(",")
for accession, significance, revstat in zip(
accessions, significances, revstats
):
                clnsig_accessions.append(
{
"value": int(significance),
"accession": accession,
"revstat": revstat,
}
)
    return clnsig_accessions
```

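cyvcf2 `Variant` objects can't be constructed by hand, so a tiny stand-in exposing the same `.INFO.get` interface is enough to exercise the parser; the stub and values below are purely illustrative.

```python
class FakeVariant:
    """Minimal stand-in for cyvcf2.Variant: just the INFO mapping."""
    def __init__(self, info):
        self.INFO = info

# Modern format: numeric accession, slash-separated significance terms.
variant = FakeVariant({
    "CLNVID": "12345",
    "CLNSIG": "Pathogenic/Likely_pathogenic",
    "CLNREVSTAT": "criteria_provided,_multiple_submitters",
})
print(parse_clnsig(variant))
# -> [{'value': 'pathogenic', 'accession': 12345,
#      'revstat': 'criteria_provided,multiple_submitters'},
#     {'value': 'likely_pathogenic', 'accession': 12345,
#      'revstat': 'criteria_provided,multiple_submitters'}]
```
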
```python
def is_pathogenic(variant):
"""Check if a variant has the clinical significance to be loaded
We want to load all variants that are in any of the predefined categories regardless of rank
scores etc.
Args:
variant(cyvcf2.Variant)
Returns:
bool: If variant should be loaded based on clinvar or not
"""
    load_categories = {
        "pathogenic",
        "likely_pathogenic",
        "conflicting_interpretations_of_pathogenicity",
    }
    clnsig_accessions = parse_clnsig(variant)
    for annotation in clnsig_accessions:
clnsig = annotation["value"]
if clnsig in load_categories:
return True
if isinstance(clnsig, int):
            if clnsig in (4, 5):  # legacy ClinVar integer codes: 4 = likely pathogenic, 5 = pathogenic
return True
    return False
```

```python
def variants(institute_id, case_name):
"""Display a list of SNV variants."""
page = int(request.form.get("page", 1))
category = "snv"
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
variant_type = request.args.get("variant_type", "clinical")
if request.form.get("hpo_clinical_filter"):
case_obj["hpo_clinical_filter"] = True
user_obj = store.user(current_user.email)
if request.method == "POST":
# If special filter buttons were selected:
form = controllers.populate_filters_form(
store, institute_obj, case_obj, user_obj, category, request.form
)
else:
form = FiltersForm(request.args)
# set form variant data type the first time around
form.variant_type.data = variant_type
# populate filters dropdown
available_filters = store.filters(institute_id, category)
form.filters.choices = [
(filter.get("_id"), filter.get("display_name")) for filter in available_filters
]
# populate available panel choices
available_panels = case_obj.get("panels", []) + [
{"panel_name": "hpo", "display_name": "HPO"}
]
panel_choices = [
(panel["panel_name"], panel["display_name"]) for panel in available_panels
]
form.gene_panels.choices = panel_choices
# update status of case if visited for the first time
controllers.activate_case(store, institute_obj, case_obj, current_user)
# upload gene panel if symbol file exists
if request.files:
file = request.files[form.symbol_file.name]
        if file and file.filename != "":
log.debug("Upload file request files: {0}".format(request.files.to_dict()))
try:
stream = io.StringIO(file.stream.read().decode("utf-8"), newline=None)
            except UnicodeDecodeError:
flash("Only text files are supported!", "warning")
return redirect(request.referrer)
hgnc_symbols_set = set(form.hgnc_symbols.data)
log.debug("Symbols prior to upload: {0}".format(hgnc_symbols_set))
new_hgnc_symbols = controllers.upload_panel(
store, institute_id, case_name, stream
)
hgnc_symbols_set.update(new_hgnc_symbols)
form.hgnc_symbols.data = hgnc_symbols_set
# reset gene panels
form.gene_panels.data = ""
# check if supplied gene symbols exist
hgnc_symbols = []
non_clinical_symbols = []
not_found_symbols = []
not_found_ids = []
    if form.hgnc_symbols.data:
is_clinical = form.data.get("variant_type", "clinical") == "clinical"
clinical_symbols = store.clinical_symbols(case_obj) if is_clinical else None
for hgnc_symbol in form.hgnc_symbols.data:
if hgnc_symbol.isdigit():
hgnc_gene = store.hgnc_gene(int(hgnc_symbol))
if hgnc_gene is None:
not_found_ids.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_gene["hgnc_symbol"])
elif sum(1 for i in store.hgnc_genes(hgnc_symbol)) == 0:
not_found_symbols.append(hgnc_symbol)
elif is_clinical and (hgnc_symbol not in clinical_symbols):
non_clinical_symbols.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_symbol)
if not_found_ids:
flash("HGNC id not found: {}".format(", ".join(not_found_ids)), "warning")
if not_found_symbols:
flash(
"HGNC symbol not found: {}".format(", ".join(not_found_symbols)), "warning"
)
if non_clinical_symbols:
flash(
"Gene not included in clinical list: {}".format(
", ".join(non_clinical_symbols)
),
"warning",
)
form.hgnc_symbols.data = hgnc_symbols
# handle HPO gene list separately
if "hpo" in form.data["gene_panels"]:
hpo_symbols = list(
set(term_obj["hgnc_symbol"] for term_obj in case_obj["dynamic_gene_list"])
)
current_symbols = set(hgnc_symbols)
current_symbols.update(hpo_symbols)
form.hgnc_symbols.data = list(current_symbols)
variants_query = store.variants(case_obj["_id"], query=form.data, category=category)
if request.form.get("export"):
return controllers.download_variants(store, case_obj, variants_query)
data = controllers.variants(store, institute_obj, case_obj, variants_query, page)
return dict(
institute=institute_obj,
case=case_obj,
form=form,
manual_rank_options=MANUAL_RANK_OPTIONS,
cancer_tier_options=CANCER_TIER_OPTIONS,
severe_so_terms=SEVERE_SO_TERMS,
page=page,
**data
    )
```

```python
def str_variants(institute_id, case_name):
"""Display a list of STR variants."""
page = int(request.args.get("page", 1))
variant_type = request.args.get("variant_type", "clinical")
category = "str"
form = StrFiltersForm(request.args)
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
controllers.activate_case(store, institute_obj, case_obj, current_user)
query = form.data
query["variant_type"] = variant_type
variants_query = store.variants(
case_obj["_id"], category=category, query=query
).sort(
[
("str_repid", pymongo.ASCENDING),
("chromosome", pymongo.ASCENDING),
("position", pymongo.ASCENDING),
]
)
data = controllers.str_variants(
store, institute_obj, case_obj, variants_query, page
)
return dict(
institute=institute_obj,
case=case_obj,
variant_type=variant_type,
manual_rank_options=MANUAL_RANK_OPTIONS,
form=form,
page=page,
**data
    )
```

```python
def sv_variants(institute_id, case_name):
"""Display a list of structural variants."""
page = int(request.form.get("page", 1))
variant_type = request.args.get("variant_type", "clinical")
category = "sv"
# Define case and institute objects
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if request.form.get("hpo_clinical_filter"):
case_obj["hpo_clinical_filter"] = True
# update status of case if visited for the first time
controllers.activate_case(store, institute_obj, case_obj, current_user)
form = controllers.populate_sv_filters_form(
store, institute_obj, case_obj, category, request
)
variants_query = store.variants(case_obj["_id"], category=category, query=form.data)
# if variants should be exported
if request.form.get("export"):
return controllers.download_variants(store, case_obj, variants_query)
data = controllers.sv_variants(store, institute_obj, case_obj, variants_query, page)
return dict(
institute=institute_obj,
case=case_obj,
variant_type=variant_type,
form=form,
severe_so_terms=SEVERE_SO_TERMS,
manual_rank_options=MANUAL_RANK_OPTIONS,
page=page,
**data
    )
```

```python
def cancer_sv_variants(institute_id, case_name):
"""Display a list of cancer structural variants."""
page = int(request.form.get("page", 1))
variant_type = request.args.get("variant_type", "clinical")
category = "cancer_sv"
# Define case and institute objects
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if request.form.get("hpo_clinical_filter"):
case_obj["hpo_clinical_filter"] = True
# update status of case if visited for the first time
controllers.activate_case(store, institute_obj, case_obj, current_user)
form = controllers.populate_sv_filters_form(
store, institute_obj, case_obj, category, request
)
variants_query = store.variants(case_obj["_id"], category=category, query=form.data)
# if variants should be exported
if request.form.get("export"):
return controllers.download_variants(store, case_obj, variants_query)
data = controllers.sv_variants(store, institute_obj, case_obj, variants_query, page)
return dict(
institute=institute_obj,
case=case_obj,
variant_type=variant_type,
form=form,
severe_so_terms=SEVERE_SO_TERMS,
cancer_tier_options=CANCER_TIER_OPTIONS,
manual_rank_options=MANUAL_RANK_OPTIONS,
page=page,
**data
    )
```

```python
def upload_panel(institute_id, case_name):
"""Parse gene panel file and fill in HGNC symbols for filter."""
    # Build the filters form first, since the uploaded file is read from it.
    category = request.args.get("category")
    if category == "sv":
        form = SvFiltersForm(request.args)
    else:
        form = FiltersForm(request.args)
    file = form.symbol_file.data
    if file.filename == "":
        flash("No selected file", "warning")
        return redirect(request.referrer)
    try:
        stream = io.StringIO(file.stream.read().decode("utf-8"), newline=None)
    except UnicodeDecodeError:
        flash("Only text files are supported!", "warning")
        return redirect(request.referrer)
hgnc_symbols = set(form.hgnc_symbols.data)
new_hgnc_symbols = controllers.upload_panel(store, institute_id, case_name, stream)
hgnc_symbols.update(new_hgnc_symbols)
form.hgnc_symbols.data = ",".join(hgnc_symbols)
# reset gene panels
form.gene_panels.data = ""
# HTTP redirect code 307 asks that the browser preserves the method of request (POST).
if category == "sv":
return redirect(
url_for(
".sv_variants",
institute_id=institute_id,
case_name=case_name,
**form.data
),
code=307,
)
else:
return redirect(
url_for(
".variants", institute_id=institute_id, case_name=case_name, **form.data
),
code=307,
        )
```

```python
def download_verified():
"""Download all verified variants for user's cases"""
user_obj = store.user(current_user.email)
user_institutes = user_obj.get("institutes")
temp_excel_dir = os.path.join(variants_bp.static_folder, "verified_folder")
os.makedirs(temp_excel_dir, exist_ok=True)
written_files = controllers.verified_excel_file(
store, user_institutes, temp_excel_dir
)
if written_files:
today = datetime.datetime.now().strftime("%Y-%m-%d")
# zip the files on the fly and serve the archive to the user
data = io.BytesIO()
with zipfile.ZipFile(data, mode="w") as z:
for f_name in pathlib.Path(temp_excel_dir).iterdir():
z.write(f_name, os.path.basename(f_name))
data.seek(0)
# remove temp folder with excel files in it
shutil.rmtree(temp_excel_dir)
return send_file(
data,
mimetype="application/zip",
as_attachment=True,
attachment_filename="_".join(["scout", "verified_variants", today])
+ ".zip",
cache_timeout=0,
)
else:
flash("No verified variants could be exported for user's institutes", "warning")
    return redirect(request.referrer)
Python | def remote_static():
"""Stream *large* static files with special requirements."""
file_path = request.args.get("file")
range_header = request.headers.get("Range", None)
if not range_header and (file_path.endswith(".bam") or file_path.endswith(".cram")):
return abort(500)
new_resp = send_file_partial(file_path)
    return new_resp
Python | def igv():
"""Visualize BAM alignments using igv.js (https://github.com/igvteam/igv.js)"""
chrom = request.form.get("contig")
if chrom == "MT":
chrom = "M"
start = request.form.get("start")
stop = request.form.get("stop")
locus = "chr{0}:{1}-{2}".format(chrom, start, stop)
LOG.debug("Displaying locus %s", locus)
chromosome_build = request.form.get("build")
LOG.debug("Chromosome build is %s", chromosome_build)
samples = request.form.get("sample").split(",")
LOG.debug("samples: %s", samples)
bam_files = None
bai_files = None
rhocall_bed_files = None
rhocall_wig_files = None
tiddit_coverage_files = None
updregion_files = None
updsites_files = None
if request.form.get("align") == "mt_bam":
bam_files = request.form.get("mt_bam").split(",")
bai_files = request.form.get("mt_bai").split(",")
else:
if request.form.get("bam"):
bam_files = request.form.get("bam").split(",")
LOG.debug("loading the following BAM tracks: %s", bam_files)
if request.form.get("bai"):
bai_files = request.form.get("bai").split(",")
if request.form.get("rhocall_bed"):
rhocall_bed_files = request.form.get("rhocall_bed").split(",")
LOG.debug("loading the following rhocall BED tracks: %s", rhocall_bed_files)
if request.form.get("rhocall_wig"):
rhocall_wig_files = request.form.get("rhocall_wig").split(",")
LOG.debug("loading the following rhocall WIG tracks: %s", rhocall_wig_files)
if request.form.get("tiddit_coverage_wig"):
tiddit_coverage_files = request.form.get("tiddit_coverage_wig").split(",")
LOG.debug(
"loading the following tiddit_coverage tracks: %s",
tiddit_coverage_files,
)
if request.form.get("upd_regions_bed"):
updregion_files = request.form.get("upd_regions_bed").split(",")
            LOG.debug("loading the following upd region tracks: %s", updregion_files)
if request.form.get("upd_sites_bed"):
updsites_files = request.form.get("upd_sites_bed").split(",")
            LOG.debug("loading the following upd sites tracks: %s", updsites_files)
display_obj = {}
# Add chromosome build info to the track object
fastaURL = ""
indexURL = ""
cytobandURL = ""
gene_track_format = ""
gene_track_URL = ""
gene_track_indexURL = ""
clinvar_snvs_url = ""
clinvar_track_format = "bigBed"
if chromosome_build in ["GRCh38", "38"] or chrom == "M":
fastaURL = (
"https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg38/hg38.fa"
)
indexURL = "https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg38/hg38.fa.fai"
cytobandURL = "https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg38/cytoBandIdeo.txt"
gene_track_format = "gtf"
gene_track_URL = "https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg38/genes/Homo_sapiens.GRCh38.80.sorted.gtf.gz"
gene_track_indexURL = "https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg38/genes/Homo_sapiens.GRCh38.80.sorted.gtf.gz.tbi"
clinvar_snvs_url = "https://hgdownload.soe.ucsc.edu/gbdb/hg38/bbi/clinvar/clinvarMain.bb"
else:
fastaURL = "https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg19/hg19.fasta"
indexURL = "https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg19/hg19.fasta.fai"
cytobandURL = "https://s3.amazonaws.com/igv.broadinstitute.org/genomes/seq/hg19/cytoBand.txt"
gene_track_format = "bed"
gene_track_URL = "https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg19/genes/refGene.hg19.bed.gz"
gene_track_indexURL = "https://s3.amazonaws.com/igv.broadinstitute.org/annotations/hg19/genes/refGene.hg19.bed.gz.tbi"
clinvar_snvs_url = "https://hgdownload.soe.ucsc.edu/gbdb/hg19/bbi/clinvar/clinvarMain.bb"
display_obj["reference_track"] = {
"fastaURL": fastaURL,
"indexURL": indexURL,
"cytobandURL": cytobandURL,
}
display_obj["genes_track"] = {
"name": "Genes",
"type": "annotation",
"format": gene_track_format,
"sourceType": "file",
"url": gene_track_URL,
"indexURL": gene_track_indexURL,
"displayMode": "EXPANDED",
}
display_obj["clinvar_snvs"] = {
"name": "ClinVar",
"type": "annotation",
"format": clinvar_track_format,
"sourceType": "file",
"url": clinvar_snvs_url,
"displayMode": "EXPANDED",
"maxRows": 50,
"height": 100,
}
# Init upcoming igv-tracks
sample_tracks = []
counter = 0
for sample in samples:
        # some samples might not have an associated alignment file; guard for that
        if bam_files and len(bam_files) > counter and bam_files[counter]:
sample_tracks.append(
{
"name": sample,
"url": bam_files[counter],
"format": bam_files[counter].split(".")[-1], # "bam" or "cram"
"indexURL": bai_files[counter],
"height": 700,
}
)
counter += 1
display_obj["sample_tracks"] = sample_tracks
if rhocall_wig_files:
rhocall_wig_tracks = make_igv_tracks("Rhocall Zygosity", rhocall_wig_files)
display_obj["rhocall_wig_tracks"] = rhocall_wig_tracks
if rhocall_bed_files:
rhocall_bed_tracks = make_igv_tracks("Rhocall Regions", rhocall_bed_files)
display_obj["rhocall_bed_tracks"] = rhocall_bed_tracks
if tiddit_coverage_files:
tiddit_wig_tracks = make_igv_tracks("TIDDIT Coverage", tiddit_coverage_files)
display_obj["tiddit_wig_tracks"] = tiddit_wig_tracks
if updregion_files:
updregion_tracks = make_igv_tracks("UPD region", updregion_files)
display_obj["updregion_tracks"] = updregion_tracks
if updsites_files:
updsites_tracks = make_igv_tracks("UPD sites", updsites_files)
display_obj["updsites_tracks"] = updsites_tracks
if request.form.get("center_guide"):
display_obj["display_center_guide"] = True
else:
display_obj["display_center_guide"] = False
    return render_template("alignviewers/igv_viewer.html", locus=locus, **display_obj)
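The genome-build branching above reduces to a small lookup. A minimal restatement for clarity (an illustrative sketch, not code from scout itself):

def reference_build(chromosome_build, chrom):
    """Pick hg38 resources for GRCh38 or the mitochondrial contig, hg19 otherwise."""
    if chromosome_build in ("GRCh38", "38") or chrom == "M":
        return "hg38"
    return "hg19"

assert reference_build("37", "M") == "hg38"  # MT alignments always use the hg38 resources
assert reference_build("GRCh38", "1") == "hg38"
assert reference_build("37", "1") == "hg19"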
Python | def make_igv_tracks(name, file_list):
    """Return a list of dicts in IGV track format, one per input file."""
    track_list = []
    for track_url in file_list:
        track_list.append({"name": name, "url": track_url, "min": 0.0, "max": 30.0})
    return track_list
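A short usage sketch for make_igv_tracks; the WIG paths below are hypothetical placeholders:

coverage_files = ["/data/case1/sample1.wig", "/data/case1/sample2.wig"]  # hypothetical paths
tracks = make_igv_tracks("TIDDIT Coverage", coverage_files)
# tracks[0] == {"name": "TIDDIT Coverage", "url": "/data/case1/sample1.wig", "min": 0.0, "max": 30.0}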
Python | def execute_command(cmd):
    """
    Execute a command and return its decoded standard output.
    Args:
        cmd (list): command sequence
    Returns:
        output (str): decoded stdout from the command ("" if there was no output)
    """
output = ""
LOG.info("Running: %s" % " ".join(cmd))
try:
output = subprocess.check_output(cmd, shell=False)
    except CalledProcessError:
        LOG.warning("Something went wrong with loqusdb")
        raise
if not output:
return output
output = output.decode("utf-8")
    return output
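A usage sketch for execute_command, mirroring the loqusdb call sites below; it assumes a loqusdb executable is available on PATH:

from subprocess import CalledProcessError

try:
    # count the cases known to loqusdb; execute_command returns "" on empty output
    output = execute_command(["loqusdb", "cases", "--count"])
    nr_cases = int(output.strip()) if output else 0
except CalledProcessError:
    nr_cases = 0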
Python | def case_count(self):
"""Returns number of cases in loqus instance
Returns:
nr_cases(int)
"""
    return self._case_count()
Python | def _case_count(self):
"""Return number of cases that the observation is based on
Returns:
nr_cases(int)
"""
nr_cases = 0
case_call = copy.deepcopy(self.base_call)
case_call.extend(["cases", "--count"])
output = ""
try:
output = execute_command(case_call)
    except CalledProcessError:
        LOG.warning("Something went wrong with loqusdb")
        return nr_cases
if not output:
LOG.info("Could not find information about loqusdb cases")
return nr_cases
try:
nr_cases = int(output.strip())
    except ValueError:
        # the output could not be parsed as an integer; keep the default of 0
        pass
    return nr_cases
Python | def genemap_lines():
"""Returns some lines in genemap2 format, including header"""
lines = [
"# Copyright (c) 1966-2016 Johns Hopkins University. Use of this"
" file adheres to the terms specified at https://omim.org/help/agreement.\n",
"# Generated: 2017-02-02\n",
"# See end of file for additional documentation on specific fields\n",
"# Chromosome\tGenomic Position Start\tGenomic Position End\tCyto"
" Location\tComputed Cyto Location\tMIM Number\tGene Symbols\tGene Name"
"\tApproved Symbol\tEntrez Gene ID\tEnsembl Gene ID\tComments\t"
"Phenotypes\tMouse Gene Symbol/ID\n",
"chr1\t1232248\t1235040\t1p36.33\t\t615291\tB3GALT6, SEMDJL1, EDSP2\t"
"UDP-Gal:beta-Gal beta-1,3-galactosyltransferase polypeptide 6\tB3GALT"
"6\t126792\tENSG00000176022\t\tEhlers-Danlos syndrome, progeroid type,"
" 2, 615349 (3), Autosomal recessive; Spondyloepimetaphyseal dysplasia"
" with joint laxity, type 1, with or without fractures, 271640 (3),"
" Autosomal recessive\tB3galt6 (MGI:2152819)\n",
]
    return lines
Python | def mim2gene_lines():
"""Returns some lines in mim2gene format, including header"""
    lines = [
        "# Copyright (c) 1966-2016 Johns Hopkins University. Use of this file "
        "adheres to the terms specified at https://omim.org/help/agreement.\n",
        "# Generated: 2017-02-02\n",
        "# This file provides links between the genes in OMIM and other gene"
        " identifiers.\n",
        "# THIS IS NOT A TABLE OF GENE-PHENOTYPE RELATIONSHIPS.\n",
        "# MIM Number\tMIM Entry Type (see FAQ 1.3 at https://omim.org/help/faq)\t"
        "Entrez Gene ID (NCBI)\tApproved Gene Symbol (HGNC)\tEnsembl Gene ID (Ensembl)\n",
"615291\tgene\t126792\tB3GALT6\tENSG00000176022,ENST00000379198",
"615349\tphenotype",
"271640\tphenotype",
]
    return lines
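Both fixtures mimic the tab-separated OMIM download files. As a minimal, dependency-free illustration (not scout's own OMIM parser), a genemap2 data line can be split into named columns like this:

header = [
    "Chromosome", "Genomic Position Start", "Genomic Position End",
    "Cyto Location", "Computed Cyto Location", "MIM Number",
    "Gene Symbols", "Gene Name", "Approved Symbol", "Entrez Gene ID",
    "Ensembl Gene ID", "Comments", "Phenotypes", "Mouse Gene Symbol/ID",
]
for line in genemap_lines():
    if line.startswith("#"):
        continue  # skip copyright, generation date and header comments
    record = dict(zip(header, line.rstrip("\n").split("\t")))
    print(record["MIM Number"], record["Approved Symbol"])  # 615291 B3GALT6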
Python | def institute_and_case(store, institute_id, case_name=None):
    """Fetch institute and case objects."""
institute_obj = store.institute(institute_id)
if institute_obj is None:
flash("Can't find institute: {}".format(institute_id), "warning")
return abort(404)
if case_name:
case_obj = store.case(institute_id=institute_id, display_name=case_name)
if case_obj is None:
return abort(404)
# validate that user has access to the institute
if not current_user.is_admin:
if institute_id not in current_user.institutes:
if not case_name or not any(
inst_id in case_obj["collaborators"]
for inst_id in current_user.institutes
):
# you don't have access!!
                flash("You don't have access to: {}".format(institute_id), "danger")
return abort(403)
# you have access!
if case_name:
return institute_obj, case_obj
else:
        return institute_obj
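The access check above can be read as a standalone predicate. A minimal restatement with plain-dict inputs (illustrative only; the real check reads current_user and the case document):

def has_access(user, institute_id, case_collaborators=()):
    """True if the user may see data owned by institute_id."""
    if user.get("is_admin"):
        return True
    if institute_id in user.get("institutes", []):
        return True
    # shared case: any of the user's institutes among the case collaborators
    return any(inst in case_collaborators for inst in user.get("institutes", []))

assert has_access({"institutes": ["cust000"]}, "cust001", ["cust000"])
assert not has_access({"institutes": ["cust000"]}, "cust001")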
Python | def variant_case(store, case_obj, variant_obj):
"""Pre-process case for the variant view.
Adds information about files from case obj to variant
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variant_obj(scout.models.Variant)
"""
case_append_alignments(case_obj)
try:
chrom = None
starts = []
ends = []
for gene in variant_obj.get("genes", []):
chrom = gene["common"]["chromosome"]
starts.append(gene["common"]["start"])
ends.append(gene["common"]["end"])
        if chrom and starts and ends:
vcf_path = store.get_region_vcf(
case_obj, chrom=chrom, start=min(starts), end=max(ends)
)
# Create a reduced VCF with variants in the region
case_obj["region_vcf_file"] = vcf_path
    except Exception:
        LOG.warning("skip VCF region for alignment view")
Python | def case_append_alignments(case_obj):
"""Deconvolute information about files to case_obj.
    This function prepares the bam/cram files so that they are easily accessed in the
    templates.
    Loops over the individuals and gathers bam/cram files, indexes and sample display names into lists.
Args:
case_obj(scout.models.Case)
"""
unwrap_settings = [
{"path": "bam_file", "append_to": "bam_files", "index": "bai_files"},
{"path": "mt_bam", "append_to": "mt_bams", "index": "mt_bais"},
{"path": "rhocall_bed", "append_to": "rhocall_beds", "index": "no_index"},
{"path": "rhocall_wig", "append_to": "rhocall_wigs", "index": "no_index"},
{
"path": "upd_regions_bed",
"append_to": "upd_regions_beds",
"index": "no_index",
},
{"path": "upd_sites_bed", "append_to": "upd_sites_beds", "index": "no_index"},
{
"path": "tiddit_coverage_wig",
"append_to": "tiddit_coverage_wigs",
"index": "no_index",
},
]
for individual in case_obj["individuals"]:
append_safe(case_obj, "sample_names", individual.get("display_name"))
for setting in unwrap_settings:
file_path = individual.get(setting["path"])
            LOG.debug("file path: %s", file_path)
if not (file_path and os.path.exists(file_path)):
                LOG.debug("%s: no %s file found", individual["individual_id"], setting["path"])
continue
append_safe(case_obj, setting["append_to"], file_path)
            if setting["index"] != "no_index":
append_safe(
case_obj, setting["index"], find_index(file_path)
                )
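append_safe and find_index are helpers not shown in this excerpt. A hedged sketch of what they might look like, assuming append_safe appends to a list value (creating it on first use) and find_index looks for an index file next to the alignment file; the real implementations live elsewhere in scout and may differ:

import os

def append_safe(obj, key, value):
    """Append value to the list at obj[key], creating the list if needed."""
    obj.setdefault(key, []).append(value)

def find_index(align_file):
    """Guess the index file next to a bam/cram file, if one exists (assumption)."""
    for suffix in (".bai", ".crai"):
        candidate = align_file + suffix
        if os.path.exists(candidate):
            return candidate
    return None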
Python | def hpo(out_dir):
"""Download all files necessary for HPO"""
out_dir = pathlib.Path(out_dir)
out_dir.mkdir(parents=True, exist_ok=True)
LOG.info("Download HPO resources to %s", out_dir)
    print_hpo(out_dir)
Python | def cases(institute_id):
"""Display a list of cases for an institute."""
institute_obj = institute_and_case(store, institute_id)
query = request.args.get("query")
limit = 100
if request.args.get("limit"):
limit = int(request.args.get("limit"))
skip_assigned = request.args.get("skip_assigned")
is_research = request.args.get("is_research")
all_cases = store.cases(
collaborator=institute_id,
name_query=query,
skip_assigned=skip_assigned,
is_research=is_research,
)
sort_by = request.args.get("sort")
sort_order = request.args.get("order") or "asc"
    if sort_by:
        pymongo_sort = pymongo.ASCENDING
        if sort_order == "desc":
            pymongo_sort = pymongo.DESCENDING
        if sort_by in ("analysis_date", "track", "status"):
            all_cases.sort(sort_by, pymongo_sort)
LOG.debug("Prepare all cases")
prioritized_cases = store.prioritized_cases(institute_id=institute_id)
data = controllers.cases(store, all_cases, prioritized_cases, limit)
data["sort_order"] = sort_order
data["sort_by"] = sort_by
data["nr_cases"] = store.nr_cases(institute_id=institute_id)
sanger_unevaluated = controllers.get_sanger_unevaluated(
store, institute_id, current_user.email
)
    if sanger_unevaluated:
data["sanger_unevaluated"] = sanger_unevaluated
return dict(
institute=institute_obj,
skip_assigned=skip_assigned,
is_research=is_research,
query=query,
**data
    )
Python | def matchmaker_matches(institute_id, case_name):
"""Show all MatchMaker matches for a given case"""
# check that only authorized users can access MME patients matches
panel = 1
if request.method == "POST":
        panel = request.form.get("pane_id")
user_obj = store.user(current_user.email)
if "mme_submitter" not in user_obj["roles"]:
flash("unauthorized request", "warning")
return redirect(request.referrer)
# Required params for getting matches from MME server:
mme_base_url = current_app.config.get("MME_URL")
mme_token = current_app.config.get("MME_TOKEN")
if not mme_base_url or not mme_token:
flash(
"An error occurred reading matchmaker connection parameters. Please check config file!",
"danger",
)
return redirect(request.referrer)
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
data = controllers.mme_matches(case_obj, institute_obj, mme_base_url, mme_token)
    if data and data.get("server_errors"):
        flash(
            "MatchMaker server returned error: {}".format(data["server_errors"]),
            "danger",
        )
        return redirect(request.referrer)
    if not data:
        data = {"institute": institute_obj, "case": case_obj}
    data["panel"] = panel
    return data
Python | def matchmaker_match(institute_id, case_name, target):
"""Starts an internal match or a match against one or all MME external nodes"""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
# check that only authorized users can run matches
user_obj = store.user(current_user.email)
if "mme_submitter" not in user_obj["roles"]:
flash("unauthorized request", "warning")
return redirect(request.referrer)
# Required params for sending an add request to MME:
mme_base_url = current_app.config.get("MME_URL")
mme_accepts = current_app.config.get("MME_ACCEPTS")
mme_token = current_app.config.get("MME_TOKEN")
nodes = current_app.mme_nodes
if not mme_base_url or not mme_token or not mme_accepts:
flash(
"An error occurred reading matchmaker connection parameters. Please check config file!",
"danger",
)
return redirect(request.referrer)
match_results = controllers.mme_match(
case_obj, target, mme_base_url, mme_token, nodes, mme_accepts
)
ok_responses = 0
    for match_result in match_results:
        if match_result.get("status_code") == 200:
            ok_responses += 1
if ok_responses:
flash(
"Match request sent. Look for eventual matches in 'Matches' page.", "info"
)
else:
flash("An error occurred while sending match request.", "danger")
    return redirect(request.referrer)
Python | def matchmaker_add(institute_id, case_name):
"""Add or update a case in MatchMaker"""
# check that only authorized users can add patients to MME
user_obj = store.user(current_user.email)
if "mme_submitter" not in user_obj["roles"]:
flash("unauthorized request", "warning")
return redirect(request.referrer)
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
causatives = False
features = False
if case_obj.get("suspects") and len(case_obj.get("suspects")) > 3:
flash(
"At the moment it is not possible to save to MatchMaker more than 3 pinned variants",
"warning",
)
return redirect(request.referrer)
elif case_obj.get("suspects"):
causatives = True
if case_obj.get("phenotype_terms"):
features = True
mme_save_options = ["sex", "features", "disorders"]
for index, item in enumerate(mme_save_options):
if item in request.form:
LOG.info("item {} is in request form".format(item))
mme_save_options[index] = True
else:
mme_save_options[index] = False
genomic_features = request.form.get("genomicfeatures")
genes_only = True # upload to matchmaker only gene names
if genomic_features == "variants":
genes_only = False # upload to matchmaker both variants and gene names
# If there are no genomic features nor HPO terms to share for this case, abort
if (not case_obj.get("suspects") and not mme_save_options[1]) or (
causatives is False and features is False
):
flash(
"In order to upload a case to MatchMaker you need to pin a variant or at least assign a phenotype (HPO term)",
"danger",
)
return redirect(request.referrer)
# Required params for sending an add request to MME:
mme_base_url = current_app.config.get("MME_URL")
mme_accepts = current_app.config.get("MME_ACCEPTS")
mme_token = current_app.config.get("MME_TOKEN")
if not mme_base_url or not mme_accepts or not mme_token:
flash(
"An error occurred reading matchmaker connection parameters. Please check config file!",
"danger",
)
return redirect(request.referrer)
add_result = controllers.mme_add(
store=store,
user_obj=user_obj,
case_obj=case_obj,
add_gender=mme_save_options[0],
add_features=mme_save_options[1],
add_disorders=mme_save_options[2],
genes_only=genes_only,
mme_base_url=mme_base_url,
mme_accepts=mme_accepts,
mme_token=mme_token,
)
# flash MME responses (one for each patient posted)
    n_success_responses = 0
n_inserted = 0
n_updated = 0
category = "warning"
for resp in add_result["server_responses"]:
message = resp.get("message")
if resp.get("status_code") == 200:
            n_success_responses += 1
else:
flash(
"an error occurred while adding patient to matchmaker: {}".format(
message
),
"warning",
)
if message == "Patient was successfully updated.":
n_updated += 1
elif message == "Patient was successfully inserted into database.":
n_inserted += 1
# if at least one patient was inserted or updated into matchmaker, save submission at the case level:
if n_inserted or n_updated:
category = "success"
store.case_mme_update(
case_obj=case_obj, user_obj=user_obj, mme_subm_obj=add_result
)
flash(
"Number of new patients in matchmaker:{0}, number of updated records:{1}, number of failed requests:{2}".format(
n_inserted,
n_updated,
            len(add_result.get("server_responses")) - n_success_responses,
),
category,
)
    return redirect(request.referrer)
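The mme_save_options list above is mutated in place from option names into booleans and then indexed positionally. An equivalent dict-keyed form, shown only as a sketch since the surrounding code indexes the list by position (and this needs an active Flask request context):

# Sketch: the same option parsing, keyed by name instead of by position.
mme_options = {name: name in request.form for name in ("sex", "features", "disorders")}
# mme_options["sex"] corresponds to mme_save_options[0], and so on.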
Python | def gene_variants(institute_id):
"""Display a list of SNV variants."""
page = int(request.form.get("page", 1))
institute_obj = institute_and_case(store, institute_id)
# populate form, conditional on request method
if request.method == "POST":
form = GeneVariantFiltersForm(request.form)
else:
form = GeneVariantFiltersForm(request.args)
variant_type = form.data.get("variant_type", "clinical")
# check if supplied gene symbols exist
hgnc_symbols = []
non_clinical_symbols = []
not_found_symbols = []
not_found_ids = []
data = {}
    if form.hgnc_symbols.data:
is_clinical = form.data.get("variant_type", "clinical") == "clinical"
clinical_symbols = store.clinical_symbols(case_obj) if is_clinical else None
for hgnc_symbol in form.hgnc_symbols.data:
if hgnc_symbol.isdigit():
hgnc_gene = store.hgnc_gene(int(hgnc_symbol))
if hgnc_gene is None:
not_found_ids.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_gene["hgnc_symbol"])
elif store.hgnc_genes(hgnc_symbol).count() == 0:
not_found_symbols.append(hgnc_symbol)
elif is_clinical and (hgnc_symbol not in clinical_symbols):
non_clinical_symbols.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_symbol)
if not_found_ids:
flash("HGNC id not found: {}".format(", ".join(not_found_ids)), "warning")
if not_found_symbols:
flash(
"HGNC symbol not found: {}".format(", ".join(not_found_symbols)),
"warning",
)
if non_clinical_symbols:
flash(
"Gene not included in clinical list: {}".format(
", ".join(non_clinical_symbols)
),
"warning",
)
form.hgnc_symbols.data = hgnc_symbols
LOG.debug("query {}".format(form.data))
variants_query = store.gene_variants(
query=form.data,
institute_id=institute_id,
category="snv",
variant_type=variant_type,
)
data = controllers.gene_variants(store, variants_query, institute_id, page)
    return dict(institute=institute_obj, form=form, page=page, **data)
Python | def pdf_case_report(institute_id, case_name):
"""Download a pdf report for a case"""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
data = controllers.case_report_content(store, institute_obj, case_obj)
# add coverage report on the bottom of this report
if current_app.config.get("SQLALCHEMY_DATABASE_URI"):
data["coverage_report"] = controllers.coverage_report_contents(
store, institute_obj, case_obj, request.url_root
)
# workaround to be able to print the case pedigree to pdf
if case_obj.get("madeline_info") is not None:
with open(
os.path.join(cases_bp.static_folder, "madeline.svg"), "w"
) as temp_madeline:
temp_madeline.write(case_obj["madeline_info"])
html_report = render_template(
"cases/case_report.html",
institute=institute_obj,
case=case_obj,
format="pdf",
**data
)
return render_pdf(
HTML(string=html_report),
download_filename=case_obj["display_name"]
+ "_"
+ datetime.datetime.now().strftime("%Y-%m-%d")
+ "_scout.pdf",
    )
Python | def case_diagnosis(institute_id, case_name):
"""Add or remove a diagnosis for a case."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
link = url_for(".case", institute_id=institute_id, case_name=case_name)
level = "phenotype" if "phenotype" in request.form else "gene"
omim_id = request.form["omim_id"]
    remove = request.args.get("remove") == "yes"
store.diagnose(
institute_obj,
case_obj,
user_obj,
link,
level=level,
omim_id=omim_id,
remove=remove,
)
    return redirect(request.referrer)
Python | def phenotypes_actions(institute_id, case_name):
"""Perform actions on multiple phenotypes."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
case_url = url_for(".case", institute_id=institute_id, case_name=case_name)
action = request.form["action"]
hpo_ids = request.form.getlist("hpo_id")
user_obj = store.user(current_user.email)
if action == "DELETE":
for hpo_id in hpo_ids:
# DELETE a phenotype from the list
store.remove_phenotype(institute_obj, case_obj, user_obj, case_url, hpo_id)
elif action == "PHENOMIZER":
if len(hpo_ids) == 0:
hpo_ids = [
term["phenotype_id"] for term in case_obj.get("phenotype_terms", [])
]
username = current_app.config["PHENOMIZER_USERNAME"]
password = current_app.config["PHENOMIZER_PASSWORD"]
diseases = controllers.hpo_diseases(username, password, hpo_ids)
return render_template(
"cases/diseases.html",
diseases=diseases,
institute=institute_obj,
case=case_obj,
)
elif action == "ADDGENE":
hgnc_symbol = None
for raw_symbol in request.form.getlist("genes"):
LOG.debug("raw gene: {}".format(raw_symbol))
# avoid empty lists
if raw_symbol:
                # take the first number before |, and remove any space.
try:
hgnc_symbol_split = raw_symbol.split("|", 1)[0]
hgnc_symbol = int(hgnc_symbol_split.replace(" ", ""))
except ValueError:
flash(
"Provided gene info could not be parsed! "
"Please allow autocompletion to finish.",
"warning",
)
LOG.debug("Parsed HGNC symbol {}".format(hgnc_symbol))
store.update_dynamic_gene_list(
case_obj, hgnc_ids=[hgnc_symbol], add_only=True
)
elif action == "GENES":
hgnc_symbols = set()
for raw_symbols in request.form.getlist("genes"):
LOG.debug("raw gene list: {}".format(raw_symbols))
# avoid empty lists
if raw_symbols:
try:
hgnc_symbols.update(
raw_symbol.split(" ", 1)[0]
for raw_symbol in raw_symbols.split("|")
)
except ValueError:
flash(
"Provided gene info could not be parsed! "
"Please allow autocompletion to finish.",
"warning",
)
LOG.debug("HGNC symbols {}".format(hgnc_symbols))
store.update_dynamic_gene_list(case_obj, hgnc_symbols=hgnc_symbols)
elif action == "GENERATE":
if len(hpo_ids) == 0:
hpo_ids = [
term["phenotype_id"] for term in case_obj.get("phenotype_terms", [])
]
results = store.generate_hpo_gene_list(*hpo_ids)
# determine how many HPO terms each gene must match
hpo_count = int(request.form.get("min_match") or 1)
hgnc_ids = [result[0] for result in results if result[1] >= hpo_count]
store.update_dynamic_gene_list(
case_obj, hgnc_ids=hgnc_ids, phenotype_ids=hpo_ids
)
    return redirect(case_url)
Python | def mark_causative(institute_id, case_name, variant_id, partial_causative=False):
"""Mark a variant as confirmed causative."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
variant_obj = store.variant(variant_id)
user_obj = store.user(current_user.email)
link = url_for(
"variant.variant",
institute_id=institute_id,
case_name=case_name,
variant_id=variant_id,
)
if request.form["action"] == "ADD":
if "partial_causative" in request.form:
omim_terms = request.form.getlist("omim_select")
hpo_terms = request.form.getlist("hpo_select")
store.mark_partial_causative(
institute_obj,
case_obj,
user_obj,
link,
variant_obj,
omim_terms,
hpo_terms,
)
else:
store.mark_causative(institute_obj, case_obj, user_obj, link, variant_obj)
    elif request.form["action"] == "DELETE":
        # partial_causative arrives as a string from the URL; compare explicitly instead of eval()
        if str(partial_causative) == "True":
store.unmark_partial_causative(
institute_obj, case_obj, user_obj, link, variant_obj
)
else:
store.unmark_causative(institute_obj, case_obj, user_obj, link, variant_obj)
# send the user back to the case that was marked as solved
case_url = url_for(".case", institute_id=institute_id, case_name=case_name)
    return redirect(case_url)
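The partial_causative URL component arrives as a string, which is why the eval() call was replaced above with an explicit comparison. A reusable sketch of that parsing:

def parse_bool(value):
    """Interpret 'True'/'False' strings (and real bools) without eval()."""
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("true", "1", "yes")

assert parse_bool("True") and not parse_bool("False")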
Python | def share(institute_id, case_name):
"""Share a case with a different institute."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
collaborator_id = request.form["collaborator"]
revoke_access = "revoke" in request.form
link = url_for(".case", institute_id=institute_id, case_name=case_name)
if revoke_access:
store.unshare(institute_obj, case_obj, collaborator_id, user_obj, link)
else:
store.share(institute_obj, case_obj, collaborator_id, user_obj, link)
    return redirect(request.referrer)
Python | def rerun(institute_id, case_name):
"""Request a case to be rerun."""
sender = current_app.config.get("MAIL_USERNAME")
recipient = current_app.config.get("TICKET_SYSTEM_EMAIL")
controllers.rerun(
store, mail, current_user, institute_id, case_name, sender, recipient
)
    return redirect(request.referrer)
Python | def default_panels(institute_id, case_name):
"""Update default panels for a case."""
panel_ids = request.form.getlist("panel_ids")
controllers.update_default_panels(
store, current_user, institute_id, case_name, panel_ids
)
    return redirect(request.referrer)
Python | def vcf2cytosure(institute_id, case_name, individual_id):
"""Download vcf2cytosure file for individual."""
(display_name, vcf2cytosure) = controllers.vcf2cytosure(
store, institute_id, case_name, individual_id
)
outdir = os.path.abspath(os.path.dirname(vcf2cytosure))
filename = os.path.basename(vcf2cytosure)
LOG.debug("Attempt to deliver file {0} from dir {1}".format(filename, outdir))
attachment_filename = display_name + ".vcf2cytosure.cgh"
return send_from_directory(
outdir, filename, attachment_filename=attachment_filename, as_attachment=True
    )
"""Download vcf2cytosure file for individual."""
(display_name, vcf2cytosure) = controllers.vcf2cytosure(
store, institute_id, case_name, individual_id
)
outdir = os.path.abspath(os.path.dirname(vcf2cytosure))
filename = os.path.basename(vcf2cytosure)
LOG.debug("Attempt to deliver file {0} from dir {1}".format(filename, outdir))
attachment_filename = display_name + ".vcf2cytosure.cgh"
return send_from_directory(
outdir, filename, attachment_filename=attachment_filename, as_attachment=True
) |
Python | def build_variant_query(
        self, query=None, institute_id=None, category="snv", variant_type=None
):
"""Build a mongo query across multiple cases.
Translate query options from a form into a complete mongo query dictionary.
Beware that unindexed queries against a large variant collection will
be extremely slow.
Currently indexed query options:
hgnc_symbols
rank_score
variant_type
category
Args:
query(dict): A query dictionary for the database, from a query form.
            category(str): 'snv', 'sv', 'str', 'cancer_sv' or 'cancer'
            variant_type(list(str)): 'clinical' and/or 'research'
Possible query dict keys:
phenotype_terms
phenotype_groups
cohorts
Returns:
mongo_query : A dictionary in the mongo query format.
"""
        query = query or {}
        # Avoid a mutable default argument; fall back to clinical variants
        variant_type = variant_type or ["clinical"]
mongo_variant_query = {}
LOG.debug("Building a mongo query for %s" % query)
if query.get("hgnc_symbols"):
mongo_variant_query["hgnc_symbols"] = {"$in": query["hgnc_symbols"]}
mongo_variant_query["variant_type"] = {"$in": variant_type}
mongo_variant_query["category"] = category
select_cases = None
select_case_obj = None
mongo_case_query = {}
if query.get("phenotype_terms"):
mongo_case_query["phenotype_terms.phenotype_id"] = {
"$in": query["phenotype_terms"]
}
if query.get("phenotype_groups"):
mongo_case_query["phenotype_groups.phenotype_id"] = {
"$in": query["phenotype_groups"]
}
if query.get("cohorts"):
mongo_case_query["cohorts"] = {"$in": query["cohorts"]}
if mongo_case_query != {}:
mongo_case_query["owner"] = institute_id
            LOG.debug(
                "Search cases for selection set, using query {0}".format(
                    mongo_case_query
                )
            )
            select_case_obj = self.case_collection.find(mongo_case_query)
            select_cases = [case.get("display_name") for case in select_case_obj]
if query.get("similar_case"):
similar_case_display_name = query["similar_case"][0]
case_obj = self.case(
display_name=similar_case_display_name, institute_id=institute_id
)
if case_obj:
LOG.debug(
"Search for cases similar to %s", case_obj.get("display_name")
)
similar_cases = self.get_similar_cases(case_obj)
LOG.debug("Similar cases: %s", similar_cases)
select_cases = [similar[0] for similar in similar_cases]
else:
LOG.debug("Case %s not found.", similar_case_display_name)
if select_cases:
mongo_variant_query["case_id"] = {"$in": select_cases}
rank_score = query.get("rank_score") or 15
mongo_variant_query["rank_score"] = {"$gte": rank_score}
LOG.debug("Querying %s" % mongo_variant_query)
        return mongo_variant_query
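Usage sketch (the adapter instance name "store" and the institute id are placeholders; the resulting dictionary follows directly from the code above):
query = {"hgnc_symbols": ["POT1"], "rank_score": 20}
mongo_query = store.build_variant_query(
    query=query, institute_id="cust000", category="snv"
)
# mongo_query now resembles:
# {
#     "hgnc_symbols": {"$in": ["POT1"]},
#     "variant_type": {"$in": ["clinical"]},
#     "category": "snv",
#     "rank_score": {"$gte": 20},
# }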
Python | def clinsig_query(self, query, mongo_query):
""" Add clinsig filter values to the mongo query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
            clnsig_query(dict): a dictionary with clinsig key-values
"""
LOG.debug("clinsig is a query parameter")
trusted_revision_level = ["mult", "single", "exp", "guideline"]
rank = []
str_rank = []
clnsig_query = {}
for item in query["clinsig"]:
rank.append(int(item))
# search for human readable clinsig values in newer cases
rank.append(CLINSIG_MAP[int(item)])
str_rank.append(CLINSIG_MAP[int(item)])
if query.get("clinsig_confident_always_returned") == True:
LOG.debug("add CLINSIG filter with trusted_revision_level")
clnsig_query = {
"clnsig": {
"$elemMatch": {
"$or": [
{
"$and": [
{"value": {"$in": rank}},
{"revstat": {"$in": trusted_revision_level}},
]
},
{
"$and": [
{"value": re.compile("|".join(str_rank))},
{
"revstat": re.compile(
"|".join(trusted_revision_level)
)
},
]
},
]
}
}
}
else:
            LOG.debug(
                "add CLINSIG filter for rank: %s"
                % ", ".join(str(item) for item in query["clinsig"])
            )
clnsig_query = {
"clnsig": {
"$elemMatch": {
"$or": [
{"value": {"$in": rank}},
{"value": re.compile("|".join(str_rank))},
]
}
}
}
        return clnsig_query
""" Add clinsig filter values to the mongo query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
clinsig_query(dict): a dictionary with clinsig key-values
"""
LOG.debug("clinsig is a query parameter")
trusted_revision_level = ["mult", "single", "exp", "guideline"]
rank = []
str_rank = []
clnsig_query = {}
for item in query["clinsig"]:
rank.append(int(item))
# search for human readable clinsig values in newer cases
rank.append(CLINSIG_MAP[int(item)])
str_rank.append(CLINSIG_MAP[int(item)])
if query.get("clinsig_confident_always_returned") == True:
LOG.debug("add CLINSIG filter with trusted_revision_level")
clnsig_query = {
"clnsig": {
"$elemMatch": {
"$or": [
{
"$and": [
{"value": {"$in": rank}},
{"revstat": {"$in": trusted_revision_level}},
]
},
{
"$and": [
{"value": re.compile("|".join(str_rank))},
{
"revstat": re.compile(
"|".join(trusted_revision_level)
)
},
]
},
]
}
}
}
else:
LOG.debug(
"add CLINSIG filter for rank: %s" % ", ".join(str(query["clinsig"]))
)
clnsig_query = {
"clnsig": {
"$elemMatch": {
"$or": [
{"value": {"$in": rank}},
{"value": re.compile("|".join(str_rank))},
]
}
}
}
return clnsig_query |
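A sketch of the default branch, assuming CLINSIG_MAP maps 4 to "likely_pathogenic" and 5 to "pathogenic" (the real mapping is defined elsewhere in the codebase; "store" is a placeholder adapter):
query = {"clinsig": ["4", "5"]}
clnsig_filter = store.clinsig_query(query, {})
# With the assumed mapping, rank == [4, "likely_pathogenic", 5, "pathogenic"] and
# the returned filter resembles:
# {"clnsig": {"$elemMatch": {"$or": [
#     {"value": {"$in": [4, "likely_pathogenic", 5, "pathogenic"]}},
#     {"value": re.compile("likely_pathogenic|pathogenic")},
# ]}}}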
Python | def sv_coordinate_query(self, query):
""" Adds genomic coordinated-related filters to the query object
This method is called to buid coordinate query for sv variants
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
coordinate_query(dict): returned object contains coordinate filters for sv variant
"""
coordinate_query = None
chromosome_query = {
"$or": [{"chromosome": query["chrom"]}, {"end_chrom": query["chrom"]}]
}
if query.get("start") and query.get("end"):
# Query for overlapping intervals. Taking into account these cases:
# 1
# filter xxxxxxxxx
# Variant xxxxxxxx
# 2
# filter xxxxxxxxx
# Variant xxxxxxxx
# 3
# filter xxxxxxxxx
# Variant xx
# 4
# filter xxxxxxxxx
# Variant xxxxxxxxxxxxxx
position_query = {
"$or": [
{
"end": {"$gte": int(query["start"]), "$lte": int(query["end"])}
}, # 1
{
"position": {
"$lte": int(query["end"]),
"$gte": int(query["start"]),
}
}, # 2
{
"$and": [
{"position": {"$gte": int(query["start"])}},
{"end": {"$lte": int(query["end"])}},
]
}, # 3
{
"$and": [
{"position": {"$lte": int(query["start"])}},
{"end": {"$gte": int(query["end"])}},
]
}, # 4
]
}
coordinate_query = {"$and": [chromosome_query, position_query]}
else:
coordinate_query = chromosome_query
        return coordinate_query
""" Adds genomic coordinated-related filters to the query object
This method is called to buid coordinate query for sv variants
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
coordinate_query(dict): returned object contains coordinate filters for sv variant
"""
coordinate_query = None
chromosome_query = {
"$or": [{"chromosome": query["chrom"]}, {"end_chrom": query["chrom"]}]
}
if query.get("start") and query.get("end"):
# Query for overlapping intervals. Taking into account these cases:
# 1
# filter xxxxxxxxx
# Variant xxxxxxxx
# 2
# filter xxxxxxxxx
# Variant xxxxxxxx
# 3
# filter xxxxxxxxx
# Variant xx
# 4
# filter xxxxxxxxx
# Variant xxxxxxxxxxxxxx
position_query = {
"$or": [
{
"end": {"$gte": int(query["start"]), "$lte": int(query["end"])}
}, # 1
{
"position": {
"$lte": int(query["end"]),
"$gte": int(query["start"]),
}
}, # 2
{
"$and": [
{"position": {"$gte": int(query["start"])}},
{"end": {"$lte": int(query["end"])}},
]
}, # 3
{
"$and": [
{"position": {"$lte": int(query["start"])}},
{"end": {"$gte": int(query["end"])}},
]
}, # 4
]
}
coordinate_query = {"$and": [chromosome_query, position_query]}
else:
coordinate_query = chromosome_query
return coordinate_query |
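For example, a search on chromosome 1 between positions 1000 and 2000 combines the chromosome clause with the four interval-overlap clauses:
coords = store.sv_coordinate_query({"chrom": "1", "start": "1000", "end": "2000"})
# coords == {"$and": [
#     {"$or": [{"chromosome": "1"}, {"end_chrom": "1"}]},
#     {"$or": [{"end": {"$gte": 1000, "$lte": 2000}}, ...]},  # overlap cases 1-4
# ]}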
Python | def gene_filter(self, query, mongo_query):
""" Adds gene-related filters to the query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
            gene_query(list): gene and panel filter clauses to be combined by the
                caller; with a single filter type, mongo_query is updated in place
"""
LOG.debug("Adding panel and genes-related parameters to the query")
gene_query = []
hgnc_symbols = query.get("hgnc_symbols")
gene_panels = query.get("gene_panels")
if hgnc_symbols and gene_panels:
gene_query.append({"hgnc_symbols": {"$in": hgnc_symbols}})
gene_query.append({"panels": {"$in": gene_panels}})
elif hgnc_symbols:
mongo_query["hgnc_symbols"] = {"$in": hgnc_symbols}
LOG.debug("Adding hgnc_symbols: %s to query" % ", ".join(hgnc_symbols))
        elif gene_panels:
mongo_query["panels"] = {"$in": gene_panels}
        return gene_query
""" Adds gene-related filters to the query object
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
mongo_query(dict): returned object contains gene and panel-related filters
"""
LOG.debug("Adding panel and genes-related parameters to the query")
gene_query = []
hgnc_symbols = query.get("hgnc_symbols")
gene_panels = query.get("gene_panels")
if hgnc_symbols and gene_panels:
gene_query.append({"hgnc_symbols": {"$in": hgnc_symbols}})
gene_query.append({"panels": {"$in": gene_panels}})
elif hgnc_symbols:
mongo_query["hgnc_symbols"] = {"$in": hgnc_symbols}
LOG.debug("Adding hgnc_symbols: %s to query" % ", ".join(hgnc_symbols))
elif gene_panels: # gene_panels
mongo_query["panels"] = {"$in": gene_panels}
return gene_query |
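When both symbols and panels are given, the clauses are returned for the caller to combine; with a single filter type the filter goes straight into mongo_query. A sketch (the panel name is illustrative):
mongo_query = {}
clauses = store.gene_filter(
    {"hgnc_symbols": ["POT1"], "gene_panels": ["panel1"]}, mongo_query
)
# clauses == [{"hgnc_symbols": {"$in": ["POT1"]}}, {"panels": {"$in": ["panel1"]}}]
# mongo_query is left untouched in this branch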
Python | def secondary_query(self, query, mongo_query, secondary_filter=None):
"""Creates a secondary query object based on secondary parameters specified by user
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
            mongo_secondary_query(list): a list of secondary query clauses
"""
LOG.debug("Creating a query object with secondary parameters")
mongo_secondary_query = []
# loop over secondary query criteria
for criterion in SECONDARY_CRITERIA:
if not query.get(criterion):
continue
if criterion == "gnomad_frequency":
gnomad = query.get("gnomad_frequency")
if gnomad == "-1":
                    # -1 means to exclude all variants that exist in gnomad
mongo_query["gnomad_frequency"] = {"$exists": False}
else:
                    # Keep variants below the threshold, or with no gnomad frequency
mongo_secondary_query.append(
{
"$or": [
{"gnomad_frequency": {"$lt": float(gnomad)}},
{"gnomad_frequency": {"$exists": False}},
]
}
)
LOG.debug("Adding gnomad_frequency to query")
if criterion == "local_obs":
local_obs = query.get("local_obs")
mongo_secondary_query.append(
{
"$or": [
{"local_obs_old": None},
{"local_obs_old": {"$lt": local_obs + 1}},
]
}
)
if criterion in ["clingen_ngi", "swegen"]:
mongo_secondary_query.append(
{
"$or": [
{criterion: {"$exists": False}},
{criterion: {"$lt": query[criterion] + 1}},
]
}
)
if criterion == "spidex_human":
# construct spidex query. Build the or part starting with empty SPIDEX values
spidex_human = query["spidex_human"]
spidex_query_or_part = []
if "not_reported" in spidex_human:
spidex_query_or_part.append({"spidex": {"$exists": False}})
for spidex_level in SPIDEX_HUMAN:
if spidex_level in spidex_human:
spidex_query_or_part.append(
{
"$or": [
{
"$and": [
{
"spidex": {
"$gt": SPIDEX_HUMAN[spidex_level][
"neg"
][0]
}
},
{
"spidex": {
"$lt": SPIDEX_HUMAN[spidex_level][
"neg"
][1]
}
},
]
},
{
"$and": [
{
"spidex": {
"$gt": SPIDEX_HUMAN[spidex_level][
"pos"
][0]
}
},
{
"spidex": {
"$lt": SPIDEX_HUMAN[spidex_level][
"pos"
][1]
}
},
]
},
]
}
)
mongo_secondary_query.append({"$or": spidex_query_or_part})
if criterion == "cadd_score":
cadd = query["cadd_score"]
cadd_query = {"cadd_score": {"$gt": float(cadd)}}
LOG.debug("Adding cadd_score: %s to query", cadd)
if query.get("cadd_inclusive") is True:
cadd_query = {
"$or": [cadd_query, {"cadd_score": {"$exists": False}}]
}
LOG.debug("Adding cadd inclusive to query")
mongo_secondary_query.append(cadd_query)
if criterion in [
"genetic_models",
"functional_annotations",
"region_annotations",
]:
criterion_values = query[criterion]
if criterion == "genetic_models":
mongo_secondary_query.append({criterion: {"$in": criterion_values}})
else:
# filter key will be genes.[criterion (minus final char)]
mongo_secondary_query.append(
{".".join(["genes", criterion[:-1]]): {"$in": criterion_values}}
)
LOG.debug(
"Adding {0}: {1} to query".format(
criterion, ", ".join(criterion_values)
)
)
if criterion == "size":
size = query["size"]
size_query = {"length": {"$gt": int(size)}}
LOG.debug("Adding length: %s to query" % size)
if query.get("size_shorter"):
size_query = {
"$or": [
{"length": {"$lt": int(size)}},
{"length": {"$exists": False}},
]
}
LOG.debug("Adding size less than, undef inclusive to query.")
mongo_secondary_query.append(size_query)
if criterion == "svtype":
svtype = query["svtype"]
mongo_secondary_query.append({"sub_category": {"$in": svtype}})
LOG.debug("Adding SV_type %s to query" % ", ".join(svtype))
if criterion == "decipher":
mongo_query["decipher"] = {"$exists": True}
LOG.debug("Adding decipher to query")
if criterion == "depth":
LOG.debug("add depth filter")
mongo_secondary_query.append(
{"tumor.read_depth": {"$gt": query.get("depth")}}
)
if criterion == "alt_count":
LOG.debug("add min alt count filter")
mongo_secondary_query.append(
{"tumor.alt_depth": {"$gt": query.get("alt_count")}}
)
if criterion == "control_frequency":
LOG.debug("add minimum control frequency filter")
mongo_secondary_query.append(
{"normal.alt_freq": {"$lt": float(query.get("control_frequency"))}}
)
if criterion == "mvl_tag":
LOG.debug("add managed variant list filter")
mongo_secondary_query.append({"mvl_tag": {"$exists": True}})
        return mongo_secondary_query
"""Creates a secondary query object based on secondary parameters specified by user
Args:
query(dict): a dictionary of query filters specified by the users
mongo_query(dict): the query that is going to be submitted to the database
Returns:
mongo_secondary_query(list): a dictionary with secondary query parameters
"""
LOG.debug("Creating a query object with secondary parameters")
mongo_secondary_query = []
# loop over secondary query criteria
for criterion in SECONDARY_CRITERIA:
if not query.get(criterion):
continue
if criterion == "gnomad_frequency":
gnomad = query.get("gnomad_frequency")
if gnomad == "-1":
# -1 means to exclude all variants that exists in gnomad
mongo_query["gnomad_frequency"] = {"$exists": False}
else:
# Replace comma with dot
mongo_secondary_query.append(
{
"$or": [
{"gnomad_frequency": {"$lt": float(gnomad)}},
{"gnomad_frequency": {"$exists": False}},
]
}
)
LOG.debug("Adding gnomad_frequency to query")
if criterion == "local_obs":
local_obs = query.get("local_obs")
mongo_secondary_query.append(
{
"$or": [
{"local_obs_old": None},
{"local_obs_old": {"$lt": local_obs + 1}},
]
}
)
if criterion in ["clingen_ngi", "swegen"]:
mongo_secondary_query.append(
{
"$or": [
{criterion: {"$exists": False}},
{criterion: {"$lt": query[criterion] + 1}},
]
}
)
if criterion == "spidex_human":
# construct spidex query. Build the or part starting with empty SPIDEX values
spidex_human = query["spidex_human"]
spidex_query_or_part = []
if "not_reported" in spidex_human:
spidex_query_or_part.append({"spidex": {"$exists": False}})
for spidex_level in SPIDEX_HUMAN:
if spidex_level in spidex_human:
spidex_query_or_part.append(
{
"$or": [
{
"$and": [
{
"spidex": {
"$gt": SPIDEX_HUMAN[spidex_level][
"neg"
][0]
}
},
{
"spidex": {
"$lt": SPIDEX_HUMAN[spidex_level][
"neg"
][1]
}
},
]
},
{
"$and": [
{
"spidex": {
"$gt": SPIDEX_HUMAN[spidex_level][
"pos"
][0]
}
},
{
"spidex": {
"$lt": SPIDEX_HUMAN[spidex_level][
"pos"
][1]
}
},
]
},
]
}
)
mongo_secondary_query.append({"$or": spidex_query_or_part})
if criterion == "cadd_score":
cadd = query["cadd_score"]
cadd_query = {"cadd_score": {"$gt": float(cadd)}}
LOG.debug("Adding cadd_score: %s to query", cadd)
if query.get("cadd_inclusive") is True:
cadd_query = {
"$or": [cadd_query, {"cadd_score": {"$exists": False}}]
}
LOG.debug("Adding cadd inclusive to query")
mongo_secondary_query.append(cadd_query)
if criterion in [
"genetic_models",
"functional_annotations",
"region_annotations",
]:
criterion_values = query[criterion]
if criterion == "genetic_models":
mongo_secondary_query.append({criterion: {"$in": criterion_values}})
else:
# filter key will be genes.[criterion (minus final char)]
mongo_secondary_query.append(
{".".join(["genes", criterion[:-1]]): {"$in": criterion_values}}
)
LOG.debug(
"Adding {0}: {1} to query".format(
criterion, ", ".join(criterion_values)
)
)
if criterion == "size":
size = query["size"]
size_query = {"length": {"$gt": int(size)}}
LOG.debug("Adding length: %s to query" % size)
if query.get("size_shorter"):
size_query = {
"$or": [
{"length": {"$lt": int(size)}},
{"length": {"$exists": False}},
]
}
LOG.debug("Adding size less than, undef inclusive to query.")
mongo_secondary_query.append(size_query)
if criterion == "svtype":
svtype = query["svtype"]
mongo_secondary_query.append({"sub_category": {"$in": svtype}})
LOG.debug("Adding SV_type %s to query" % ", ".join(svtype))
if criterion == "decipher":
mongo_query["decipher"] = {"$exists": True}
LOG.debug("Adding decipher to query")
if criterion == "depth":
LOG.debug("add depth filter")
mongo_secondary_query.append(
{"tumor.read_depth": {"$gt": query.get("depth")}}
)
if criterion == "alt_count":
LOG.debug("add min alt count filter")
mongo_secondary_query.append(
{"tumor.alt_depth": {"$gt": query.get("alt_count")}}
)
if criterion == "control_frequency":
LOG.debug("add minimum control frequency filter")
mongo_secondary_query.append(
{"normal.alt_freq": {"$lt": float(query.get("control_frequency"))}}
)
if criterion == "mvl_tag":
LOG.debug("add managed variant list filter")
mongo_secondary_query.append({"mvl_tag": {"$exists": True}})
return mongo_secondary_query |
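A sketch combining two of the criteria handled above (both keys are assumed to be present in SECONDARY_CRITERIA, as the loop implies):
query = {"gnomad_frequency": "0.01", "cadd_score": "20", "cadd_inclusive": True}
clauses = store.secondary_query(query, {})
# clauses resembles:
# [
#     {"$or": [{"gnomad_frequency": {"$lt": 0.01}},
#              {"gnomad_frequency": {"$exists": False}}]},
#     {"$or": [{"cadd_score": {"$gt": 20.0}},
#              {"cadd_score": {"$exists": False}}]},
# ]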
Python | def send_request(self, url):
"""Sends the actual request to the server and returns the response
Accepts:
url(str): ex. https://rest.ensembl.org/overlap/id/ENSG00000157764?feature=transcript
Returns:
            data(dict): dictionary from the json response; on failure the
                caught exception (HTTPError or ValueError) is returned instead
"""
data = {}
try:
request = Request(url, headers=HEADERS)
response = urlopen(request)
content = response.read()
if content:
data = json.loads(content)
except HTTPError as e:
LOG.info("Request failed for url {0}: Error: {1}\n".format(url, e))
data = e
except ValueError as e:
LOG.info("Request failed for url {0}: Error: {1}\n".format(url, e))
data = e
        return data
"""Sends the actual request to the server and returns the response
Accepts:
url(str): ex. https://rest.ensembl.org/overlap/id/ENSG00000157764?feature=transcript
Returns:
data(dict): dictionary from json response
"""
data = {}
try:
request = Request(url, headers=HEADERS)
response = urlopen(request)
content = response.read()
if content:
data = json.loads(content)
except HTTPError as e:
LOG.info("Request failed for url {0}: Error: {1}\n".format(url, e))
data = e
except ValueError as e:
LOG.info("Request failed for url {0}: Error: {1}\n".format(url, e))
data = e
return data |
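A usage sketch with the Ensembl REST endpoint from the docstring ("client" is a placeholder instance; HEADERS is assumed to request JSON, as the json.loads call implies):
url = "https://rest.ensembl.org/overlap/id/ENSG00000157764?feature=transcript"
data = client.send_request(url)
if isinstance(data, Exception):
    # an HTTPError or ValueError was caught and returned
    LOG.warning("Lookup failed for %s: %s", url, data)
else:
    LOG.info("Received %d records", len(data))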
Python | def _create_biomart_xml(self, filters=None, attributes=None):
"""Convert biomart query params into biomart xml query
Accepts:
filters(dict): keys are filter names and values are filter values
attributes(list): a list of attributes
Returns:
            xml(str): the query xml document as a string
"""
filters = filters or {}
attributes = attributes or []
filter_lines = self._xml_filters(filters)
attribute_lines = self._xml_attributes(attributes)
xml_lines = [
'<?xml version="1.0" encoding="UTF-8"?>',
"<!DOCTYPE Query>",
'<Query virtualSchemaName = "default" formatter = "TSV" header = "0" uniqueRows'
' = "0" count = "" datasetConfigVersion = "0.6" completionStamp = "1">',
"",
'\t<Dataset name = "hsapiens_gene_ensembl" interface = "default" >',
]
for line in filter_lines:
xml_lines.append("\t\t" + line)
for line in attribute_lines:
xml_lines.append("\t\t" + line)
xml_lines += ["\t</Dataset>", "</Query>"]
return "\n".join(xml_lines) | def _create_biomart_xml(self, filters=None, attributes=None):
"""Convert biomart query params into biomart xml query
Accepts:
filters(dict): keys are filter names and values are filter values
attributes(list): a list of attributes
Returns:
xml: a query xml file
"""
filters = filters or {}
attributes = attributes or []
filter_lines = self._xml_filters(filters)
attribute_lines = self._xml_attributes(attributes)
xml_lines = [
'<?xml version="1.0" encoding="UTF-8"?>',
"<!DOCTYPE Query>",
'<Query virtualSchemaName = "default" formatter = "TSV" header = "0" uniqueRows'
' = "0" count = "" datasetConfigVersion = "0.6" completionStamp = "1">',
"",
'\t<Dataset name = "hsapiens_gene_ensembl" interface = "default" >',
]
for line in filter_lines:
xml_lines.append("\t\t" + line)
for line in attribute_lines:
xml_lines.append("\t\t" + line)
xml_lines += ["\t</Dataset>", "</Query>"]
return "\n".join(xml_lines) |
Python | def _xml_filters(self, filters):
"""Creates a filter line for the biomart xml document
Accepts:
filters(dict): keys are filter names and values are filter values
Returns:
formatted_lines(list[str]): List of formatted xml filter lines
"""
formatted_lines = []
for filter_name in filters:
value = filters[filter_name]
if isinstance(value, str):
formatted_lines.append(
'<Filter name = "{0}" value = "{1}"/>'.format(filter_name, value)
)
else:
formatted_lines.append(
'<Filter name = "{0}" value = "{1}"/>'.format(
filter_name, ",".join(value)
)
)
        return formatted_lines
"""Creates a filter line for the biomart xml document
Accepts:
filters(dict): keys are filter names and values are filter values
Returns:
formatted_lines(list[str]): List of formatted xml filter lines
"""
formatted_lines = []
for filter_name in filters:
value = filters[filter_name]
if isinstance(value, str):
formatted_lines.append(
'<Filter name = "{0}" value = "{1}"/>'.format(filter_name, value)
)
else:
formatted_lines.append(
'<Filter name = "{0}" value = "{1}"/>'.format(
filter_name, ",".join(value)
)
)
return formatted_lines |
Python | def _xml_attributes(self, attributes):
"""Creates an attribute line for the biomart xml document
Accepts:
attributes(list): attribute names
Returns:
formatted_lines(list(str)): list of formatted xml attribute lines
"""
formatted_lines = []
for attr in attributes:
formatted_lines.append('<Attribute name = "{}" />'.format(attr))
        return formatted_lines
"""Creates an attribute line for the biomart xml document
Accepts:
attributes(list): attribute names
Returns:
formatted_lines(list(str)): list of formatted xml attribute lines
"""
formatted_lines = []
for attr in attributes:
formatted_lines.append('<Attribute name = "{}" />'.format(attr))
return formatted_lines |
Python | def parse_case_data(
config=None,
ped=None,
owner=None,
vcf_snv=None,
vcf_sv=None,
vcf_cancer=None,
vcf_cancer_sv=None,
vcf_str=None,
peddy_ped=None,
peddy_sex=None,
peddy_check=None,
delivery_report=None,
multiqc=None,
):
"""Parse all data necessary for loading a case into scout
    This can be done either by providing a VCF file and other information
    on the command line, or by specifying all the information in a config file.
Please see Scout documentation for further instructions.
Args:
config(dict): A yaml formatted config file
ped(iterable(str)): A ped formatted family file
owner(str): The institute that owns a case
vcf_snv(str): Path to a vcf file
vcf_str(str): Path to a VCF file
vcf_sv(str): Path to a vcf file
vcf_cancer(str): Path to a vcf file
vcf_cancer_sv(str): Path to a vcf file
peddy_ped(str): Path to a peddy ped
multiqc(str): Path to dir with multiqc information
Returns:
config_data(dict): Holds all the necessary information for loading
Scout
"""
config_data = copy.deepcopy(config) or {}
# Default the analysis date to now if not specified in load config
config_data["analysis_date"] = get_correct_date(config_data.get("analysis_date"))
    # If the family information is in a ped file we need to parse it
if ped:
family_id, samples = parse_ped(ped)
config_data["family"] = family_id
config_data["samples"] = samples
    # Each case has to have an owner. If not provided in the config file it
    # needs to be given as an argument
if "owner" not in config_data:
if not owner:
raise SyntaxError("Case has no owner")
else:
config_data["owner"] = owner
if "gene_panels" in config_data:
# handle whitespace in gene panel names
config_data["gene_panels"] = [
panel.strip() for panel in config_data["gene_panels"]
]
config_data["default_gene_panels"] = [
panel.strip() for panel in config_data["default_gene_panels"]
]
##################### Add information from peddy if existing #####################
config_data["peddy_ped"] = peddy_ped or config_data.get("peddy_ped")
config_data["peddy_sex_check"] = peddy_sex or config_data.get("peddy_sex")
config_data["peddy_ped_check"] = peddy_check or config_data.get("peddy_check")
# This will add information from peddy to the individuals
add_peddy_information(config_data)
##################### Add multiqc information #####################
config_data["multiqc"] = multiqc or config_data.get("multiqc")
config_data["vcf_snv"] = vcf_snv if vcf_snv else config_data.get("vcf_snv")
config_data["vcf_sv"] = vcf_sv if vcf_sv else config_data.get("vcf_sv")
config_data["vcf_str"] = vcf_str if vcf_str else config_data.get("vcf_str")
LOG.debug("Config vcf_str set to {0}".format(config_data["vcf_str"]))
config_data["vcf_cancer"] = (
vcf_cancer if vcf_cancer else config_data.get("vcf_cancer")
)
config_data["vcf_cancer_sv"] = (
vcf_cancer_sv if vcf_cancer_sv else config_data.get("vcf_cancer_sv")
)
config_data["delivery_report"] = (
delivery_report if delivery_report else config_data.get("delivery_report")
)
config_data["rank_model_version"] = str(config_data.get("rank_model_version", ""))
config_data["rank_score_threshold"] = config_data.get("rank_score_threshold", 0)
config_data["sv_rank_model_version"] = str(
config_data.get("sv_rank_model_version", "")
)
config_data["track"] = config_data.get("track", "rare")
if config_data["vcf_cancer"] or config_data["vcf_cancer_sv"]:
config_data["track"] = "cancer"
    return config_data
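A minimal invocation sketch (paths are placeholders; anything not passed falls back to the config or to a default):
config_data = parse_case_data(
    config={"family": "internal_id_1", "samples": []},
    owner="cust000",
    vcf_snv="/path/to/snv.vcf.gz",
)
# config_data["owner"] == "cust000"
# config_data["track"] == "rare"              # no cancer VCF was given
# config_data["rank_score_threshold"] == 0    # default threshold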
Python | def add_peddy_information(config_data):
"""Add information from peddy outfiles to the individuals
Args:
config_data(dict)
"""
ped_info = {}
ped_check = {}
sex_check = {}
if config_data.get("peddy_ped"):
file_handle = open(config_data["peddy_ped"], "r")
for ind_info in parse_peddy_ped(file_handle):
ped_info[ind_info["sample_id"]] = ind_info
if config_data.get("peddy_ped_check"):
file_handle = open(config_data["peddy_ped_check"], "r")
for pair_info in parse_peddy_ped_check(file_handle):
ped_check[(pair_info["sample_a"], pair_info["sample_b"])] = pair_info
if config_data.get("peddy_sex_check"):
file_handle = open(config_data["peddy_sex_check"], "r")
for ind_info in parse_peddy_sex_check(file_handle):
sex_check[ind_info["sample_id"]] = ind_info
if not ped_info:
return
analysis_inds = {}
for ind in config_data["samples"]:
ind_id = ind["sample_id"]
analysis_inds[ind_id] = ind
for ind_id in analysis_inds:
ind = analysis_inds[ind_id]
# Check if peddy has inferred the ancestry
if ind_id in ped_info:
ind["predicted_ancestry"] = ped_info[ind_id].get(
"ancestry-prediction", "UNKNOWN"
)
# Check if peddy has inferred the sex
if ind_id in sex_check:
if sex_check[ind_id]["error"]:
ind["confirmed_sex"] = False
else:
ind["confirmed_sex"] = True
        # Check if peddy has confirmed parental relations
for parent in ["mother", "father"]:
            # Skip this parent if it is not present in the pedigree
if ind[parent] == "0":
continue
# Check if the child/parent pair is in peddy data
for pair in ped_check:
if not (ind_id in pair and ind[parent] in pair):
continue
# If there is a parent error we mark that
if ped_check[pair]["parent_error"]:
analysis_inds[ind[parent]]["confirmed_parent"] = False
continue
# Else if parent confirmation has not been done
if "confirmed_parent" not in analysis_inds[ind[parent]]:
                    # Set confirmation to True
                    analysis_inds[ind[parent]]["confirmed_parent"] = True
"""Add information from peddy outfiles to the individuals
Args:
config_data(dict)
"""
ped_info = {}
ped_check = {}
sex_check = {}
relations = []
if config_data.get("peddy_ped"):
file_handle = open(config_data["peddy_ped"], "r")
for ind_info in parse_peddy_ped(file_handle):
ped_info[ind_info["sample_id"]] = ind_info
if config_data.get("peddy_ped_check"):
file_handle = open(config_data["peddy_ped_check"], "r")
for pair_info in parse_peddy_ped_check(file_handle):
ped_check[(pair_info["sample_a"], pair_info["sample_b"])] = pair_info
if config_data.get("peddy_sex_check"):
file_handle = open(config_data["peddy_sex_check"], "r")
for ind_info in parse_peddy_sex_check(file_handle):
sex_check[ind_info["sample_id"]] = ind_info
if not ped_info:
return
analysis_inds = {}
for ind in config_data["samples"]:
ind_id = ind["sample_id"]
analysis_inds[ind_id] = ind
for ind_id in analysis_inds:
ind = analysis_inds[ind_id]
# Check if peddy has inferred the ancestry
if ind_id in ped_info:
ind["predicted_ancestry"] = ped_info[ind_id].get(
"ancestry-prediction", "UNKNOWN"
)
# Check if peddy has inferred the sex
if ind_id in sex_check:
if sex_check[ind_id]["error"]:
ind["confirmed_sex"] = False
else:
ind["confirmed_sex"] = True
# Check if peddy har confirmed parental relations
for parent in ["mother", "father"]:
# If we are looking at individual with parents
if ind[parent] == "0":
continue
# Check if the child/parent pair is in peddy data
for pair in ped_check:
if not (ind_id in pair and ind[parent] in pair):
continue
# If there is a parent error we mark that
if ped_check[pair]["parent_error"]:
analysis_inds[ind[parent]]["confirmed_parent"] = False
continue
# Else if parent confirmation has not been done
if "confirmed_parent" not in analysis_inds[ind[parent]]:
# Set confirmatio to True
analysis_inds[ind[parent]]["confirmed_parent"] = True |
Python | def parse_case(config):
"""Parse case information from config or PED files.
Args:
config (dict): case config with detailed information
Returns:
dict: parsed case data
"""
if "owner" not in config:
raise ConfigError("A case has to have a owner")
if "family" not in config:
raise ConfigError("A case has to have a 'family'")
individuals = parse_individuals(config["samples"])
case_data = {
"owner": config["owner"],
"collaborators": [config["owner"]],
"case_id": config["family"],
"display_name": config.get("family_name", config["family"]),
"genome_build": config.get("human_genome_build"),
"rank_model_version": str(config.get("rank_model_version", "")),
"rank_score_threshold": config.get("rank_score_threshold", 0),
"sv_rank_model_version": str(config.get("sv_rank_model_version", "")),
"analysis_date": config.get("analysis_date"),
"individuals": individuals,
"vcf_files": {
"vcf_snv": config.get("vcf_snv"),
"vcf_sv": config.get("vcf_sv"),
"vcf_str": config.get("vcf_str"),
"vcf_cancer": config.get("vcf_cancer"),
"vcf_cancer_sv": config.get("vcf_cancer_sv"),
"vcf_snv_research": config.get("vcf_snv_research"),
"vcf_sv_research": config.get("vcf_sv_research"),
"vcf_cancer_research": config.get("vcf_cancer_research"),
"vcf_cancer_sv_research": config.get("vcf_cancer_sv_research"),
},
"default_panels": config.get("default_gene_panels", []),
"gene_panels": config.get("gene_panels", []),
"assignee": config.get("assignee"),
"peddy_ped": config.get("peddy_ped"),
"peddy_sex": config.get("peddy_sex"),
"peddy_check": config.get("peddy_check"),
"delivery_report": config.get("delivery_report"),
"multiqc": config.get("multiqc"),
"track": config.get("track", "rare"),
"chromograph_image_files": config.get("chromograph_image_files"),
"chromograph_prefixes": config.get("chromograph_prefixes"),
}
    # add the pedigree figure, this is an xml file which is dumped in the db
if "madeline" in config:
mad_path = Path(config["madeline"])
if not mad_path.exists():
raise ValueError("madeline path not found: {}".format(mad_path))
with mad_path.open("r") as in_handle:
case_data["madeline_info"] = in_handle.read()
if (
case_data["vcf_files"]["vcf_cancer"]
or case_data["vcf_files"]["vcf_cancer_research"]
or case_data["vcf_files"]["vcf_cancer_sv"]
or case_data["vcf_files"]["vcf_cancer_sv_research"]
):
case_data["track"] = "cancer"
case_data["analysis_date"] = get_correct_date(case_data.get("analysis_date"))
    return case_data
"""Parse case information from config or PED files.
Args:
config (dict): case config with detailed information
Returns:
dict: parsed case data
"""
if "owner" not in config:
raise ConfigError("A case has to have a owner")
if "family" not in config:
raise ConfigError("A case has to have a 'family'")
individuals = parse_individuals(config["samples"])
case_data = {
"owner": config["owner"],
"collaborators": [config["owner"]],
"case_id": config["family"],
"display_name": config.get("family_name", config["family"]),
"genome_build": config.get("human_genome_build"),
"rank_model_version": str(config.get("rank_model_version", "")),
"rank_score_threshold": config.get("rank_score_threshold", 0),
"sv_rank_model_version": str(config.get("sv_rank_model_version", "")),
"analysis_date": config.get("analysis_date"),
"individuals": individuals,
"vcf_files": {
"vcf_snv": config.get("vcf_snv"),
"vcf_sv": config.get("vcf_sv"),
"vcf_str": config.get("vcf_str"),
"vcf_cancer": config.get("vcf_cancer"),
"vcf_cancer_sv": config.get("vcf_cancer_sv"),
"vcf_snv_research": config.get("vcf_snv_research"),
"vcf_sv_research": config.get("vcf_sv_research"),
"vcf_cancer_research": config.get("vcf_cancer_research"),
"vcf_cancer_sv_research": config.get("vcf_cancer_sv_research"),
},
"default_panels": config.get("default_gene_panels", []),
"gene_panels": config.get("gene_panels", []),
"assignee": config.get("assignee"),
"peddy_ped": config.get("peddy_ped"),
"peddy_sex": config.get("peddy_sex"),
"peddy_check": config.get("peddy_check"),
"delivery_report": config.get("delivery_report"),
"multiqc": config.get("multiqc"),
"track": config.get("track", "rare"),
"chromograph_image_files": config.get("chromograph_image_files"),
"chromograph_prefixes": config.get("chromograph_prefixes"),
}
# add the pedigree figure, this is a xml file which is dumped in the db
if "madeline" in config:
mad_path = Path(config["madeline"])
if not mad_path.exists():
raise ValueError("madeline path not found: {}".format(mad_path))
with mad_path.open("r") as in_handle:
case_data["madeline_info"] = in_handle.read()
if (
case_data["vcf_files"]["vcf_cancer"]
or case_data["vcf_files"]["vcf_cancer_research"]
or case_data["vcf_files"]["vcf_cancer_sv"]
or case_data["vcf_files"]["vcf_cancer_sv_research"]
):
case_data["track"] = "cancer"
case_data["analysis_date"] = get_correct_date(case_data.get("analysis_date"))
return case_data |
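A sketch of the smallest config that passes the checks above (the empty sample list is purely for brevity; a real config needs individual entries that parse_individuals accepts):
config = {
    "owner": "cust000",
    "family": "internal_id_1",
    "samples": [],
    "vcf_snv": "/path/to/snv.vcf.gz",
}
case_data = parse_case(config)
# case_data["case_id"] == "internal_id_1"
# case_data["display_name"] == "internal_id_1"   # falls back to "family"
# case_data["track"] == "rare"                   # no cancer VCFs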
Python | def case(self, case_id=None, institute_id=None, display_name=None):
"""Fetches a single case from database
Use either the _id or combination of institute_id and display_name
Args:
            case_id(str): _id for a case
institute_id(str):
display_name(str)
        Returns:
            A single case or None if no case matches the query
"""
query = {}
if case_id:
query["_id"] = case_id
LOG.info("Fetching case %s", case_id)
else:
if not (institute_id and display_name):
raise ValueError("Have to provide both institute_id and display_name")
LOG.info("Fetching case %s institute %s", display_name, institute_id)
query["owner"] = institute_id
query["display_name"] = display_name
        return self.case_collection.find_one(query)
"""Fetches a single case from database
Use either the _id or combination of institute_id and display_name
Args:
case_id(str): _id for a caes
institute_id(str):
display_name(str)
Yields:
A single Case
"""
query = {}
if case_id:
query["_id"] = case_id
LOG.info("Fetching case %s", case_id)
else:
if not (institute_id and display_name):
raise ValueError("Have to provide both institute_id and display_name")
LOG.info("Fetching case %s institute %s", display_name, institute_id)
query["owner"] = institute_id
query["display_name"] = display_name
return self.case_collection.find_one(query) |
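Both lookup styles, as a sketch (ids are placeholders):
case_obj = store.case(case_id="internal_id_1")
# or, for a known institute:
case_obj = store.case(institute_id="cust000", display_name="217_1")
# passing only one of institute_id/display_name raises ValueError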
Python | def load_case(self, config_data, update=False):
"""Load a case into the database
        Check that the owner institute exists.
Args:
config_data(dict): A dictionary with all the necessary information
update(bool): If existing case should be updated
Returns:
case_obj(dict)
"""
# Check that the owner exists in the database
institute_obj = self.institute(config_data["owner"])
if not institute_obj:
raise IntegrityError(
"Institute '%s' does not exist in database" % config_data["owner"]
)
# Parse the case information
parsed_case = parse_case(config=config_data)
# Build the case object
case_obj = build_case(parsed_case, self)
# Check if case exists with old case id
old_caseid = "-".join([case_obj["owner"], case_obj["display_name"]])
old_case = self.case(old_caseid)
# This is to keep sanger order and validation status
old_sanger_variants = self.case_sanger_variants(case_obj["_id"])
if old_case:
LOG.info(
"Update case id for existing case: %s -> %s",
old_caseid,
case_obj["_id"],
)
self.update_caseid(old_case, case_obj["_id"])
update = True
# Check if case exists in database
existing_case = self.case(case_obj["_id"])
if existing_case and not update:
raise IntegrityError("Case %s already exists in database" % case_obj["_id"])
files = [
{"file_name": "vcf_snv", "variant_type": "clinical", "category": "snv"},
{"file_name": "vcf_sv", "variant_type": "clinical", "category": "sv"},
{
"file_name": "vcf_cancer",
"variant_type": "clinical",
"category": "cancer",
},
{
"file_name": "vcf_cancer_sv",
"variant_type": "clinical",
"category": "cancer_sv",
},
{"file_name": "vcf_str", "variant_type": "clinical", "category": "str"},
]
try:
for vcf_file in files:
# Check if file exists
if not case_obj["vcf_files"].get(vcf_file["file_name"]):
LOG.debug("didn't find {}, skipping".format(vcf_file["file_name"]))
continue
variant_type = vcf_file["variant_type"]
category = vcf_file["category"]
if update:
self.delete_variants(
case_id=case_obj["_id"],
variant_type=variant_type,
category=category,
)
self.load_variants(
case_obj=case_obj,
variant_type=variant_type,
category=category,
rank_threshold=case_obj.get("rank_score_threshold", 5),
)
except (IntegrityError, ValueError, ConfigError, KeyError) as error:
LOG.warning(error)
if existing_case and update:
case_obj["rerun_requested"] = False
if case_obj["status"] in ["active", "archived"]:
case_obj["status"] = "inactive"
self.update_case(case_obj)
# update Sanger status for the new inserted variants
self.update_case_sanger_variants(
institute_obj, case_obj, old_sanger_variants
)
else:
LOG.info("Loading case %s into database", case_obj["display_name"])
self._add_case(case_obj)
        return case_obj
"""Load a case into the database
Check if the owner and the institute exists.
Args:
config_data(dict): A dictionary with all the necessary information
update(bool): If existing case should be updated
Returns:
case_obj(dict)
"""
# Check that the owner exists in the database
institute_obj = self.institute(config_data["owner"])
if not institute_obj:
raise IntegrityError(
"Institute '%s' does not exist in database" % config_data["owner"]
)
# Parse the case information
parsed_case = parse_case(config=config_data)
# Build the case object
case_obj = build_case(parsed_case, self)
# Check if case exists with old case id
old_caseid = "-".join([case_obj["owner"], case_obj["display_name"]])
old_case = self.case(old_caseid)
# This is to keep sanger order and validation status
old_sanger_variants = self.case_sanger_variants(case_obj["_id"])
if old_case:
LOG.info(
"Update case id for existing case: %s -> %s",
old_caseid,
case_obj["_id"],
)
self.update_caseid(old_case, case_obj["_id"])
update = True
# Check if case exists in database
existing_case = self.case(case_obj["_id"])
if existing_case and not update:
raise IntegrityError("Case %s already exists in database" % case_obj["_id"])
files = [
{"file_name": "vcf_snv", "variant_type": "clinical", "category": "snv"},
{"file_name": "vcf_sv", "variant_type": "clinical", "category": "sv"},
{
"file_name": "vcf_cancer",
"variant_type": "clinical",
"category": "cancer",
},
{
"file_name": "vcf_cancer_sv",
"variant_type": "clinical",
"category": "cancer_sv",
},
{"file_name": "vcf_str", "variant_type": "clinical", "category": "str"},
]
try:
for vcf_file in files:
# Check if file exists
if not case_obj["vcf_files"].get(vcf_file["file_name"]):
LOG.debug("didn't find {}, skipping".format(vcf_file["file_name"]))
continue
variant_type = vcf_file["variant_type"]
category = vcf_file["category"]
if update:
self.delete_variants(
case_id=case_obj["_id"],
variant_type=variant_type,
category=category,
)
self.load_variants(
case_obj=case_obj,
variant_type=variant_type,
category=category,
rank_threshold=case_obj.get("rank_score_threshold", 5),
)
except (IntegrityError, ValueError, ConfigError, KeyError) as error:
LOG.warning(error)
if existing_case and update:
case_obj["rerun_requested"] = False
if case_obj["status"] in ["active", "archived"]:
case_obj["status"] = "inactive"
self.update_case(case_obj)
# update Sanger status for the new inserted variants
self.update_case_sanger_variants(
institute_obj, case_obj, old_sanger_variants
)
else:
LOG.info("Loading case %s into database", case_obj["display_name"])
self._add_case(case_obj)
return case_obj |
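A sketch of loading and then reloading a case from a parsed load config ("store" is a placeholder adapter; the dict is abbreviated and its samples must satisfy the case parser):
config_data = {
    "owner": "cust000",
    "family": "internal_id_1",
    "samples": [],
    "vcf_snv": "/path/to/snv.vcf.gz",
}
try:
    case_obj = store.load_case(config_data, update=False)
except IntegrityError:
    # the case is already in the database; update=True replaces its variants
    case_obj = store.load_case(config_data, update=True)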
Python | def update_case(self, case_obj, keep_date=False):
"""Update a case in the database
The following will be updated:
- collaborators: If new collaborators these will be added to the old ones
- analysis_date: Is updated to the new date
- analyses: The new analysis date will be added to old runs
- individuals: There could be new individuals
- updated_at: When the case was updated in the database
            - rerun_requested: Is set to False since the requested rerun has presumably been performed
- panels: The new gene panels are added
- genome_build: If there is a new genome build
- genome_version: - || -
- rank_model_version: If there is a new rank model
- sv_rank_model_version: If there is a new sv rank model
- madeline_info: If there is a new pedigree
- vcf_files: paths to the new files
- has_svvariants: If there are new svvariants
- has_strvariants: If there are new strvariants
- multiqc: If there's an updated multiqc report location
- mme_submission: If case was submitted to MatchMaker Exchange
Args:
case_obj(dict): The new case information
keep_date(boolean): The update is small and should not trigger a date change
Returns:
updated_case(dict): The updated case information
"""
# Todo: rename to match the intended purpose
LOG.info("Updating case {0}".format(case_obj["_id"]))
old_case = self.case_collection.find_one({"_id": case_obj["_id"]})
updated_at = datetime.datetime.now()
if keep_date:
updated_at = old_case["updated_at"]
# collect already available info from individuals
old_individuals = old_case.get("individuals")
for ind in case_obj.get("individuals"):
for old_ind in old_individuals:
# if the same individual is present in new case and old case
if ind["individual_id"] == old_ind["individual_id"]:
# collect user-entered info and save at the individual level in new case_obj
if ind.get("age") is None:
ind["age"] = old_ind.get("age")
if ind.get("tissue_type") is None:
ind["tissue_type"] = old_ind.get("tissue_type")
updated_case = self.case_collection.find_one_and_update(
{"_id": case_obj["_id"]},
{
"$addToSet": {
"collaborators": {"$each": case_obj["collaborators"]},
"analyses": {
"date": old_case["analysis_date"],
"delivery_report": old_case.get("delivery_report"),
},
},
"$set": {
"analysis_date": case_obj["analysis_date"],
"delivery_report": case_obj.get("delivery_report"),
"individuals": case_obj["individuals"],
"updated_at": updated_at,
"rerun_requested": case_obj.get("rerun_requested", False),
"panels": case_obj.get("panels", []),
"genome_build": case_obj.get("genome_build", "37"),
"genome_version": case_obj.get("genome_version"),
"rank_model_version": case_obj.get("rank_model_version"),
"sv_rank_model_version": case_obj.get("sv_rank_model_version"),
"madeline_info": case_obj.get("madeline_info"),
"chromograph_image_files": case_obj.get("chromograph_image_files"),
"chromograph_prefixes": case_obj.get("chromograph_prefixes"),
"vcf_files": case_obj.get("vcf_files"),
"has_svvariants": case_obj.get("has_svvariants"),
"has_strvariants": case_obj.get("has_strvariants"),
"is_research": case_obj.get("is_research", False),
"research_requested": case_obj.get("research_requested", False),
"multiqc": case_obj.get("multiqc"),
"mme_submission": case_obj.get("mme_submission"),
"status": case_obj.get("status"),
},
},
return_document=pymongo.ReturnDocument.AFTER,
)
LOG.info("Case updated")
        return updated_case
"""Update a case in the database
The following will be updated:
- collaborators: If new collaborators these will be added to the old ones
- analysis_date: Is updated to the new date
- analyses: The new analysis date will be added to old runs
- individuals: There could be new individuals
- updated_at: When the case was updated in the database
- rerun_requested: Is set to False since that is probably what happened
- panels: The new gene panels are added
- genome_build: If there is a new genome build
- genome_version: - || -
- rank_model_version: If there is a new rank model
- sv_rank_model_version: If there is a new sv rank model
- madeline_info: If there is a new pedigree
- vcf_files: paths to the new files
- has_svvariants: If there are new svvariants
- has_strvariants: If there are new strvariants
- multiqc: If there's an updated multiqc report location
- mme_submission: If case was submitted to MatchMaker Exchange
Args:
case_obj(dict): The new case information
keep_date(boolean): The update is small and should not trigger a date change
Returns:
updated_case(dict): The updated case information
"""
# Todo: rename to match the intended purpose
LOG.info("Updating case {0}".format(case_obj["_id"]))
old_case = self.case_collection.find_one({"_id": case_obj["_id"]})
updated_at = datetime.datetime.now()
if keep_date:
updated_at = old_case["updated_at"]
# collect already available info from individuals
old_individuals = old_case.get("individuals")
for ind in case_obj.get("individuals"):
for old_ind in old_individuals:
# if the same individual is present in new case and old case
if ind["individual_id"] == old_ind["individual_id"]:
# collect user-entered info and save at the individual level in new case_obj
if ind.get("age") is None:
ind["age"] = old_ind.get("age")
if ind.get("tissue_type") is None:
ind["tissue_type"] = old_ind.get("tissue_type")
updated_case = self.case_collection.find_one_and_update(
{"_id": case_obj["_id"]},
{
"$addToSet": {
"collaborators": {"$each": case_obj["collaborators"]},
"analyses": {
"date": old_case["analysis_date"],
"delivery_report": old_case.get("delivery_report"),
},
},
"$set": {
"analysis_date": case_obj["analysis_date"],
"delivery_report": case_obj.get("delivery_report"),
"individuals": case_obj["individuals"],
"updated_at": updated_at,
"rerun_requested": case_obj.get("rerun_requested", False),
"panels": case_obj.get("panels", []),
"genome_build": case_obj.get("genome_build", "37"),
"genome_version": case_obj.get("genome_version"),
"rank_model_version": case_obj.get("rank_model_version"),
"sv_rank_model_version": case_obj.get("sv_rank_model_version"),
"madeline_info": case_obj.get("madeline_info"),
"chromograph_image_files": case_obj.get("chromograph_image_files"),
"chromograph_prefixes": case_obj.get("chromograph_prefixes"),
"vcf_files": case_obj.get("vcf_files"),
"has_svvariants": case_obj.get("has_svvariants"),
"has_strvariants": case_obj.get("has_strvariants"),
"is_research": case_obj.get("is_research", False),
"research_requested": case_obj.get("research_requested", False),
"multiqc": case_obj.get("multiqc"),
"mme_submission": case_obj.get("mme_submission"),
"status": case_obj.get("status"),
},
},
return_document=pymongo.ReturnDocument.AFTER,
)
LOG.info("Case updated")
return updated_case |
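For a small edit that should not bump the timestamp, pass keep_date=True. A sketch:
case_obj = store.case(case_id="internal_id_1")
case_obj["delivery_report"] = "/path/to/new_report.html"
updated = store.update_case(case_obj, keep_date=True)
# updated["updated_at"] is unchanged, while the previous analysis_date and
# delivery_report pair is added to the "analyses" list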
Python | def update_case_sanger_variants(self, institute_obj, case_obj, case_verif_variants):
"""Update existing variants for a case according to a previous
verification status.
Accepts:
institute_obj(dict): an institute object
case_obj(dict): a case object
Returns:
updated_variants(dict): a dictionary like this: {
'updated_verified' : [list of variant ids],
'updated_ordered' : [list of variant ids]
}
"""
LOG.debug(
"Updating verification status for variants in case:{}".format(
case_obj["_id"]
)
)
updated_variants = {"updated_verified": [], "updated_ordered": []}
# update verification status for verified variants of a case
for category in case_verif_variants:
variants = case_verif_variants[category]
verb = "sanger"
if category == "sanger_verified":
verb = "validate"
for old_var in variants:
# new var display name should be the same as old display name:
display_name = old_var["display_name"]
# check if variant still exists
new_var = self.variant_collection.find_one(
{"case_id": case_obj["_id"], "display_name": display_name}
)
if new_var is None: # if variant doesn't exist any more
continue
# create a link to the new variant for the events
link = "/{0}/{1}/{2}".format(
new_var["institute"], case_obj["display_name"], new_var["_id"]
)
old_event = self.event_collection.find_one(
{
"case": case_obj["_id"],
"verb": verb,
"variant_id": old_var["variant_id"],
}
)
if old_event is None:
continue
user_obj = self.user(old_event["user_id"])
if category == "sanger_verified":
                    # if a new variant corresponds to the old one and
                    # there exists a verification event for the old one
# validate new variant as well:
updated_var = self.validate(
institute=institute_obj,
case=case_obj,
user=user_obj,
link=link,
variant=new_var,
validate_type=old_var.get("validation"),
)
if updated_var:
updated_variants["updated_verified"].append(updated_var["_id"])
else:
# old variant had Sanger validation ordered;
# replicate the ordered status on the new variant,
# attributing it to the user found in the old event:
updated_var = self.order_verification(
institute=institute_obj,
case=case_obj,
user=user_obj,
link=link,
variant=new_var,
)
if updated_var:
updated_variants["updated_ordered"].append(updated_var["_id"])
n_status_updated = len(updated_variants["updated_verified"]) + len(
updated_variants["updated_ordered"]
)
LOG.info("Verification status updated for {} variants".format(n_status_updated))
return updated_variants |
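A hedged usage sketch; the category names, field values, and the `adapter`/`institute_obj`/`case_obj` objects below are illustrative assumptions (the method only special-cases the "sanger_verified" key and treats any other category as Sanger-ordered):

case_verif_variants = {
    "sanger_verified": [
        # previously validated variant; "validation" carries the outcome
        {"display_name": "1_12345_A_C", "variant_id": "abc123", "validation": "True positive"},
    ],
    "sanger_ordered": [
        # variant with Sanger verification ordered but not yet validated
        {"display_name": "2_67890_G_T", "variant_id": "def456"},
    ],
}
updated = adapter.update_case_sanger_variants(institute_obj, case_obj, case_verif_variants)
# -> {"updated_verified": [<variant _id>, ...], "updated_ordered": [<variant _id>, ...]}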
Python | def build_instance_tags(clusterid, status='owned'):
''' Return a dictionary of the instance tags.
This lives inside a filter_plugin because we need to build
the following dynamically named key:
{"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
'''
tags = {'clusterid': clusterid,
'kubernetes.io/cluster/{}'.format(clusterid): status}
return tags |
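A quick usage check with a hypothetical cluster id:

tags = build_instance_tags('mycluster')
assert tags == {
    'clusterid': 'mycluster',
    'kubernetes.io/cluster/mycluster': 'owned',
}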
Python | def extract_similarity_vector(window_data, params, name):
"""Extracts a vector that represents a window of data.
Currently it first builds diffs from all neighboring flux values.
:param window_data: <float64>[batch_size, window_size, 3] tensor of input
light curve data. Currently contains time values, flux, and flux error.
:param params: Model parameters.
:param name: String name "left" or "right".
:returns: <float64>[batch_size, layer2_dim] L2-normalized embedding tensor.
"""
batch_size = params["batch_size"]
window_size = params["window_size"]
keep_prob = params["dropout_keep_prob"]  # probability of keeping a unit, not dropping it
symmetric = params.get("symmetric", True)
layer1_dim, layer2_dim = params["layer_sizes"]
assert window_data.shape == (batch_size, window_size, 3)
# <float64>: [batch_size, window_size]
time_values = window_data[:, :, 0]
flux_values = window_data[:, :, 1]
# <float64>: [batch_size, window_size - 1]
flux_diffs = flux_values[:, 1:] - flux_values[:, :-1]
time_diffs = time_values[:, 1:] - time_values[:, :-1]
diffs = flux_diffs / time_diffs
reuse = tf.AUTO_REUSE if symmetric else False
scope_name = "extract_vector" if symmetric else "extract_vector_{}".format(name)
with tf.variable_scope(scope_name, reuse=reuse):
layer1 = layers.fully_connected(diffs, layer1_dim, activation_fn=tf.nn.relu)
layer1 = tf.nn.dropout(layer1, keep_prob=keep_prob)
layer2 = layers.fully_connected(layer1, layer2_dim, activation_fn=tf.nn.relu)
# Normalize the vectors. If vector magnitude is causing precision issues, we could
# add a regularization loss.
layer2_norm = tf.expand_dims(tf.norm(layer2, axis=1), axis=1)
assert layer2_norm.shape == (batch_size, 1)
# Display mean norm (across batch) in TensorBoard.
tf.summary.scalar("{}_norm".format(name), tf.reduce_mean(layer2_norm))
tf.summary.scalar("{}_layer2_min".format(name), tf.reduce_min(layer2))
return layer2 / layer2_norm |
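The comparison step is not part of this snippet, but because both outputs are L2-normalized, a plausible downstream use (an assumption, not the author's confirmed scoring) is cosine similarity via a plain dot product:

# Sketch only: assumes `params` as above and TF1-style graph construction.
left_window = tf.placeholder(tf.float64, [params["batch_size"], params["window_size"], 3])
right_window = tf.placeholder(tf.float64, [params["batch_size"], params["window_size"], 3])
left_vec = extract_similarity_vector(left_window, params, name="left")     # unit-norm rows
right_vec = extract_similarity_vector(right_window, params, name="right")  # unit-norm rows
# For unit vectors, the dot product equals the cosine similarity in [-1, 1].
similarity = tf.reduce_sum(left_vec * right_vec, axis=1)  # <float64>[batch_size]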
Python | def srp(self):
"""Gets the srp of this StorageGroup. # noqa: E501
:return: The srp of this StorageGroup. # noqa: E501
:rtype: str
"""
return self._srp |
Python | def srp(self, srp):
"""Sets the srp of this StorageGroup.
:param srp: The srp of this StorageGroup. # noqa: E501
:type: str
"""
self._srp = srp |
Python | def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(StorageGroup, dict):
for key, value in self.items():
result[key] = value
return result |
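A small usage sketch, assuming the generated model allows no-argument construction with attributes defaulting to None (the usual swagger-codegen convention):

sg = StorageGroup()
sg.srp = 'SRP_1'
d = sg.to_dict()   # nested models and lists of models are converted recursively
print(d['srp'])    # 'SRP_1'; unset attributes appear as None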
Python | def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RegisterPayload, dict):
for key, value in self.items():
result[key] = value
return result |
Python | def create_storage_view(self, cluster_name, storage_view_payload, **kwargs): # noqa: E501
"""Create a new StorageView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_storage_view(cluster_name, storage_view_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param StorageViewPayload storage_view_payload: (required)
:param str x_include_object: When passed as part of a POST request, controls whether the representation of the newly created object is included in the response. Defaults to 'true' which will include the object in the response. This header is useful because refreshing the newly created object is usually the slowest part of a POST operation.
:return: StorageView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_storage_view_with_http_info(cluster_name, storage_view_payload, **kwargs) # noqa: E501
else:
(data) = self.create_storage_view_with_http_info(cluster_name, storage_view_payload, **kwargs) # noqa: E501
return data |
Python | def create_storage_view_with_http_info(self, cluster_name, storage_view_payload, **kwargs): # noqa: E501
"""Create a new StorageView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_storage_view_with_http_info(cluster_name, storage_view_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param StorageViewPayload storage_view_payload: (required)
:param str x_include_object: When passed as part of a POST request, controls whether the representation of the newly created object is included in the response. Defaults to 'true' which will include the object in the response. This header is useful because refreshing the newly created object is usually the slowest part of a POST operation.
:return: StorageView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'storage_view_payload', 'x_include_object'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_storage_view" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `create_storage_view`") # noqa: E501
# verify the required parameter 'storage_view_payload' is set
if ('storage_view_payload' not in params or
params['storage_view_payload'] is None):
raise ValueError("Missing the required parameter `storage_view_payload` when calling `create_storage_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_include_object' in params:
header_params['X-Include-Object'] = params['x_include_object'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'storage_view_payload' in params:
body_params = params['storage_view_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/exports/storage_views', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StorageView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) |
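A hedged usage sketch of the generated client's sync/async calling convention; the payload constructor arguments are assumptions, so consult the StorageViewPayload model for the real required fields:

payload = StorageViewPayload(name='sv_1')  # hypothetical constructor args
# Synchronous (default): returns the created StorageView directly.
view = api.create_storage_view('cluster-1', payload)
# Asynchronous: returns a thread-like handle; .get() blocks until the result arrives.
thread = api.create_storage_view('cluster-1', payload, async_req=True)
view = thread.get()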
Python | def delete_storage_view(self, cluster_name, name, **kwargs): # noqa: E501
"""Deletes a single StorageView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_storage_view(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_storage_view_with_http_info(cluster_name, name, **kwargs) # noqa: E501
else:
(data) = self.delete_storage_view_with_http_info(cluster_name, name, **kwargs) # noqa: E501
return data |
Python | def delete_storage_view_with_http_info(self, cluster_name, name, **kwargs): # noqa: E501
"""Deletes a single StorageView # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_storage_view_with_http_info(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_storage_view" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `delete_storage_view`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_storage_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/exports/storage_views/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) |
Python | def patch_port(self, cluster_name, name, port_patch_payload, **kwargs): # noqa: E501
"""Update attributes on a Port # noqa: E501
Settable attributes are 'name' and 'enabled' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_port(cluster_name, name, port_patch_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:param list[JsonPatchOp] port_patch_payload: (required)
:return: Port
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_port_with_http_info(cluster_name, name, port_patch_payload, **kwargs) # noqa: E501
else:
(data) = self.patch_port_with_http_info(cluster_name, name, port_patch_payload, **kwargs) # noqa: E501
return data |
Python | def patch_port_with_http_info(self, cluster_name, name, port_patch_payload, **kwargs): # noqa: E501
"""Update attributes on a Port # noqa: E501
Settable attributes are 'name' and 'enabled' # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_port_with_http_info(cluster_name, name, port_patch_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:param list[JsonPatchOp] port_patch_payload: (required)
:return: Port
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'name', 'port_patch_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_port" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `patch_port`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_port`") # noqa: E501
# verify the required parameter 'port_patch_payload' is set
if ('port_patch_payload' not in params or
params['port_patch_payload'] is None):
raise ValueError("Missing the required parameter `port_patch_payload` when calling `patch_port`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'port_patch_payload' in params:
body_params = params['port_patch_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/exports/ports/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Port', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) |
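Since the docstring says only 'name' and 'enabled' are settable, a patch body might look like the following; the exact paths are an assumption based on RFC 6902 conventions, and plain dicts are shown where the generated JsonPatchOp model could also be used:

port_patch_payload = [
    {'op': 'replace', 'path': '/enabled', 'value': True},
    {'op': 'replace', 'path': '/name', 'value': 'port_renamed'},
]
port = api.patch_port('cluster-1', 'port_0', port_patch_payload)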
Python | def rediscover_initiator_ports(self, cluster_name, **kwargs): # noqa: E501
"""Rediscover initiator ports # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rediscover_initiator_ports(cluster_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param RediscoverPayload rediscover_payload:
:return: list[InitiatorPort]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.rediscover_initiator_ports_with_http_info(cluster_name, **kwargs) # noqa: E501
else:
(data) = self.rediscover_initiator_ports_with_http_info(cluster_name, **kwargs) # noqa: E501
return data |
Python | def rediscover_initiator_ports_with_http_info(self, cluster_name, **kwargs): # noqa: E501
"""Rediscover initiator ports # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.rediscover_initiator_ports_with_http_info(cluster_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param RediscoverPayload rediscover_payload:
:return: list[InitiatorPort]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'rediscover_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method rediscover_initiator_ports" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `rediscover_initiator_ports`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'rediscover_payload' in params:
body_params = params['rediscover_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/exports/initiator_ports/rediscover', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[InitiatorPort]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) |
Python | def register_initiator_port(self, cluster_name, register_payload, **kwargs): # noqa: E501
"""Registers an InitiatorPort # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.register_initiator_port(cluster_name, register_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param RegisterPayload register_payload: (required)
:return: InitiatorPort
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.register_initiator_port_with_http_info(cluster_name, register_payload, **kwargs) # noqa: E501
else:
(data) = self.register_initiator_port_with_http_info(cluster_name, register_payload, **kwargs) # noqa: E501
return data |
Python | def register_initiator_port_with_http_info(self, cluster_name, register_payload, **kwargs): # noqa: E501
"""Registers an InitiatorPort # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.register_initiator_port_with_http_info(cluster_name, register_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param RegisterPayload register_payload: (required)
:return: InitiatorPort
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'register_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method register_initiator_port" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `register_initiator_port`") # noqa: E501
# verify the required parameter 'register_payload' is set
if ('register_payload' not in params or
params['register_payload'] is None):
raise ValueError("Missing the required parameter `register_payload` when calling `register_initiator_port`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'register_payload' in params:
body_params = params['register_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/exports/initiator_ports', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InitiatorPort', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) |
Python | def unregister_initiator_port(self, cluster_name, name, **kwargs): # noqa: E501
"""Unregister an InitiatorPort # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unregister_initiator_port(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.unregister_initiator_port_with_http_info(cluster_name, name, **kwargs) # noqa: E501
else:
(data) = self.unregister_initiator_port_with_http_info(cluster_name, name, **kwargs) # noqa: E501
return data |
Python | def unregister_initiator_port_with_http_info(self, cluster_name, name, **kwargs): # noqa: E501
"""Unregister an InitiatorPort # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unregister_initiator_port_with_http_info(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method unregister_initiator_port" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `unregister_initiator_port`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `unregister_initiator_port`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/exports/initiator_ports/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) |
Python | def create_logging_volume(self, cluster_name, logging_volume_payload, **kwargs): # noqa: E501
"""Creates a new Logging Volume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_logging_volume(cluster_name, logging_volume_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param LoggingVolumePayload logging_volume_payload: (required)
:param str x_include_object: When passed as part of a POST request, controls whether the representation of the newly created object is included in the response. Defaults to 'true' which will include the object in the response. This header is useful because refreshing the newly created object is usually the slowest part of a POST operation.
:return: LoggingVolume
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_logging_volume_with_http_info(cluster_name, logging_volume_payload, **kwargs) # noqa: E501
else:
(data) = self.create_logging_volume_with_http_info(cluster_name, logging_volume_payload, **kwargs) # noqa: E501
return data |
Python | def create_logging_volume_with_http_info(self, cluster_name, logging_volume_payload, **kwargs): # noqa: E501
"""Creates a new Logging Volume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_logging_volume_with_http_info(cluster_name, logging_volume_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param LoggingVolumePayload logging_volume_payload: (required)
:param str x_include_object: When passed as part of a POST request, controls whether the representation of the newly created object is included in the response. Defaults to 'true' which will include the object in the response. This header is useful because refreshing the newly created object is usually the slowest part of a POST operation.
:return: LoggingVolume
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'logging_volume_payload', 'x_include_object'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_logging_volume" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `create_logging_volume`") # noqa: E501
# verify the required parameter 'logging_volume_payload' is set
if ('logging_volume_payload' not in params or
params['logging_volume_payload'] is None):
raise ValueError("Missing the required parameter `logging_volume_payload` when calling `create_logging_volume`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_include_object' in params:
header_params['X-Include-Object'] = params['x_include_object'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'logging_volume_payload' in params:
body_params = params['logging_volume_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/logging_volumes', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LoggingVolume', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
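
The plain wrapper above forces _return_http_data_only=True, so it yields only the
deserialized model. Calling the _with_http_info variant directly, in a stock
swagger-codegen client, also exposes the HTTP status and headers. A sketch reusing
the api and payload objects from the earlier example (verify the tuple shape
against this package's ApiClient.call_api before relying on it):

# Raw-response sketch: without _return_http_data_only=True, call_api in stock
# swagger-codegen clients returns (data, status_code, headers).
data, status, headers = api.create_logging_volume_with_http_info(
    'cluster1', payload)
print(status, headers.get('Location'))  # exact headers depend on the API spec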

def delete_logging_volume(self, cluster_name, name, **kwargs):  # noqa: E501
"""Deletes a single LoggingVolume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_logging_volume(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_logging_volume_with_http_info(cluster_name, name, **kwargs) # noqa: E501
else:
(data) = self.delete_logging_volume_with_http_info(cluster_name, name, **kwargs) # noqa: E501
        return data

def delete_logging_volume_with_http_info(self, cluster_name, name, **kwargs):  # noqa: E501
"""Deletes a single LoggingVolume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_logging_volume_with_http_info(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_logging_volume" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `delete_logging_volume`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_logging_volume`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/logging_volumes/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
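
Non-2xx responses from these generated methods raise ApiException in standard
swagger-codegen clients; a sketch, assuming the conventional swagger_client.rest
module path and reusing the api object from the earlier example:

from swagger_client.rest import ApiException  # assumed module path

try:
    api.delete_logging_volume('cluster1', 'logvol1')
except ApiException as exc:
    # exc.status, exc.reason, and exc.body carry the HTTP error details.
    if exc.status == 404:
        pass  # already gone; treat the delete as idempotent
    else:
        raise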

def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ClaimPayload, dict):
for key, value in self.items():
result[key] = value
        return result
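
A self-contained sketch of the recursion above, using stand-in classes rather than
the generated models: swagger_types maps attribute names to type strings, and any
value exposing to_dict() is serialized recursively.

class Inner(object):
    swagger_types = {'value': 'int'}

    def __init__(self, value):
        self.value = value

    def to_dict(self):
        return {'value': self.value}


class Outer(object):
    swagger_types = {'child': 'Inner', 'tags': 'list[str]'}

    def __init__(self, child, tags):
        self.child = child
        self.tags = tags

    def to_dict(self):
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, 'to_dict') else x
                                for x in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result


print(Outer(Inner(7), ['a', 'b']).to_dict())
# -> {'child': {'value': 7}, 'tags': ['a', 'b']}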

def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LoginResponse, dict):
for key, value in self.items():
result[key] = value
        return result

def add_certificate(self, external_certificate, certificate_payload, **kwargs):  # noqa: E501
"""Add the certificate to the keystore. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_certificate(external_certificate, certificate_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str external_certificate: The IP:PORT of the system associated with this certificate. (required)
:param CertificatePayload certificate_payload: (required)
:return: Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_certificate_with_http_info(external_certificate, certificate_payload, **kwargs) # noqa: E501
else:
(data) = self.add_certificate_with_http_info(external_certificate, certificate_payload, **kwargs) # noqa: E501
        return data

def add_certificate_with_http_info(self, external_certificate, certificate_payload, **kwargs):  # noqa: E501
"""Add the certificate to the keystore. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_certificate_with_http_info(external_certificate, certificate_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str external_certificate: The IP:PORT of the system associated with this certificate. (required)
:param CertificatePayload certificate_payload: (required)
:return: Certificate
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['external_certificate', 'certificate_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_certificate" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'external_certificate' is set
if ('external_certificate' not in params or
params['external_certificate'] is None):
raise ValueError("Missing the required parameter `external_certificate` when calling `add_certificate`") # noqa: E501
# verify the required parameter 'certificate_payload' is set
if ('certificate_payload' not in params or
params['certificate_payload'] is None):
raise ValueError("Missing the required parameter `certificate_payload` when calling `add_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'external_certificate' in params:
path_params['external_certificate'] = params['external_certificate'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'certificate_payload' in params:
body_params = params['certificate_payload']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/external_certificates/trusted/{external_certificate}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

def external_certificates_untrusted_external_certificate_get(self, external_certificate, **kwargs):  # noqa: E501
"""Fetches the SSL Certificate of an external system # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.external_certificates_untrusted_external_certificate_get(external_certificate, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str external_certificate: The IP:PORT of the system associated with this certificate. (required)
:return: Certificate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.external_certificates_untrusted_external_certificate_get_with_http_info(external_certificate, **kwargs) # noqa: E501
else:
(data) = self.external_certificates_untrusted_external_certificate_get_with_http_info(external_certificate, **kwargs) # noqa: E501
        return data

def external_certificates_untrusted_external_certificate_get_with_http_info(self, external_certificate, **kwargs):  # noqa: E501
"""Fetches the SSL Certificate of an external system # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.external_certificates_untrusted_external_certificate_get_with_http_info(external_certificate, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str external_certificate: The IP:PORT of the system associated with this certificate. (required)
:return: Certificate
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['external_certificate'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method external_certificates_untrusted_external_certificate_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'external_certificate' is set
if ('external_certificate' not in params or
params['external_certificate'] is None):
raise ValueError("Missing the required parameter `external_certificate` when calling `external_certificates_untrusted_external_certificate_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'external_certificate' in params:
path_params['external_certificate'] = params['external_certificate'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/external_certificates/untrusted/{external_certificate}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Certificate', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

def remove_certificate(self, external_certificate, **kwargs):  # noqa: E501
"""Remove the certificate from the keystore. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_certificate(external_certificate, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str external_certificate: The IP:PORT of the system associated with this certificate. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_certificate_with_http_info(external_certificate, **kwargs) # noqa: E501
else:
(data) = self.remove_certificate_with_http_info(external_certificate, **kwargs) # noqa: E501
        return data

def remove_certificate_with_http_info(self, external_certificate, **kwargs):  # noqa: E501
"""Remove the certificate from the keystore. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_certificate_with_http_info(external_certificate, async_req=True)
>>> result = thread.get()
:param async_req bool
        :param str external_certificate: The IP:PORT of the system associated with this certificate. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['external_certificate'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_certificate" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'external_certificate' is set
if ('external_certificate' not in params or
params['external_certificate'] is None):
raise ValueError("Missing the required parameter `external_certificate` when calling `remove_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'external_certificate' in params:
path_params['external_certificate'] = params['external_certificate'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/external_certificates/trusted/{external_certificate}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
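
Taken together, the three certificate endpoints above support a fetch, inspect,
then trust workflow; a sketch (the API class name is assumed, api_client is the
configured ApiClient from the first example, and the CertificatePayload fields are
model-specific):

certs = ExternalCertificatesApi(api_client)  # assumed generated class name
endpoint = '10.0.0.5:443'

# 1. Fetch the peer's certificate without trusting it, so it can be inspected.
untrusted = certs.external_certificates_untrusted_external_certificate_get(endpoint)

# 2. After review, add it to the keystore via the trusted endpoint.
trusted = certs.add_certificate(endpoint, CertificatePayload())

# 3. Revoke trust later by removing it from the keystore.
certs.remove_certificate(endpoint)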

def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MetaVolume, dict):
for key, value in self.items():
result[key] = value
        return result

def platform(self, platform):
"""Sets the platform of this SystemConfigBranding.
:param platform: The platform of this SystemConfigBranding. # noqa: E501
:type: str
"""
allowed_values = ["UNKNOWN", "GENERIC", "VS2", "VS6"] # noqa: E501
if platform not in allowed_values:
raise ValueError(
"Invalid value for `platform` ({0}), must be one of {1}" # noqa: E501
.format(platform, allowed_values)
)
        self._platform = platform
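
A short illustration of the enum guard above (the model's constructor signature is
an assumption):

branding = SystemConfigBranding()
branding.platform = 'VS2'  # accepted: the value is in allowed_values
branding.platform = 'vs2'  # raises ValueError: the membership check is case-sensitive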

def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SystemConfigBranding, dict):
for key, value in self.items():
result[key] = value
        return result

def hours(self, hours):
"""Sets the hours of this MetadataBackupPayload.
:param hours: The hours of this MetadataBackupPayload. # noqa: E501
:type: int
"""
if hours is None:
raise ValueError("Invalid value for `hours`, must not be `None`") # noqa: E501
        self._hours = hours

def minutes(self, minutes):
"""Sets the minutes of this MetadataBackupPayload.
:param minutes: The minutes of this MetadataBackupPayload. # noqa: E501
:type: int
"""
if minutes is None:
raise ValueError("Invalid value for `minutes`, must not be `None`") # noqa: E501
        self._minutes = minutes
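
The two setters above reject None rather than silently unsetting a required field;
a sketch (keyword-argument constructor assumed):

backup = MetadataBackupPayload(hours=2, minutes=30)
backup.minutes = 45    # fine
backup.minutes = None  # raises ValueError: `minutes` must not be `None`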

def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MetadataBackupPayload, dict):
for key, value in self.items():
result[key] = value
        return result

def claim_storage_volume(self, cluster_name, name, claim_payload, **kwargs):  # noqa: E501
"""Claim a StorageVolume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.claim_storage_volume(cluster_name, name, claim_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:param ClaimPayload claim_payload: (required)
:return: StorageVolume
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.claim_storage_volume_with_http_info(cluster_name, name, claim_payload, **kwargs) # noqa: E501
else:
(data) = self.claim_storage_volume_with_http_info(cluster_name, name, claim_payload, **kwargs) # noqa: E501
        return data
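
A usage sketch tying the claim endpoint to the ClaimPayload model shown earlier
(the API class name and payload fields are assumptions; api_client is the
configured ApiClient from the first example):

volumes = StorageVolumesApi(api_client)  # assumed generated class name
claimed = volumes.claim_storage_volume('cluster1', 'vol1', ClaimPayload())
print(claimed.to_dict())  # StorageVolume serialized via the to_dict pattern above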

def claim_storage_volume_with_http_info(self, cluster_name, name, claim_payload, **kwargs):  # noqa: E501
"""Claim a StorageVolume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.claim_storage_volume_with_http_info(cluster_name, name, claim_payload, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:param ClaimPayload claim_payload: (required)
:return: StorageVolume
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'name', 'claim_payload'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method claim_storage_volume" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `claim_storage_volume`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `claim_storage_volume`") # noqa: E501
# verify the required parameter 'claim_payload' is set
if ('claim_payload' not in params or
params['claim_payload'] is None):
raise ValueError("Missing the required parameter `claim_payload` when calling `claim_storage_volume`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'claim_payload' in params:
body_params = params['claim_payload']
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/storage_volumes/{name}/claim', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StorageVolume', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

def forget_storage_volume(self, cluster_name, name, **kwargs):  # noqa: E501
"""Storage volume is not really missing it will reappear after being forgotten # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.forget_storage_volume(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.forget_storage_volume_with_http_info(cluster_name, name, **kwargs) # noqa: E501
else:
(data) = self.forget_storage_volume_with_http_info(cluster_name, name, **kwargs) # noqa: E501
        return data

def forget_storage_volume_with_http_info(self, cluster_name, name, **kwargs):  # noqa: E501
"""Storage volume is not really missing it will reappear after being forgotten # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.forget_storage_volume_with_http_info(cluster_name, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cluster_name: The name of the cluster (required)
:param str name: The name of a specific instance of the resource (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cluster_name', 'name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method forget_storage_volume" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cluster_name' is set
if ('cluster_name' not in params or
params['cluster_name'] is None):
raise ValueError("Missing the required parameter `cluster_name` when calling `forget_storage_volume`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `forget_storage_volume`") # noqa: E501
collection_formats = {}
path_params = {}
if 'cluster_name' in params:
path_params['cluster_name'] = params['cluster_name'] # noqa: E501
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['basicAuth', 'jwtAuth'] # noqa: E501
return self.api_client.call_api(
'/clusters/{cluster_name}/storage_volumes/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)