| edited_code (stringlengths 17–978k) | original_code (stringlengths 17–978k) |
|---|---|
#!/usr/bin/python3
"""Widget that simplifies defining questionnaires."""
import os
import pickle
from tkinter import BooleanVar
from tkinter import DoubleVar
from tkinter import IntVar
from tkinter import StringVar
from tkinter import TclError
from tkinter.ttk import Checkbutton
from tkinter.ttk import Combobox
from tkinter.ttk import Frame
from tkinter.ttk import Label
from tkinter.ttk import LabelFrame
from tkinter.ttk import Notebook
import numpy as np
from orcinus.gui.tooltip import create_tooltip
# TODO(schneiderfelipe): this will change in the future.
DATA_DIR = os.path.expanduser("~")
class Questionnaire(Frame):
"""Interface for simple questionnaires."""
def __init__(
self,
master=None,
fields=None,
state_filename=None,
padx=1,
pady=2,
column_minsize=240,
):
"""Construct object."""
super().__init__(master)
self.padx = padx
self.pady = pady
self.column_minsize = column_minsize
self.state_filename = state_filename
self.master = master
self.fields = fields
self.create_widgets()
def get_values(self):
"""Return a dictionary of all variable values."""
self.update_widgets()
values = {}
for name in self.variable:
if not self.fields[name]["visible"]:
values[name] = None
continue
try:
values[name] = self.variable[name].get()
except TclError:
values[name] = self.fields[name]["default"]
if values[name] == "None":
values[name] = None
if "values" in self.fields[name]:
translator = self.fields[name]["values"]
if isinstance(translator, dict):
try:
values[name] = translator[values[name]]
except KeyError:
values[name] = translator[self.fields[name]["default"]]
if values[name] == "None":
values[name] = None
return values
def init_widgets(self, *args, ignore_state=False, **kwargs):
"""Clear all fields to default values."""
if self.fields is None:
return
init_values = {
name: desc["default"] for name, desc in self.fields.items()
}
        state_path = os.path.join(DATA_DIR, self.state_filename) if self.state_filename else None
if (
not ignore_state
and self.state_filename
and os.path.isfile(state_path)
):
with open(state_path, "rb") as f:
state = pickle.load(f)
init_values.update(state)
for name, value in init_values.items():
try:
self.variable[name].set(value)
except KeyError:
pass
self.update_widgets()
def store_widgets(self, *args, **kwargs):
"""Store all fields to disk."""
if self.fields is None:
return
if self.state_filename:
state_path = os.path.join(DATA_DIR, self.state_filename)
state = {}
for name, _ in self.fields.items():
try:
state[name] = self.variable[name].get()
except TclError:
state[name] = self.fields[name]["default"]
with open(state_path, "wb") as f:
pickle.dump(state, f)
def enable(self, name):
"""Show a widget by name."""
if self.fields[name]["visible"]:
return
self.toggle(name)
def disable(self, name):
"""Hide a widget by name."""
if not self.fields[name]["visible"]:
return
self.toggle(name)
def toggle(self, name):
"""Hide or show a widget by name."""
if not self.fields[name]["visible"]:
self.widget[name].grid()
if name in self.label:
self.label[name].grid()
else:
self.widget[name].grid_remove()
if name in self.label:
self.label[name].grid_remove()
self.fields[name]["visible"] = not self.fields[name]["visible"]
def create_widgets(self):
"""Populate object and its widgets."""
self.variable = {}
self.label = {}
self.widget = {}
self.tab = {}
self.group = {}
self.notebook = Notebook(self)
self.notebook.pack(fill="both", expand=True)
if self.fields is None:
return
for i, (name, desc) in enumerate(self.fields.items()):
if "tab" not in desc:
desc["tab"] = "main"
if desc["tab"] not in self.tab:
parent = Frame(self.notebook)
parent.columnconfigure(
[0, 1], weight=1, minsize=self.column_minsize
)
self.notebook.add(parent, text=desc["tab"].capitalize())
self.tab[desc["tab"]] = parent
else:
parent = self.tab[desc["tab"]]
if "group" in desc:
if desc["group"] not in self.group:
group = LabelFrame(parent, text=desc["group"].capitalize())
group.columnconfigure(
[0, 1], weight=1, minsize=self.column_minsize
)
group.grid(
row=i,
column=0,
columnspan=2,
sticky="ew",
padx=self.padx,
pady=9 * self.pady,
)
self.group[desc["group"]] = group
else:
group = self.group[desc["group"]]
parent = group
if "values" in desc:
values = list(desc["values"])
if "type" not in desc:
# if no type is given, first guess it based on a default value,
# or infer from the first valid value.
if "default" in desc and desc["default"] is not None:
desc["type"] = type(desc["default"])
elif "values" in desc:
desc["type"] = type(
[v for v in values if v is not None][0]
)
else:
raise ValueError(
f"could not infer type, please specify: {desc}"
)
if "default" not in desc:
# if no default is given, use the first value (even if None),
# or infer from type.
if "values" in desc:
desc["default"] = [v for v in values][0]
elif "type" in desc:
desc["default"] = desc["type"]()
else:
raise ValueError(
f"could not infer default, please specify: {desc}"
)
if desc["type"] is int or desc["type"] is np.int64:
self.variable[name] = IntVar(self)
elif desc["type"] is bool:
self.variable[name] = BooleanVar(self)
elif desc["type"] is str:
self.variable[name] = StringVar(self)
elif desc["type"] is float:
self.variable[name] = DoubleVar(self)
if "values" in desc:
values = [np.round(v, 2) for v in values]
else:
                raise ValueError(f"unknown type '{desc['type']}' for '{name}'")
if "text" in desc:
text = desc["text"]
else:
text = name.capitalize()
if "widget" not in desc:
# TODO(schneiderfelipe): should this be default?
desc["widget"] = Combobox
if desc["widget"] is Checkbutton:
self.widget[name] = desc["widget"](
parent, variable=self.variable[name], text=text
)
elif "values" in desc:
self.widget[name] = desc["widget"](
parent, textvariable=self.variable[name], values=values
)
else:
self.widget[name] = desc["widget"](
parent, textvariable=self.variable[name]
)
self.widget[name].grid(
row=i, column=1, sticky="ew", padx=self.padx, pady=self.pady
)
if "help" in desc:
create_tooltip(self.widget[name], desc["help"])
if desc["widget"] is not Checkbutton:
self.label[name] = Label(parent, text=text + ":")
self.label[name].grid(
row=i,
column=0,
sticky="ew",
padx=self.padx,
pady=self.pady,
)
if "visible" not in desc:
desc["visible"] = True
self.init_widgets()
def update_widgets(self, *args, **kwargs):
"""Update widget states."""
if self.fields is None:
return
options = {}
for name in self.variable:
try:
options[name] = self.variable[name].get()
except TclError:
options[name] = self.fields[name]["default"]
for name, desc in self.fields.items():
# TODO(schneiderfelipe): allow an analogous key "freeze", which
# does exactly the same as switch, but enables/disables the widget
            # instead of showing/hiding it. self.enable and self.disable should
# then accept an argument policy="freeze" or policy="switch" to
# make things easier. Both "switch" (meaning available/unavailable)
# and "freeze" (meaning impossible to change) can be used at the
# same time. "freeze" might require setting which value is locked.
if "switch" in desc:
if desc["switch"](options):
self.enable(name)
else:
self.disable(name)
|
#!/usr/bin/python3
"""Widget that simplifies defining questionnaires."""
import os
import pickle
from tkinter import BooleanVar
from tkinter import DoubleVar
from tkinter import IntVar
from tkinter import StringVar
from tkinter import TclError
from tkinter.ttk import Checkbutton
from tkinter.ttk import Combobox
from tkinter.ttk import Frame
from tkinter.ttk import Label
from tkinter.ttk import LabelFrame
from tkinter.ttk import Notebook
import numpy as np
from orcinus.gui.tooltip import create_tooltip
# TODO(schneiderfelipe): this will change in the future.
DATA_DIR = os.path.expanduser("~")
class Questionnaire(Frame):
"""Interface for simple questionnaires."""
def __init__(
self,
master=None,
fields=None,
state_filename=None,
padx=1,
pady=2,
column_minsize=240,
):
"""Construct object."""
super().__init__(master)
self.padx = padx
self.pady = pady
self.column_minsize = column_minsize
self.state_filename = state_filename
self.master = master
self.fields = fields
self.create_widgets()
def get_values(self):
"""Return a dictionary of all variable values."""
self.update_widgets()
values = {}
for name in self.variable:
if not self.fields[name]["visible"]:
values[name] = None
continue
try:
values[name] = self.variable[name].get()
except TclError:
values[name] = self.fields[name]["default"]
if values[name] == "None":
values[name] = None
if "values" in self.fields[name]:
translator = self.fields[name]["values"]
if isinstance(translator, dict):
try:
values[name] = translator[values[name]]
except KeyError:
values[name] = translator[self.fields[name]["default"]]
if values[name] == "None":
values[name] = None
return values
def init_widgets(self, *args, ignore_state=False, **kwargs):
"""Clear all fields to default values."""
if self.fields is None:
return
init_values = {
name: desc["default"] for name, desc in self.fields.items()
}
        state_path = os.path.join(DATA_DIR, self.state_filename) if self.state_filename else None
if (
not ignore_state
and self.state_filename
and os.path.isfile(state_path)
):
with open(state_path, "rb") as f:
state = pickle.load(f)
init_values.update(state)
for name, value in init_values.items():
try:
self.variable[name].set(value)
except KeyError:
pass
self.update_widgets()
def store_widgets(self, *args, **kwargs):
"""Store all fields to disk."""
if self.fields is None:
return
if self.state_filename:
state_path = os.path.join(DATA_DIR, self.state_filename)
state = {}
for name, _ in self.fields.items():
try:
state[name] = self.variable[name].get()
except TclError:
state[name] = self.fields[name]["default"]
with open(state_path, "wb") as f:
pickle.dump(state, f)
def enable(self, name):
"""Show a widget by name."""
if self.fields[name]["visible"]:
return
self.toggle(name)
def disable(self, name):
"""Hide a widget by name."""
if not self.fields[name]["visible"]:
return
self.toggle(name)
def toggle(self, name):
"""Hide or show a widget by name."""
if not self.fields[name]["visible"]:
self.widget[name].grid()
if name in self.label:
self.label[name].grid()
else:
self.widget[name].grid_remove()
if name in self.label:
self.label[name].grid_remove()
self.fields[name]["visible"] = not self.fields[name]["visible"]
def create_widgets(self):
"""Populate object and its widgets."""
self.variable = {}
self.label = {}
self.widget = {}
self.tab = {}
self.group = {}
self.notebook = Notebook(self)
self.notebook.pack(fill="both", expand=True)
if self.fields is None:
return
for i, (name, desc) in enumerate(self.fields.items()):
if "tab" not in desc:
desc["tab"] = "main"
if desc["tab"] not in self.tab:
parent = Frame(self.notebook)
parent.columnconfigure(
[0, 1], weight=1, minsize=self.column_minsize
)
self.notebook.add(parent, text=desc["tab"].capitalize())
self.tab[desc["tab"]] = parent
else:
parent = self.tab[desc["tab"]]
if "group" in desc:
if desc["group"] not in self.group:
group = LabelFrame(parent, text=desc["group"].capitalize())
group.columnconfigure(
[0, 1], weight=1, minsize=self.column_minsize
)
group.grid(
row=i,
column=0,
columnspan=2,
sticky="ew",
padx=self.padx,
pady=9 * self.pady,
)
self.group[desc["group"]] = group
else:
group = self.group[desc["group"]]
parent = group
if "values" in desc:
values = list(desc["values"])
if "type" not in desc:
# if no type is given, first guess it based on a default value,
# or infer from the first valid value.
if "default" in desc and desc["default"] is not None:
desc["type"] = type(desc["default"])
elif "values" in desc:
desc["type"] = type(
[v for v in values if v is not None][0]
)
else:
raise ValueError(
f"could not infer type, please specify: {desc}"
)
if "default" not in desc:
# if no default is given, use the first value (even if None),
# or infer from type.
if "values" in desc:
desc["default"] = [v for v in values][0]
elif "type" in desc:
desc["default"] = desc["type"]()
else:
raise ValueError(
f"could not infer default, please specify: {desc}"
)
if desc["type"] is int or desc["type"] is np.int64:
self.variable[name] = IntVar(self)
elif desc["type"] is bool:
self.variable[name] = BooleanVar(self)
elif desc["type"] is str:
self.variable[name] = StringVar(self)
elif desc["type"] is float:
self.variable[name] = DoubleVar(self)
if "values" in desc:
values = [np.round(v, 2) for v in values]
else:
raise ValueError(f"unknown type '{desc['type']}' for '{name}'")
if "text" in desc:
text = desc["text"]
else:
text = name.capitalize()
if "widget" not in desc:
# TODO(schneiderfelipe): should this be default?
desc["widget"] = Combobox
if desc["widget"] is Checkbutton:
self.widget[name] = desc["widget"](
parent, variable=self.variable[name], text=text
)
elif "values" in desc:
self.widget[name] = desc["widget"](
parent, textvariable=self.variable[name], values=values
)
else:
self.widget[name] = desc["widget"](
parent, textvariable=self.variable[name]
)
self.widget[name].grid(
row=i, column=1, sticky="ew", padx=self.padx, pady=self.pady
)
if "help" in desc:
create_tooltip(self.widget[name], desc["help"])
if desc["widget"] is not Checkbutton:
self.label[name] = Label(parent, text=text + ":")
self.label[name].grid(
row=i,
column=0,
sticky="ew",
padx=self.padx,
pady=self.pady,
)
if "visible" not in desc:
desc["visible"] = True
self.init_widgets()
def update_widgets(self, *args, **kwargs):
"""Update widget states."""
if self.fields is None:
return
options = {}
for name in self.variable:
try:
options[name] = self.variable[name].get()
except TclError:
options[name] = self.fields[name]["default"]
for name, desc in self.fields.items():
# TODO(schneiderfelipe): allow an analogous key "freeze", which
# does exactly the same as switch, but enables/disables the widget
            # instead of showing/hiding it. self.enable and self.disable should
# then accept an argument policy="freeze" or policy="switch" to
# make things easier. Both "switch" (meaning available/unavailable)
# and "freeze" (meaning impossible to change) can be used at the
# same time. "freeze" might require setting which value is locked.
if "switch" in desc:
if desc["switch"](options):
self.enable(name)
else:
self.disable(name)
|
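For context, the Questionnaire widget in the pair above builds its form from a plain dictionary of field descriptors (keys such as "default", "values", "type", "widget", "text", "tab", "group", "switch", and "help"). Below is a minimal usage sketch; the import path, field names, and state file name are hypothetical, and tkinter plus the orcinus package are assumed to be available.

```python
import tkinter as tk
from tkinter.ttk import Entry

from questionnaire import Questionnaire  # hypothetical import path for the widget above

root = tk.Tk()
fields = {
    # Combobox over integers; the type is inferred from the default value.
    "charge": {"default": 0, "values": [-2, -1, 0, 1, 2]},
    # A dict of values translates the displayed string into the returned value.
    "solvent": {"default": "None", "values": {"None": None, "Water": "water"}},
    # A free-form text entry placed on a second tab.
    "title": {"default": "", "widget": Entry, "tab": "details"},
}
form = Questionnaire(root, fields=fields, state_filename=".questionnaire.pickle")
form.pack(fill="both", expand=True)
# form.get_values() would return something like {"charge": 0, "solvent": None, "title": ""}.
root.mainloop()
```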
""" File to house a replyer service """
from service_framework import get_logger
LOG = get_logger()
def setup_config(config):
"""
Setup ~~ all the things ~~
"""
config['response_text'] = config.get('response_text', 'DEFAULT')
return config
def on_new_request(args, to_send, config):
"""
    Method triggered when a new request is received from
a requester.
"""
LOG.info('Got payload: %s', args)
    response = {'echoed': f'{config["response_text"]} {args["to_echo"]}'}
LOG.info('Responding with: %s', response)
return response
connection_models = {
'in': {
'reply': {
'connection_type': 'replyer',
'required_creation_arguments': {
'connection_function': on_new_request,
},
'required_arguments': {
'to_echo': str,
},
'required_return_arguments': {
'echoed': str,
}
}
}
}
|
""" File to house a replyer service """
from service_framework import get_logger
LOG = get_logger()
def setup_config(config):
"""
Setup ~~ all the things ~~
"""
config['response_text'] = config.get('response_text', 'DEFAULT')
return config
def on_new_request(args, to_send, config):
"""
    Method triggered when a new request is received from
a requester.
"""
LOG.info('Got payload: %s', args)
response = {'echoed': f'{config["response_text"]} {args["to_echo"]}'}
LOG.info('Responding with: %s', response)
return response
connection_models = {
'in': {
'reply': {
'connection_type': 'replyer',
'required_creation_arguments': {
'connection_function': on_new_request,
},
'required_arguments': {
'to_echo': str,
},
'required_return_arguments': {
'echoed': str,
}
}
}
}
|
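The replyer service in the pair above only registers on_new_request with service_framework through connection_models; the handler itself is an ordinary function. A minimal sketch of exercising it directly, assuming the module above is importable and that service_framework's logger works outside the service runtime:

```python
# Hypothetical direct check of the handler, bypassing the service_framework runtime.
config = setup_config({})                                # -> {'response_text': 'DEFAULT'}
reply = on_new_request({'to_echo': 'hello'}, None, config)
assert reply == {'echoed': 'DEFAULT hello'}
```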
#!/usr/bin/env python3
# Copyright (c) 2020 Teradici Corporation
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import base64
import importlib
import os
import site
import subprocess
import sys
import textwrap
import json
from abc import ABC, abstractmethod
SECRETS_START_FLAG = "# <-- Start of secrets section, do not edit this line. -->"
def import_or_install_module(pypi_package_name, module_name = None):
"""A function that imports a Python top-level package or module.
If the required package is not installed, it will install the package before importing it again.
Args:
pypi_package_name (str): the name of the PyPI package to be installed
module_name (str): the import name of the module if it is different than the PyPI package name
Returns:
module: the top-level package or module
"""
if module_name is None:
module_name = pypi_package_name
try:
module = importlib.import_module(module_name)
print(f"Successfully imported {module_name}.")
except ImportError:
install_cmd = f'{sys.executable} -m pip install {pypi_package_name} --user'
install_permission = input(
f"This script requires {pypi_package_name} but it is not installed.\n"
f"Proceed to install this package by running '{install_cmd}' (y/n)? ").strip().lower()
if install_permission not in ('y', 'yes'):
print(f"{pypi_package_name} is not installed. Exiting...")
sys.exit(1)
subprocess.check_call(install_cmd.split(' '))
print(f"Successfully installed {pypi_package_name}.")
# Refresh sys.path to detect new modules in user's home directory.
importlib.reload(site)
module = importlib.import_module(module_name)
print(f"Successfully imported {module_name}.")
except Exception as err:
print(f"An exception occurred importing {module_name}.\n")
raise SystemExit(err)
return module
class Tfvars_Parser:
"""Tfvars_Parser is used to read and parse data from a Terraform tfvars file.
It is used by the Tfvars_Encryptor class to automate the encryption or
decryption of secrets in a Terraform tfvars file so that it is ready to
be used for Terraform deployments using encrypted secrets.
Attributes
----------
tfvars_path : str
Path to the terraform.tfvars file.
tfvars_data : dict
Dictionary containing key value pairs for all terraform.tfvars configuration data.
tfvars_secrets : dict
Dictionary containing key value pairs for all terraform.tfvars secrets.
max_key_length : int
Longest string length of a tfvars_secrets key used to write secrets left-justified.
Methods
-------
__init__(tfvars_path)
read_tfvars(tfvars_file)
"""
def __init__(self, tfvars_path):
"""Tfvars_Parser class constructor to initialize the object.
Args
----
tfvars_path : str
Path to the terraform.tfvars file being parsed.
"""
# Read tfvars data and secrets into dictionaries
self.tfvars_path = tfvars_path
self.tfvars_data, self.tfvars_secrets = self.read_tfvars(tfvars_path)
# Find the max string length of all the keys to left-justify align them
self.max_key_length = max(map(len, self.tfvars_secrets))
def read_tfvars(self, tfvars_file):
"""A method that reads terraform.tfvars for all configuration data.
This method reads a terraform.tfvars file for all the user-provided
configuration data above the secrets.
Args
----
tfvars_file : str
Path to the terraform.tfvars file being parsed.
Returns
-------
tf_data, tf_secrets : tuple (dict, dict)
tf_data: key value pairs for all the terraform.tfvars data
tf_secrets: key value pairs for all the terraform.tfvars secrets
"""
tf_data = {}
tf_secrets = {}
begin_reading_secrets = False
try:
with open(tfvars_file) as f:
for line in f:
line = line.strip()
if SECRETS_START_FLAG in line:
begin_reading_secrets = True
continue
# Skip blank lines and comment lines
# "not line" must come first using short circuiting to avoid string index out of range error
if not line or line[0] in ("#"):
continue
# Split the line into key value pairs using the first delimiter
key, value = map(str.strip, line.split("=", 1))
if begin_reading_secrets:
tf_secrets[key] = value.replace("\"", "")
else:
tf_data[key] = value.replace("\"", "")
except Exception as err:
print("An exception occurred reading the terraform.tfvars file:\n")
raise SystemExit(err)
if not tf_secrets:
err = """\
An exception occurred reading the secrets in the terraform.tfvars file:\n
Ensure the start of secrets marker is present before the secrets section
in the terraform.tfvars file and try again.
i.e. '# <-- Start of secrets section, do not edit this line. -->'"""
raise SystemExit(textwrap.dedent(err))
return tf_data, tf_secrets
class Tfvars_Encryptor(ABC):
"""This is an abstract super class that is inherited by
AWS_Tfvars_Encryptor and GCP_Tfvars_Encryptor.
It contains common attributes and methods that are used by the sub
encryptor classes to automate the encryption and decryption of
terraform.tfvars files.
Attributes
----------
tfvars_parser : object
Instance of Tfvars_Parser used to read and store terraform.tfvars
secrets and configuration data.
kms_client : object
Instance of Key Management Service Client.
credentials_file : str
Path to the KMS client credentials file.
Methods
-------
Abstract methods:
__init__(tfvars_parser)
create_crypto_key(crypto_key_id)
decrypt_ciphertext(ciphertext, base64_encoded)
encrypt_plaintext(plaintext, base64_encoded)
get_crypto_keys()
initialize_cryptokey(crypto_key_id)
Concrete methods:
decrypt_file(file_path)
decrypt_tfvars_secrets()
encrypt_file(file_path)
encrypt_tfvars_secrets()
write_new_tfvars()
"""
@abstractmethod
def __init__(self, tfvars_parser):
"""Tfvars_Encryptor class constructor to initialize the object.
Args
----
tfvars_parser : object
Instance of Tfvars_Parser class.
"""
self.tfvars_parser = tfvars_parser
self.kms_client = None
self.credentials_file = None
@abstractmethod
def create_crypto_key(self, crypto_key_id): pass
@abstractmethod
def decrypt_ciphertext(self, ciphertext, base64_encoded): pass
@abstractmethod
def encrypt_plaintext(self, plaintext, base64_encoded): pass
@abstractmethod
def initialize_cryptokey(self, crypto_key_id): pass
def decrypt_file(self, file_path):
"""A method that decrypts the contents of a text file.
Uses the KMS client to decrypt ciphertext back to plaintext using the
provided symmetric crypto key that belongs to this instance.
Args
----
file_path : str
Path of the text file being decrypted.
Returns
-------
file_path_decrypted : str
Path to the decrypted text file created.
"""
try:
print(f"Decrypting file: {file_path}...")
# read binary file
with open(file_path, 'rb') as f:
f_ciphertext = f.read()
f_plaintext = self.decrypt_ciphertext(f_ciphertext)
# Removes the .encrypted appended using this encryptor
file_path_decrypted = f"{file_path}.decrypted".replace(".encrypted", "")
with open(file_path_decrypted, "w") as f:
f.write(f_plaintext)
except Exception as err:
print("An exception occurred decrypting file.\n")
raise SystemExit(err)
return file_path_decrypted
def decrypt_tfvars_secrets(self):
"""A method that decrypts the secrets contained in the terraform.tfvars file.
This method contains the logic for handling the decryption of the secrets
and any file paths associated with it using the KMS client. Once decrypted, it
calls write_new_tfvars() to write all secrets to a new terraform.tfvars file.
"""
# GCP uses kms_cryptokey_id while AWS uses customer_master_key_id
if type(self).__name__ == "GCP_Tfvars_Encryptor":
self.crypto_key_path = self.tfvars_parser.tfvars_data.get("kms_cryptokey_id")
if type(self).__name__ == "AWS_Tfvars_Encryptor":
self.customer_master_key_id = self.tfvars_parser.tfvars_data.get("customer_master_key_id")
# Decrypt all secrets
try:
for secret in self.tfvars_parser.tfvars_secrets:
# Additional handling needed if the string is a path to a file
# e.g. cas_mgr_deployment_sa_file
if os.path.isfile(self.tfvars_parser.tfvars_secrets.get(secret)):
self.tfvars_parser.tfvars_secrets[secret] = self.decrypt_file(self.tfvars_parser.tfvars_secrets.get(secret))
else:
print(f"Decrypting {secret}...")
self.tfvars_parser.tfvars_secrets[secret] = self.decrypt_ciphertext(self.tfvars_parser.tfvars_secrets.get(secret), True)
# Write encrypted secrets into new terraform.tfvars file
self.write_new_tfvars()
print("\nSuccessfully decrypted all secrets!\n")
except Exception as err:
print("An exception occurred decrypting secrets:\n")
raise SystemExit(err)
def encrypt_file(self, file_path):
"""A method that encrypts the contents of a text file.
Uses the KMS client to encrypt the plaintext in a file to ciphertext using
the provided symmetric crypto key that belongs to this instance.
Args
----
file_path : str
Path of the text file being encrypted.
Returns
-------
file_path_encrypted : str
Path to the encrypted text file created.
"""
try:
print(f"Encrypting file: {file_path}...")
with open(file_path) as f:
f_string = f.read()
f_encrypted_string = self.encrypt_plaintext(f_string)
file_path_encrypted = f"{file_path}.encrypted".replace(".decrypted", "")
# write byte string into file
with open(file_path_encrypted, "wb") as f:
f.write(f_encrypted_string)
except Exception as err:
print("An exception occurred encrypting the file:\n")
raise SystemExit(err)
return file_path_encrypted
def encrypt_tfvars_secrets(self):
"""A method that encrypts secrets contained in the terraform.tfvars file.
This method contains the logic for handling the encryption of the secrets
and any file paths associated with it using the KMS client. Once encrypted, it
calls write_new_tfvars() to write all secrets to a new terraform.tfvars file.
"""
# Encrypt all secrets found in the tfvars_secrets dictionary
try:
for secret in self.tfvars_parser.tfvars_secrets:
# Additional handling needed if the string is a path to a file
# e.g. cas_mgr_deployment_sa_file
if os.path.isfile(self.tfvars_parser.tfvars_secrets.get(secret)):
self.tfvars_parser.tfvars_secrets[secret] = self.encrypt_file(self.tfvars_parser.tfvars_secrets.get(secret))
else:
print(f"Encrypting {secret}...")
self.tfvars_parser.tfvars_secrets[secret] = self.encrypt_plaintext(self.tfvars_parser.tfvars_secrets.get(secret), True)
# Write encrypted secrets into new terraform.tfvars file
self.write_new_tfvars()
print("\nSuccessfully encrypted all secrets!\n")
except Exception as err:
print("An exception occurred encrypting secrets:\n")
raise SystemExit(err)
def write_new_tfvars(self):
"""A method that writes a new terraform.tfvars file.
This method writes a new terraform.tfvars file that is ready to be used by
Terraform after encrypting or decrypting.
"""
key_id = None
key_value = None
# GCP uses kms_cryptokey_id while AWS uses customer_master_key_id
if type(self).__name__ == "GCP_Tfvars_Encryptor":
key_id = "kms_cryptokey_id"
key_value = self.crypto_key_path
if type(self).__name__ == "AWS_Tfvars_Encryptor":
key_id = "customer_master_key_id"
key_value = self.customer_master_key_id
# Parse existing tfvars and store each line into a list
lines = []
try:
with open(self.tfvars_parser.tfvars_path) as f:
for line in f:
# Remove leading and trailing whitespace including "\n" and "\t"
line = line.strip()
# Append the crypto key value to key_id line
if key_id + " =" in line:
if not self.tfvars_parser.tfvars_data.get(key_id):
lines.append(f"{key_id} = \"{key_value}\"")
else:
lines.append(f"# {key_id} = \"{key_value}\"")
continue
# Blank lines and comments are unchanged
# "not line" must come first using short circuit to avoid string index out of range error
if not line or line[0] in ("#"):
lines.append(line)
continue
# Need to keep the .strip() here to sanitize the key being read
key = line.split("=")[0].strip()
if key in self.tfvars_parser.tfvars_secrets.keys():
# Left justify all the secrets with space as padding on the right
                        lines.append(f"{key.ljust(self.tfvars_parser.max_key_length, ' ')} = \"{self.tfvars_parser.tfvars_secrets.get(key)}\"")
else:
lines.append(line)
# Add .backup postfix to the original tfvars file
print("Creating backup of terraform.tfvars...")
os.rename(self.tfvars_parser.tfvars_path, f"{self.tfvars_parser.tfvars_path}.backup")
# Rewrite the existing terraform.tfvars
print("Writing new terraform.tfvars...")
with open(self.tfvars_parser.tfvars_path, "w") as f:
f.writelines("%s\n" %line for line in lines)
except Exception as err:
print("An exception occurred writing the terraform.tfvars file:\n")
raise SystemExit(err)
class GCP_Tfvars_Encryptor(Tfvars_Encryptor):
"""This is an concrete sub class that inherits from Tfvars_Encryptor.
It contains attributes and methods specific to GCP KMS client to
automate the encryption and decryption of terraform.tfvars files.
Attributes
----------
gcp_credentials : object
GCP Credentials object for a GCP service account.
project_id : str
GCP project ID associated with the GCP service account.
location : str
Defaulted to use "global" as the location.
key_ring_id : str
Defaulted to use "cas_keyring" as a key ring ID.
crypto_key_id : str
Defaulted to use "cas_key" as the crypto key ID.
crypto_key_path : str
Full GCP resource path to the crypto key being used to encrypt and decrypt.
Methods
-------
__init__(tfvars_parser)
create_crypto_key(crypto_key_id)
decrypt_ciphertext(ciphertext, base64_encoded)
encrypt_plaintext(plaintext, base64_encoded)
get_crypto_keys(key_ring_id)
get_key_rings()
initialize_cryptokey(crypto_key_id)
initialize_keyring(key_ring_id)
"""
def __init__(self, tfvars_parser):
"""GCP_Tfvars_Encryptor class constructor to initialize the object.
Args
----
tfvars_parser : object
Instance of Tfvars_Parser class.
"""
super().__init__(tfvars_parser)
# Install and import the required GCP modules
global kms
global service_account
kms = import_or_install_module("google-cloud-kms", "google.cloud.kms")
service_account = import_or_install_module("google_oauth2_tool", "google.oauth2.service_account")
# Set GCP credentials instance variable from tfvars_data
self.credentials_file = self.tfvars_parser.tfvars_data.get("gcp_credentials_file")
# Create a client for the KMS API using the provided GCP service account
self.gcp_credentials = service_account.Credentials.from_service_account_file(self.credentials_file)
self.kms_client = kms.KeyManagementServiceClient(credentials = self.gcp_credentials)
# GCP KMS resource variables
with open(self.credentials_file) as f:
cred_file_json=json.load(f)
self.project_id = cred_file_json["project_id"]
self.location = "global"
self.key_ring_id = self.initialize_keyring("cas_keyring")
self.crypto_key_id = self.initialize_cryptokey("cas_key")
self.crypto_key_path = self.kms_client.crypto_key_path(self.project_id, self.location, self.key_ring_id, self.crypto_key_id)
def create_crypto_key(self, crypto_key_id):
"""A method to create a crypto key on GCP KMS.
Args
----
crypto_key_id : str
name of the crypto key to be created.
Returns
-------
created_crypto_key.name : str
name of the crypto key created.
"""
# Create the crypto key object template
purpose = kms.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
crypto_key = { "purpose": purpose }
# Create a crypto key for the given key ring
parent = self.kms_client.key_ring_path(self.project_id, self.location, self.key_ring_id)
created_crypto_key = self.kms_client.create_crypto_key(
request={'parent': parent, 'crypto_key_id': crypto_key_id, 'crypto_key': crypto_key}
)
        print(f"Created crypto key: {created_crypto_key.name}\n")
return created_crypto_key.name
def decrypt_ciphertext(self, ciphertext, base64_encoded=False):
"""A method that decrypts ciphertext.
Uses GCP KMS to decrypt ciphertext back to plaintext using the provided
symmetric crypto key that belongs to this instance.
Args
----
ciphertext : str
the ciphertext being decrypted.
base64_encoded : boolean
the boolean param shows whether the ciphertext is base64 encoded.
Returns
-------
plaintext : str
the decrypted secret in plaintext.
"""
# Convert ciphertext string to a byte string, then Base64 decode it
if base64_encoded:
ciphertext = base64.b64decode(ciphertext.encode("utf-8"))
# Use the KMS API to decrypt the data
response = self.kms_client.decrypt(
request={'name': self.crypto_key_path, 'ciphertext': ciphertext}
)
# Decode Base64 plaintext
plaintext = response.plaintext.decode("utf-8")
return plaintext
def encrypt_plaintext(self, plaintext, base64_encoded=False):
"""A method that encrypts plaintext.
Uses GCP KMS to encrypt plaintext to ciphertext using the provided
symmetric crypto key that belongs to this instance.
Args
----
plaintext : str
            the plaintext being encrypted.
        base64_encoded : boolean
            the boolean param shows whether the returned ciphertext needs to be base64 encoded.
Returns
-------
ciphertext : str
the encrypted secret in ciphertext.
"""
# Use the KMS API to encrypt the data.
response = self.kms_client.encrypt(
request={'name': self.crypto_key_path, 'plaintext': plaintext.encode('utf-8')}
)
# Base64 encoding of ciphertext
if base64_encoded:
ciphertext = base64.b64encode(response.ciphertext).decode("utf-8")
else:
ciphertext = response.ciphertext
return ciphertext
def get_crypto_keys(self, key_ring_id):
"""A method that retrieves a list of crypto keys associated with a key ring.
This method returns a list of all the crypto keys associated with a specific key ring.
Args
----
key_ring_id : str
string ID for the GCP KMS key ring.
Returns
-------
crypto_keys_list : list
a list of all the crypto keys associated with the key ring argument.
"""
parent = self.kms_client.key_ring_path(self.project_id, self.location, key_ring_id)
response = self.kms_client.list_crypto_keys(request={'parent': parent})
# Access the name property and split string from the right. [2] to get the string after the separator
# eg. name: "projects/user-terraform/locations/global/keyRings/cas_keyring/cryptoKeys/cas_key"
crypto_keys_list = list(map(lambda key: key.name.rpartition("/")[2], response))
return crypto_keys_list
def get_key_rings(self):
"""A method that retrieves a list of key rings.
This method returns a list of all the key rings associated
with the GCP service account.
Returns
-------
key_rings_list : list
a list of all the key rings.
"""
parent = f'projects/{self.project_id}/locations/{self.location}'
response = self.kms_client.list_key_rings(request={'parent': parent})
# Access the name property and split string from the right. [2] to get the string after the separator
# eg. name: "projects/user-terraform/locations/global/keyRings/cas_keyring"
key_rings_list = list(map(lambda key_ring: key_ring.name.rpartition("/")[2], response))
return key_rings_list
def initialize_cryptokey(self, crypto_key_id):
"""A method that initializes this instance's crypto key.
This initialization method is called in the constructor to
create a default crypto key if it doesn't exist. If the key
exists already, then reuse it for this instance.
Args
----
crypto_key_id : str
the GCP crypto key ID used to encrypt and decrypt.
Returns
-------
crypto_key_id : str
the GCP crypto key ID used to encrypt and decrypt.
"""
crypto_keys_list = self.get_crypto_keys(self.key_ring_id)
# Create the crypto key only if it doesn't exist
if crypto_key_id not in crypto_keys_list:
try:
self.create_crypto_key(crypto_key_id)
print(f"Created key: {crypto_key_id}\n")
except Exception as err:
print("An exception occurred creating new crypto key:\n")
raise SystemExit(err)
else:
print(f"Using existing crypto key: {crypto_key_id}\n")
return crypto_key_id
def initialize_keyring(self, key_ring_id):
"""A method that initializes this instance's key ring.
This initialization method is called in the constructor to
create a default key ring if it doesn't exist.
Args
----
key_ring_id : str
key ring being created.
Returns
-------
key_ring_id : str
the key ring used.
"""
key_rings_list = self.get_key_rings()
# Create the key ring only if it doesn't exist
if key_ring_id not in key_rings_list:
try:
parent = f'projects/{self.project_id}/locations/{self.location}'
key_ring = {}
created_key_ring = self.kms_client.create_key_ring(
request={'parent': parent, 'key_ring_id': key_ring_id, 'key_ring': key_ring}
)
print(f"Created key ring: {key_ring_id}\n")
except Exception as err:
print("An exception occurred creating new key ring:\n")
raise SystemExit(err)
else:
print(f"Using existing key ring: {key_ring_id}\n")
return key_ring_id
class AWS_Tfvars_Encryptor(Tfvars_Encryptor):
"""This is a concrete sub class that inherits from Tfvars_Encryptor.
It contains attributes and methods specific to AWS KMS client to
automate the encryption and decryption of terraform.tfvars files.
Attributes
----------
aws_credentials : dict
Dictionary containing two keys: aws_access_key_id and aws_secret_access_key.
customer_master_key_id : str
Defaulted to use "cas_key" as a crypto key ID.
Methods
-------
__init__(tfvars_parser)
create_crypto_key(crypto_key_alias)
decrypt_ciphertext(ciphertext, base64_encoded)
encrypt_plaintext(plaintext, base64_encoded)
initialize_aws_credentials(path)
initialize_cryptokey(crypto_key_alias_name)
get_crypto_keys()
"""
def __init__(self, tfvars_parser):
"""AWS_Tfvars_Encryptor class constructor to initialize the object.
Args
----
tfvars_parser : object
instance of Tfvars_Parser class.
"""
super().__init__(tfvars_parser)
# Install and import the required AWS modules
global boto3
boto3 = import_or_install_module("boto3")
# Set AWS credentials instance variables from tfvars_data
self.credentials_file = self.tfvars_parser.tfvars_data.get("aws_credentials_file")
# Create a client for the KMS API using the provided AWS credentials
self.aws_credentials = self.initialize_aws_credentials(self.credentials_file)
self.kms_client = boto3.client(
"kms",
aws_access_key_id = self.aws_credentials.get("aws_access_key_id"),
aws_secret_access_key = self.aws_credentials.get("aws_secret_access_key"),
region_name = self.tfvars_parser.tfvars_data.get("aws_region","us-west-1")
)
# AWS KMS resource variables
self.customer_master_key_id = self.initialize_cryptokey("cas_key")
def create_crypto_key(self, crypto_key_alias):
"""A method to create a crypto key on AWS KMS.
Args
----
crypto_key_alias : str
alias name of the crypto key being created.
Returns
------
customer_master_key_id : string
customer_master_key_id value used for terraform.tfvars file.
"""
# Use KMS client to create key and store the returned KeyId
customer_master_key_id = self.kms_client.create_key().get("KeyMetadata").get("KeyId")
# Give this KeyId an alias name
self.kms_client.create_alias(
# The alias to create. Aliases must begin with "alias/".
AliasName = f"alias/{crypto_key_alias}",
TargetKeyId = customer_master_key_id
)
print(f"Created {crypto_key_alias}: {customer_master_key_id}\n")
return customer_master_key_id
def decrypt_ciphertext(self, ciphertext, base64_encoded=False):
"""A method that decrypts ciphertext.
Uses AWS KMS to decrypt ciphertext back to plaintext using the provided
symmetric crypto key that belongs to this instance.
Args
----
ciphertext : str
the ciphertext being decrypted.
base64_encoded : boolean
the boolean param shows whether the ciphertext is base64 encoded.
Returns
-------
plaintext : str
the decrypted secret in plaintext.
"""
# Convert ciphertext string to a byte string, then Base64 decode it
if base64_encoded:
ciphertext = base64.b64decode(ciphertext.encode("utf-8"))
# Use the KMS API to decrypt the data
response = self.kms_client.decrypt(
KeyId = self.customer_master_key_id,
CiphertextBlob = ciphertext
)
# Decode Base64 plaintext
plaintext = response.get("Plaintext").decode("utf-8")
return plaintext
def encrypt_plaintext(self, plaintext, base64_encoded=False):
"""A method that encrypts plaintext.
Uses AWS KMS to encrypt plaintext to ciphertext using the provided
symmetric crypto key that belongs to this instance.
Args
----
        plaintext : str
            the plaintext being encrypted.
        base64_encoded : boolean
            the boolean param shows whether the returned ciphertext needs to be base64 encoded.
Returns
-------
ciphertext : str
the encrypted secret in ciphertext.
"""
# Use the KMS API to encrypt the data.
response = self.kms_client.encrypt(
KeyId = self.customer_master_key_id,
Plaintext = plaintext.encode("utf-8")
)
# Base64 encoding of ciphertext
if base64_encoded:
ciphertext = base64.b64encode(response.get("CiphertextBlob")).decode("utf-8")
else:
ciphertext = response.get("CiphertextBlob")
return ciphertext
def get_crypto_keys(self):
"""A method that retrieves a list of crypto keys aliase names
associated with the AWS credentials in the region.
Returns
-------
crypto_keys_list : list
            a list of all the crypto key alias names associated with the AWS credentials in the region.
"""
# Use crypto keys data under the "Aliases" dict key
response = self.kms_client.list_aliases().get("Aliases")
# Access the "AliasName" property for each key entry by splitting string from the right. [2] to get the string after the separator
# eg. response.get("Aliases") returns [{"AliasName": "<alias/AliasName>", "AliasArn": "<AliasArn>", "TargetKeyId": "<TargetKeyId>"}]
crypto_keys_list = list(map(lambda key: key.get("AliasName").rpartition("/")[2], response))
return crypto_keys_list
def initialize_aws_credentials(self, file_path):
"""A method that parses the aws_access_key_id and aws_secret_access_key
from aws_credentials_file required for the KMS client.
This initialization method is used in the constructor to
initialize both the aws_access_key_id and aws_secret_access_key
by parsing the aws_credentials_file.
Args
----
file_path : str
path to aws_credentials_file.
Returns
-------
dict
Dictionary containing the "aws_access_key_id" and "aws_secret_access_key".
"""
aws_access_key_id = None
aws_secret_access_key = None
try:
with open(file_path) as f:
for line in f:
line = line.strip()
# Skip blank lines and comment lines
# "not line" must come first using short circuiting to avoid string index out of range error
if not line or line[0] in ("#"):
continue
if "aws_secret_access_key" in line:
aws_secret_access_key = line.rpartition("=")[2].strip()
continue
if "aws_access_key_id" in line:
aws_access_key_id = line.rpartition("=")[2].strip()
continue
except Exception as err:
print("An exception occurred initializing AWS credentials:\n")
raise SystemExit(err)
return { "aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key }
def initialize_cryptokey(self, crypto_key_alias_name):
"""A method that initializes this instance's crypto key.
This initialization method is called in the constructor to
create a default crypto key if it doesn't exist. If the key
exists already, then reuses it for this instance.
Args
----
crypto_key_alias_name : str
the AWS crypto key alias name used to encrypt and decrypt.
Returns
-------
customer_master_key_id : str
the AWS crypto key used to encrypt and decrypt.
"""
crypto_keys_list = self.get_crypto_keys()
customer_master_key_id = None
# Create the crypto key only if it doesn't exist
if crypto_key_alias_name not in crypto_keys_list:
try:
self.create_crypto_key(crypto_key_alias_name)
except Exception as err:
print("An exception occurred creating new crypto key:\n")
raise SystemExit(err)
else:
# Use crypto keys data under the "Aliases" dict key
response = self.kms_client.list_aliases().get("Aliases")
            # Trim the "AliasName" string for each key entry by splitting string from the right. [2] to get just the "AliasName" after the separator
# For each key entry, compare the string to find a match.
# eg. response.get("Aliases") returns [{"AliasName": "<alias/AliasName>", "AliasArn": "<AliasArn>", "TargetKeyId": "<TargetKeyId>"}]
matched_crypto_keys = filter(lambda key: key.get("AliasName").rpartition("/")[2] == crypto_key_alias_name, response)
# Access the "TargetKeyId" property of the first matched key to retrieve the customer_master_key_id associated with it.
customer_master_key_id = list(matched_crypto_keys)[0].get("TargetKeyId")
print(f"Using existing crypto key {crypto_key_alias_name}: {customer_master_key_id}\n")
return customer_master_key_id
def main():
# Set up argparse
    parser_description = ("Uses a KMS key to encrypt or decrypt secrets in the specified terraform.tfvars. "
                          "The script encrypts by default. To decrypt instead, add the -d flag.")
parser = argparse.ArgumentParser(description = parser_description)
parser.add_argument("tfvars", help = "specify the path to terraform.tfvars file")
parser.add_argument("-d", help = "decrypt secrets in terraform.tfvars specified", action = "store_true")
args = parser.parse_args()
# Instantiate a Tfvars_Parser to read the terraform.tfvars file
tfvars_parser = Tfvars_Parser(args.tfvars)
tfvars_encryptor = None
# Instantiate a GCP_Tfvars_Encryptor or AWS_Tfvars_Encryptor
if tfvars_parser.tfvars_data.get("gcp_credentials_file"):
tfvars_encryptor = GCP_Tfvars_Encryptor(tfvars_parser)
elif tfvars_parser.tfvars_data.get("aws_credentials_file"):
tfvars_encryptor = AWS_Tfvars_Encryptor(tfvars_parser)
# Abort the script if credentials is missing
else:
        print("Missing gcp_credentials_file or aws_credentials_file in tfvars. "
              "Ensure the credentials file is valid and try again.\n")
raise SystemExit()
# Encryption is the default, decryption if user specified the -d flag
if args.d:
# Abort the decryption if there is not a kms_cryptokey_id (GCP) or customer_master_key_id (AWS) in the tfvars file
if (not tfvars_parser.tfvars_data.get("kms_cryptokey_id") and
not tfvars_parser.tfvars_data.get("customer_master_key_id")):
print("No kms_cryptokey_id or customer_master_key_id present in tfvars. "
"Ensure the secrets are encrypted and try again.\n")
raise SystemExit()
tfvars_encryptor.decrypt_tfvars_secrets()
else:
# Abort the encryption if there is already a kms_cryptokey_id (GCP) or customer_master_key_id (AWS) present
if (tfvars_parser.tfvars_data.get("kms_cryptokey_id") or
tfvars_parser.tfvars_data.get("customer_master_key_id")):
            print("Detected kms_cryptokey_id or customer_master_key_id in tfvars. "
"Ensure secrets are not already encrypted and try again.\n")
raise SystemExit()
tfvars_encryptor.encrypt_tfvars_secrets()
if __name__ == "__main__":
main()
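For reference, the main() entry point above boils down to the following programmatic flow, which may be useful when driving the encryptor from another script (a sketch; the tfvars path is illustrative and the credentials file it references must be valid):

```python
# Hypothetical programmatic use of the classes defined above.
tfvars_parser = Tfvars_Parser("/path/to/terraform.tfvars")
if tfvars_parser.tfvars_data.get("gcp_credentials_file"):
    encryptor = GCP_Tfvars_Encryptor(tfvars_parser)
else:
    encryptor = AWS_Tfvars_Encryptor(tfvars_parser)
encryptor.encrypt_tfvars_secrets()  # or decrypt_tfvars_secrets() to reverse
```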
|
#!/usr/bin/env python3
# Copyright (c) 2020 Teradici Corporation
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import base64
import importlib
import os
import site
import subprocess
import sys
import textwrap
import json
from abc import ABC, abstractmethod
SECRETS_START_FLAG = "# <-- Start of secrets section, do not edit this line. -->"
def import_or_install_module(pypi_package_name, module_name = None):
"""A function that imports a Python top-level package or module.
If the required package is not installed, it will install the package before importing it again.
Args:
pypi_package_name (str): the name of the PyPI package to be installed
module_name (str): the import name of the module if it is different than the PyPI package name
Returns:
module: the top-level package or module
"""
if module_name is None:
module_name = pypi_package_name
try:
module = importlib.import_module(module_name)
print(f"Successfully imported {module_name}.")
except ImportError:
install_cmd = f'{sys.executable} -m pip install {pypi_package_name} --user'
install_permission = input(
f"This script requires {pypi_package_name} but it is not installed.\n"
f"Proceed to install this package by running '{install_cmd}' (y/n)? ").strip().lower()
if install_permission not in ('y', 'yes'):
print(f"{pypi_package_name} is not installed. Exiting...")
sys.exit(1)
subprocess.check_call(install_cmd.split(' '))
print(f"Successfully installed {pypi_package_name}.")
# Refresh sys.path to detect new modules in user's home directory.
importlib.reload(site)
module = importlib.import_module(module_name)
print(f"Successfully imported {module_name}.")
except Exception as err:
print(f"An exception occurred importing {module_name}.\n")
raise SystemExit(err)
return module
class Tfvars_Parser:
"""Tfvars_Parser is used to read and parse data from a Terraform tfvars file.
It is used by the Tfvars_Encryptor class to automate the encryption or
decryption of secrets in a Terraform tfvars file so that it is ready to
be used for Terraform deployments using encrypted secrets.
Attributes
----------
tfvars_path : str
Path to the terraform.tfvars file.
tfvars_data : dict
Dictionary containing key value pairs for all terraform.tfvars configuration data.
tfvars_secrets : dict
Dictionary containing key value pairs for all terraform.tfvars secrets.
max_key_length : int
Longest string length of a tfvars_secrets key used to write secrets left-justified.
Methods
-------
__init__(tfvars_path)
read_tfvars(tfvars_file)
"""
def __init__(self, tfvars_path):
"""Tfvars_Parser class constructor to initialize the object.
Args
----
tfvars_path : str
Path to the terraform.tfvars file being parsed.
"""
# Read tfvars data and secrets into dictionaries
self.tfvars_path = tfvars_path
self.tfvars_data, self.tfvars_secrets = self.read_tfvars(tfvars_path)
# Find the max string length of all the keys to left-justify align them
self.max_key_length = max(map(len, self.tfvars_secrets))
def read_tfvars(self, tfvars_file):
"""A method that reads terraform.tfvars for all configuration data.
This method reads a terraform.tfvars file for all the user-provided
configuration data above the secrets.
Args
----
tfvars_file : str
Path to the terraform.tfvars file being parsed.
Returns
-------
tf_data, tf_secrets : tuple (dict, dict)
tf_data: key value pairs for all the terraform.tfvars data
tf_secrets: key value pairs for all the terraform.tfvars secrets
"""
tf_data = {}
tf_secrets = {}
begin_reading_secrets = False
try:
with open(tfvars_file) as f:
for line in f:
line = line.strip()
if SECRETS_START_FLAG in line:
begin_reading_secrets = True
continue
# Skip blank lines and comment lines
# "not line" must come first using short circuiting to avoid string index out of range error
if not line or line[0] in ("#"):
continue
# Split the line into key value pairs using the first delimiter
key, value = map(str.strip, line.split("=", 1))
if begin_reading_secrets:
tf_secrets[key] = value.replace("\"", "")
else:
tf_data[key] = value.replace("\"", "")
except Exception as err:
print("An exception occurred reading the terraform.tfvars file:\n")
raise SystemExit(err)
if not tf_secrets:
err = """\
An exception occurred reading the secrets in the terraform.tfvars file:\n
Ensure the start of secrets marker is present before the secrets section
in the terraform.tfvars file and try again.
i.e. '# <-- Start of secrets section, do not edit this line. -->'"""
raise SystemExit(textwrap.dedent(err))
return tf_data, tf_secrets
class Tfvars_Encryptor(ABC):
"""This is an abstract super class that is inherited by
AWS_Tfvars_Encryptor and GCP_Tfvars_Encryptor.
It contains common attributes and methods that are used by the sub
encryptor classes to automate the encryption and decryption of
terraform.tfvars files.
Attributes
----------
tfvars_parser : object
Instance of Tfvars_Parser used to read and store terraform.tfvars
secrets and configuration data.
kms_client : object
Instance of Key Management Service Client.
credentials_file : str
Path to the KMS client credentials file.
Methods
-------
Abstract methods:
__init__(tfvars_parser)
create_crypto_key(crypto_key_id)
decrypt_ciphertext(ciphertext, base64_encoded)
encrypt_plaintext(plaintext, base64_encoded)
get_crypto_keys()
initialize_cryptokey(crypto_key_id)
Concrete methods:
decrypt_file(file_path)
decrypt_tfvars_secrets()
encrypt_file(file_path)
encrypt_tfvars_secrets()
write_new_tfvars()
"""
@abstractmethod
def __init__(self, tfvars_parser):
"""Tfvars_Encryptor class constructor to initialize the object.
Args
----
tfvars_parser : object
Instance of Tfvars_Parser class.
"""
self.tfvars_parser = tfvars_parser
self.kms_client = None
self.credentials_file = None
@abstractmethod
def create_crypto_key(self, crypto_key_id): pass
@abstractmethod
def decrypt_ciphertext(self, ciphertext, base64_encoded): pass
@abstractmethod
def encrypt_plaintext(self, plaintext, base64_encoded): pass
@abstractmethod
def initialize_cryptokey(self, crypto_key_id): pass
def decrypt_file(self, file_path):
"""A method that decrypts the contents of a text file.
Uses the KMS client to decrypt ciphertext back to plaintext using the
provided symmetric crypto key that belongs to this instance.
Args
----
file_path : str
Path of the text file being decrypted.
Returns
-------
file_path_decrypted : str
Path to the decrypted text file created.
"""
try:
print(f"Decrypting file: {file_path}...")
# read binary file
with open(file_path, 'rb') as f:
f_ciphertext = f.read()
f_plaintext = self.decrypt_ciphertext(f_ciphertext)
# Removes the .encrypted appended using this encryptor
file_path_decrypted = f"{file_path}.decrypted".replace(".encrypted", "")
with open(file_path_decrypted, "w") as f:
f.write(f_plaintext)
except Exception as err:
print("An exception occurred decrypting file.\n")
raise SystemExit(err)
return file_path_decrypted
def decrypt_tfvars_secrets(self):
"""A method that decrypts the secrets contained in the terraform.tfvars file.
This method contains the logic for handling the decryption of the secrets
and any file paths associated with it using the KMS client. Once decrypted, it
calls write_new_tfvars() to write all secrets to a new terraform.tfvars file.
"""
# GCP uses kms_cryptokey_id while AWS uses customer_master_key_id
if type(self).__name__ == "GCP_Tfvars_Encryptor":
self.crypto_key_path = self.tfvars_parser.tfvars_data.get("kms_cryptokey_id")
if type(self).__name__ == "AWS_Tfvars_Encryptor":
self.customer_master_key_id = self.tfvars_parser.tfvars_data.get("customer_master_key_id")
# Decrypt all secrets
try:
for secret in self.tfvars_parser.tfvars_secrets:
# Additional handling needed if the string is a path to a file
# e.g. cas_mgr_deployment_sa_file
if os.path.isfile(self.tfvars_parser.tfvars_secrets.get(secret)):
self.tfvars_parser.tfvars_secrets[secret] = self.decrypt_file(self.tfvars_parser.tfvars_secrets.get(secret))
else:
print(f"Decrypting {secret}...")
self.tfvars_parser.tfvars_secrets[secret] = self.decrypt_ciphertext(self.tfvars_parser.tfvars_secrets.get(secret), True)
# Write encrypted secrets into new terraform.tfvars file
self.write_new_tfvars()
print("\nSuccessfully decrypted all secrets!\n")
except Exception as err:
print("An exception occurred decrypting secrets:\n")
raise SystemExit(err)
def encrypt_file(self, file_path):
"""A method that encrypts the contents of a text file.
Uses the KMS client to encrypt the plaintext in a file to ciphertext using
the provided symmetric crypto key that belongs to this instance.
Args
----
file_path : str
Path of the text file being encrypted.
Returns
-------
file_path_encrypted : str
Path to the encrypted text file created.
"""
try:
print(f"Encrypting file: {file_path}...")
with open(file_path) as f:
f_string = f.read()
f_encrypted_string = self.encrypt_plaintext(f_string)
file_path_encrypted = f"{file_path}.encrypted".replace(".decrypted", "")
# write byte string into file
with open(file_path_encrypted, "wb") as f:
f.write(f_encrypted_string)
except Exception as err:
print("An exception occurred encrypting the file:\n")
raise SystemExit(err)
return file_path_encrypted
def encrypt_tfvars_secrets(self):
"""A method that encrypts secrets contained in the terraform.tfvars file.
This method contains the logic for handling the encryption of the secrets
and any file paths associated with it using the KMS client. Once encrypted, it
calls write_new_tfvars() to write all secrets to a new terraform.tfvars file.
"""
# Encrypt all secrets found in the tfvars_secrets dictionary
try:
for secret in self.tfvars_parser.tfvars_secrets:
# Additional handling needed if the string is a path to a file
# e.g. cas_mgr_deployment_sa_file
if os.path.isfile(self.tfvars_parser.tfvars_secrets.get(secret)):
self.tfvars_parser.tfvars_secrets[secret] = self.encrypt_file(self.tfvars_parser.tfvars_secrets.get(secret))
else:
print(f"Encrypting {secret}...")
self.tfvars_parser.tfvars_secrets[secret] = self.encrypt_plaintext(self.tfvars_parser.tfvars_secrets.get(secret), True)
# Write encrypted secrets into new terraform.tfvars file
self.write_new_tfvars()
print("\nSuccessfully encrypted all secrets!\n")
except Exception as err:
print("An exception occurred encrypting secrets:\n")
raise SystemExit(err)
def write_new_tfvars(self):
"""A method that writes a new terraform.tfvars file.
This method writes a new terraform.tfvars file that is ready to be used by
Terraform after encrypting or decrypting.
"""
key_id = None
key_value = None
# GCP uses kms_cryptokey_id while AWS uses customer_master_key_id
if type(self).__name__ == "GCP_Tfvars_Encryptor":
key_id = "kms_cryptokey_id"
key_value = self.crypto_key_path
if type(self).__name__ == "AWS_Tfvars_Encryptor":
key_id = "customer_master_key_id"
key_value = self.customer_master_key_id
# Parse existing tfvars and store each line into a list
lines = []
try:
with open(self.tfvars_parser.tfvars_path) as f:
for line in f:
# Remove leading and trailing whitespace including "\n" and "\t"
line = line.strip()
# Append the crypto key value to key_id line
if key_id + " =" in line:
if not self.tfvars_parser.tfvars_data.get(key_id):
lines.append(f"{key_id} = \"{key_value}\"")
else:
lines.append(f"# {key_id} = \"{key_value}\"")
continue
# Blank lines and comments are unchanged
# "not line" must come first using short circuit to avoid string index out of range error
if not line or line[0] in ("#"):
lines.append(line)
continue
# Need to keep the .strip() here to sanitize the key being read
key = line.split("=")[0].strip()
if key in self.tfvars_parser.tfvars_secrets.keys():
# Left justify all the secrets with space as padding on the right
lines.append(f"{key.ljust(self.tfvars_parser.max_key_length, ' ')} = \"{self.tfvars_parser.tfvars_secrets.get(key)}\"")
else:
lines.append(line)
# Add .backup postfix to the original tfvars file
print("Creating backup of terraform.tfvars...")
os.rename(self.tfvars_parser.tfvars_path, f"{self.tfvars_parser.tfvars_path}.backup")
# Rewrite the existing terraform.tfvars
print("Writing new terraform.tfvars...")
with open(self.tfvars_parser.tfvars_path, "w") as f:
                f.writelines("%s\n" % line for line in lines)
except Exception as err:
print("An exception occurred writing the terraform.tfvars file:\n")
raise SystemExit(err)
class GCP_Tfvars_Encryptor(Tfvars_Encryptor):
"""This is an concrete sub class that inherits from Tfvars_Encryptor.
It contains attributes and methods specific to GCP KMS client to
automate the encryption and decryption of terraform.tfvars files.
Attributes
----------
gcp_credentials : object
GCP Credentials object for a GCP service account.
project_id : str
GCP project ID associated with the GCP service account.
location : str
Defaulted to use "global" as the location.
key_ring_id : str
Defaulted to use "cas_keyring" as a key ring ID.
crypto_key_id : str
Defaulted to use "cas_key" as the crypto key ID.
crypto_key_path : str
Full GCP resource path to the crypto key being used to encrypt and decrypt.
Methods
-------
__init__(tfvars_parser)
create_crypto_key(crypto_key_id)
decrypt_ciphertext(ciphertext, base64_encoded)
encrypt_plaintext(plaintext, base64_encoded)
get_crypto_keys(key_ring_id)
get_key_rings()
initialize_cryptokey(crypto_key_id)
initialize_keyring(key_ring_id)
"""
def __init__(self, tfvars_parser):
"""GCP_Tfvars_Encryptor class constructor to initialize the object.
Args
----
tfvars_parser : object
Instance of Tfvars_Parser class.
"""
super().__init__(tfvars_parser)
# Install and import the required GCP modules
global kms
global service_account
kms = import_or_install_module("google-cloud-kms", "google.cloud.kms")
service_account = import_or_install_module("google_oauth2_tool", "google.oauth2.service_account")
# Set GCP credentials instance variable from tfvars_data
self.credentials_file = self.tfvars_parser.tfvars_data.get("gcp_credentials_file")
# Create a client for the KMS API using the provided GCP service account
self.gcp_credentials = service_account.Credentials.from_service_account_file(self.credentials_file)
self.kms_client = kms.KeyManagementServiceClient(credentials = self.gcp_credentials)
# GCP KMS resource variables
with open(self.credentials_file) as f:
            cred_file_json = json.load(f)
self.project_id = cred_file_json["project_id"]
self.location = "global"
self.key_ring_id = self.initialize_keyring("cas_keyring")
self.crypto_key_id = self.initialize_cryptokey("cas_key")
self.crypto_key_path = self.kms_client.crypto_key_path(self.project_id, self.location, self.key_ring_id, self.crypto_key_id)
def create_crypto_key(self, crypto_key_id):
"""A method to create a crypto key on GCP KMS.
Args
----
crypto_key_id : str
name of the crypto key to be created.
Returns
-------
created_crypto_key.name : str
name of the crypto key created.
"""
# Create the crypto key object template
purpose = kms.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
crypto_key = { "purpose": purpose }
# Create a crypto key for the given key ring
parent = self.kms_client.key_ring_path(self.project_id, self.location, self.key_ring_id)
created_crypto_key = self.kms_client.create_crypto_key(
request={'parent': parent, 'crypto_key_id': crypto_key_id, 'crypto_key': crypto_key}
)
print(f"Created key ring: {created_crypto_key.name}\n")
return created_crypto_key.name
def decrypt_ciphertext(self, ciphertext, base64_encoded=False):
"""A method that decrypts ciphertext.
Uses GCP KMS to decrypt ciphertext back to plaintext using the provided
symmetric crypto key that belongs to this instance.
Args
----
ciphertext : str
the ciphertext being decrypted.
base64_encoded : boolean
the boolean param shows whether the ciphertext is base64 encoded.
Returns
-------
plaintext : str
the decrypted secret in plaintext.
"""
# Convert ciphertext string to a byte string, then Base64 decode it
if base64_encoded:
ciphertext = base64.b64decode(ciphertext.encode("utf-8"))
# Use the KMS API to decrypt the data
response = self.kms_client.decrypt(
request={'name': self.crypto_key_path, 'ciphertext': ciphertext}
)
        # Decode the plaintext bytes into a UTF-8 string
plaintext = response.plaintext.decode("utf-8")
return plaintext
def encrypt_plaintext(self, plaintext, base64_encoded=False):
"""A method that encrypts plaintext.
Uses GCP KMS to encrypt plaintext to ciphertext using the provided
symmetric crypto key that belongs to this instance.
Args
----
plaintext : str
            the plaintext being encrypted.
        base64_encoded : boolean
            whether the returned ciphertext should be Base64 encoded.
Returns
-------
ciphertext : str
the encrypted secret in ciphertext.
"""
# Use the KMS API to encrypt the data.
response = self.kms_client.encrypt(
request={'name': self.crypto_key_path, 'plaintext': plaintext.encode('utf-8')}
)
# Base64 encoding of ciphertext
if base64_encoded:
ciphertext = base64.b64encode(response.ciphertext).decode("utf-8")
else:
ciphertext = response.ciphertext
return ciphertext
def get_crypto_keys(self, key_ring_id):
"""A method that retrieves a list of crypto keys associated with a key ring.
This method returns a list of all the crypto keys associated with a specific key ring.
Args
----
key_ring_id : str
string ID for the GCP KMS key ring.
Returns
-------
crypto_keys_list : list
a list of all the crypto keys associated with the key ring argument.
"""
parent = self.kms_client.key_ring_path(self.project_id, self.location, key_ring_id)
response = self.kms_client.list_crypto_keys(request={'parent': parent})
# Access the name property and split string from the right. [2] to get the string after the separator
# eg. name: "projects/user-terraform/locations/global/keyRings/cas_keyring/cryptoKeys/cas_key"
crypto_keys_list = list(map(lambda key: key.name.rpartition("/")[2], response))
return crypto_keys_list
def get_key_rings(self):
"""A method that retrieves a list of key rings.
This method returns a list of all the key rings associated
with the GCP service account.
Returns
-------
key_rings_list : list
a list of all the key rings.
"""
parent = f'projects/{self.project_id}/locations/{self.location}'
response = self.kms_client.list_key_rings(request={'parent': parent})
# Access the name property and split string from the right. [2] to get the string after the separator
# eg. name: "projects/user-terraform/locations/global/keyRings/cas_keyring"
key_rings_list = list(map(lambda key_ring: key_ring.name.rpartition("/")[2], response))
return key_rings_list
def initialize_cryptokey(self, crypto_key_id):
"""A method that initializes this instance's crypto key.
This initialization method is called in the constructor to
create a default crypto key if it doesn't exist. If the key
exists already, then reuse it for this instance.
Args
----
crypto_key_id : str
the GCP crypto key ID used to encrypt and decrypt.
Returns
-------
crypto_key_id : str
the GCP crypto key ID used to encrypt and decrypt.
"""
crypto_keys_list = self.get_crypto_keys(self.key_ring_id)
# Create the crypto key only if it doesn't exist
if crypto_key_id not in crypto_keys_list:
try:
self.create_crypto_key(crypto_key_id)
print(f"Created key: {crypto_key_id}\n")
except Exception as err:
print("An exception occurred creating new crypto key:\n")
raise SystemExit(err)
else:
print(f"Using existing crypto key: {crypto_key_id}\n")
return crypto_key_id
def initialize_keyring(self, key_ring_id):
"""A method that initializes this instance's key ring.
This initialization method is called in the constructor to
create a default key ring if it doesn't exist.
Args
----
key_ring_id : str
key ring being created.
Returns
-------
key_ring_id : str
the key ring used.
"""
key_rings_list = self.get_key_rings()
# Create the key ring only if it doesn't exist
if key_ring_id not in key_rings_list:
try:
parent = f'projects/{self.project_id}/locations/{self.location}'
key_ring = {}
created_key_ring = self.kms_client.create_key_ring(
request={'parent': parent, 'key_ring_id': key_ring_id, 'key_ring': key_ring}
)
print(f"Created key ring: {key_ring_id}\n")
except Exception as err:
print("An exception occurred creating new key ring:\n")
raise SystemExit(err)
else:
print(f"Using existing key ring: {key_ring_id}\n")
return key_ring_id
class AWS_Tfvars_Encryptor(Tfvars_Encryptor):
"""This is a concrete sub class that inherits from Tfvars_Encryptor.
It contains attributes and methods specific to AWS KMS client to
automate the encryption and decryption of terraform.tfvars files.
Attributes
----------
aws_credentials : dict
Dictionary containing two keys: aws_access_key_id and aws_secret_access_key.
customer_master_key_id : str
Defaulted to use "cas_key" as a crypto key ID.
Methods
-------
__init__(tfvars_parser)
create_crypto_key(crypto_key_alias)
decrypt_ciphertext(ciphertext, base64_encoded)
encrypt_plaintext(plaintext, base64_encoded)
initialize_aws_credentials(path)
initialize_cryptokey(crypto_key_alias_name)
get_crypto_keys()
"""
def __init__(self, tfvars_parser):
"""AWS_Tfvars_Encryptor class constructor to initialize the object.
Args
----
tfvars_parser : object
instance of Tfvars_Parser class.
"""
super().__init__(tfvars_parser)
# Install and import the required AWS modules
global boto3
boto3 = import_or_install_module("boto3")
# Set AWS credentials instance variables from tfvars_data
self.credentials_file = self.tfvars_parser.tfvars_data.get("aws_credentials_file")
# Create a client for the KMS API using the provided AWS credentials
self.aws_credentials = self.initialize_aws_credentials(self.credentials_file)
self.kms_client = boto3.client(
"kms",
aws_access_key_id = self.aws_credentials.get("aws_access_key_id"),
aws_secret_access_key = self.aws_credentials.get("aws_secret_access_key"),
            region_name = self.tfvars_parser.tfvars_data.get("aws_region", "us-west-1")
)
# AWS KMS resource variables
self.customer_master_key_id = self.initialize_cryptokey("cas_key")
def create_crypto_key(self, crypto_key_alias):
"""A method to create a crypto key on AWS KMS.
Args
----
crypto_key_alias : str
alias name of the crypto key being created.
Returns
        -------
        customer_master_key_id : str
customer_master_key_id value used for terraform.tfvars file.
"""
# Use KMS client to create key and store the returned KeyId
customer_master_key_id = self.kms_client.create_key().get("KeyMetadata").get("KeyId")
# Give this KeyId an alias name
self.kms_client.create_alias(
# The alias to create. Aliases must begin with "alias/".
AliasName = f"alias/{crypto_key_alias}",
TargetKeyId = customer_master_key_id
)
print(f"Created {crypto_key_alias}: {customer_master_key_id}\n")
return customer_master_key_id
def decrypt_ciphertext(self, ciphertext, base64_encoded=False):
"""A method that decrypts ciphertext.
Uses AWS KMS to decrypt ciphertext back to plaintext using the provided
symmetric crypto key that belongs to this instance.
Args
----
ciphertext : str
the ciphertext being decrypted.
base64_encoded : boolean
the boolean param shows whether the ciphertext is base64 encoded.
Returns
-------
plaintext : str
the decrypted secret in plaintext.
"""
# Convert ciphertext string to a byte string, then Base64 decode it
if base64_encoded:
ciphertext = base64.b64decode(ciphertext.encode("utf-8"))
# Use the KMS API to decrypt the data
response = self.kms_client.decrypt(
KeyId = self.customer_master_key_id,
CiphertextBlob = ciphertext
)
        # Decode the plaintext bytes into a UTF-8 string
plaintext = response.get("Plaintext").decode("utf-8")
return plaintext
def encrypt_plaintext(self, plaintext, base64_encoded=False):
"""A method that encrypts plaintext.
Uses AWS KMS to encrypt plaintext to ciphertext using the provided
symmetric crypto key that belongs to this instance.
Args
----
        plaintext : str
            the plaintext being encrypted.
        base64_encoded : boolean
            whether the returned ciphertext should be Base64 encoded.
Returns
-------
ciphertext : str
the encrypted secret in ciphertext.
"""
# Use the KMS API to encrypt the data.
response = self.kms_client.encrypt(
KeyId = self.customer_master_key_id,
Plaintext = plaintext.encode("utf-8")
)
# Base64 encoding of ciphertext
if base64_encoded:
ciphertext = base64.b64encode(response.get("CiphertextBlob")).decode("utf-8")
else:
ciphertext = response.get("CiphertextBlob")
return ciphertext
def get_crypto_keys(self):
"""A method that retrieves a list of crypto keys aliase names
associated with the AWS credentials in the region.
Returns
-------
crypto_keys_list : list
            a list of all the crypto key alias names associated with the AWS credentials in the region.
"""
# Use crypto keys data under the "Aliases" dict key
response = self.kms_client.list_aliases().get("Aliases")
# Access the "AliasName" property for each key entry by splitting string from the right. [2] to get the string after the separator
# eg. response.get("Aliases") returns [{"AliasName": "<alias/AliasName>", "AliasArn": "<AliasArn>", "TargetKeyId": "<TargetKeyId>"}]
crypto_keys_list = list(map(lambda key: key.get("AliasName").rpartition("/")[2], response))
return crypto_keys_list
def initialize_aws_credentials(self, file_path):
"""A method that parses the aws_access_key_id and aws_secret_access_key
from aws_credentials_file required for the KMS client.
This initialization method is used in the constructor to
initialize both the aws_access_key_id and aws_secret_access_key
by parsing the aws_credentials_file.
Args
----
file_path : str
path to aws_credentials_file.
Returns
-------
dict
Dictionary containing the "aws_access_key_id" and "aws_secret_access_key".
"""
aws_access_key_id = None
aws_secret_access_key = None
try:
with open(file_path) as f:
for line in f:
line = line.strip()
# Skip blank lines and comment lines
# "not line" must come first using short circuiting to avoid string index out of range error
if not line or line[0] in ("#"):
continue
if "aws_secret_access_key" in line:
aws_secret_access_key = line.rpartition("=")[2].strip()
continue
if "aws_access_key_id" in line:
aws_access_key_id = line.rpartition("=")[2].strip()
continue
except Exception as err:
print("An exception occurred initializing AWS credentials:\n")
raise SystemExit(err)
return { "aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key }
def initialize_cryptokey(self, crypto_key_alias_name):
"""A method that initializes this instance's crypto key.
This initialization method is called in the constructor to
        create a default crypto key if it doesn't exist. If the key
        already exists, it is reused for this instance.
Args
----
crypto_key_alias_name : str
the AWS crypto key alias name used to encrypt and decrypt.
Returns
-------
customer_master_key_id : str
the AWS crypto key used to encrypt and decrypt.
"""
crypto_keys_list = self.get_crypto_keys()
customer_master_key_id = None
# Create the crypto key only if it doesn't exist
if crypto_key_alias_name not in crypto_keys_list:
try:
self.create_crypto_key(crypto_key_alias_name)
except Exception as err:
print("An exception occurred creating new crypto key:\n")
raise SystemExit(err)
else:
# Use crypto keys data under the "Aliases" dict key
response = self.kms_client.list_aliases().get("Aliases")
# Trim the "AliasName" string for each key entry by splitting string from the right. [2] to get the just the "AliasName" after the separator
# For each key entry, compare the string to find a match.
# eg. response.get("Aliases") returns [{"AliasName": "<alias/AliasName>", "AliasArn": "<AliasArn>", "TargetKeyId": "<TargetKeyId>"}]
matched_crypto_keys = filter(lambda key: key.get("AliasName").rpartition("/")[2] == crypto_key_alias_name, response)
# Access the "TargetKeyId" property of the first matched key to retrieve the customer_master_key_id associated with it.
customer_master_key_id = list(matched_crypto_keys)[0].get("TargetKeyId")
print(f"Using existing crypto key {crypto_key_alias_name}: {customer_master_key_id}\n")
return customer_master_key_id
def main():
# Set up argparse
    parser_description = ("Uses a KMS key to encrypt or decrypt secrets in the specified terraform.tfvars. "
"The script encrypts by default. To decrypt instead, add the -d flag.")
parser = argparse.ArgumentParser(description = parser_description)
parser.add_argument("tfvars", help = "specify the path to terraform.tfvars file")
parser.add_argument("-d", help = "decrypt secrets in terraform.tfvars specified", action = "store_true")
args = parser.parse_args()
# Instantiate a Tfvars_Parser to read the terraform.tfvars file
tfvars_parser = Tfvars_Parser(args.tfvars)
tfvars_encryptor = None
# Instantiate a GCP_Tfvars_Encryptor or AWS_Tfvars_Encryptor
if tfvars_parser.tfvars_data.get("gcp_credentials_file"):
tfvars_encryptor = GCP_Tfvars_Encryptor(tfvars_parser)
elif tfvars_parser.tfvars_data.get("aws_credentials_file"):
tfvars_encryptor = AWS_Tfvars_Encryptor(tfvars_parser)
    # Abort the script if no credentials file is provided
    else:
        print("Missing gcp_credentials_file or aws_credentials_file in tfvars. "
              "Ensure the credentials file is valid and try again.\n")
raise SystemExit()
# Encryption is the default, decryption if user specified the -d flag
if args.d:
# Abort the decryption if there is not a kms_cryptokey_id (GCP) or customer_master_key_id (AWS) in the tfvars file
if (not tfvars_parser.tfvars_data.get("kms_cryptokey_id") and
not tfvars_parser.tfvars_data.get("customer_master_key_id")):
print("No kms_cryptokey_id or customer_master_key_id present in tfvars. "
"Ensure the secrets are encrypted and try again.\n")
raise SystemExit()
tfvars_encryptor.decrypt_tfvars_secrets()
else:
# Abort the encryption if there is already a kms_cryptokey_id (GCP) or customer_master_key_id (AWS) present
if (tfvars_parser.tfvars_data.get("kms_cryptokey_id") or
tfvars_parser.tfvars_data.get("customer_master_key_id")):
print("Detected kms_cryptokey_id in tfvars. "
"Ensure secrets are not already encrypted and try again.\n")
raise SystemExit()
tfvars_encryptor.encrypt_tfvars_secrets()
if __name__ == "__main__":
main()
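# Example invocation (the script filename below is illustrative, not taken from this file):
#   python3 tfvars_encryptor.py path/to/terraform.tfvars        # encrypt secrets (default)
#   python3 tfvars_encryptor.py -d path/to/terraform.tfvars     # decrypt previously encrypted secrets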
|
import math
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils import timezone
from twitterbot.bot import TwitterBot
from visitors.models import Visitor, Statistic, Institution, VisitorScrapeProgress
class Command(BaseCommand):
help = 'computes the total number of records in visitors and last updated per scraper'
def handle(self, *args, **options):
print("Doing calculations")
run_counts()
def run_counts():
store_total_visitor_count()
store_updated_institutions()
def store_total_visitor_count():
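    # Refresh the cached visitor count, tweet when the total crosses a new million
    # compared to the last recorded VisitorScrapeProgress entry, and record a progress
    # row for the current cutoff date if one does not already exist.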
stats = Statistic.objects.all().last()
stats.visitor_count = Visitor.objects.all().count()
stats.save()
print(f'Total count {stats.visitor_count}')
last_entry = VisitorScrapeProgress.objects.last()
if last_entry:
last_entry_millions = math.floor(last_entry.visitor_count / 1_000_000)
current_count_millions = math.floor(stats.visitor_count / 1_000_000)
if current_count_millions - last_entry_millions > 0:
twitter = TwitterBot(
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_TOKEN_SECRET,
)
twitter.send_tweet(
f'la base de datos de manolo.rocks sobrepasó los {current_count_millions} millones '
f'de registros de visitas con {stats.visitor_count:,} registros'
)
if not VisitorScrapeProgress.objects.filter(cutoff_date=timezone.now()).exists():
VisitorScrapeProgress.objects.create(
visitor_count=stats.visitor_count,
cutoff_date=timezone.now(),
)
return stats
def store_updated_institutions():
stats = Statistic.objects.all().last()
institution_stats = []
for institution in Institution.objects.all().order_by('-rank'):
last_visitor = Visitor.objects.filter(
institution=institution.slug,
).order_by('date').last()
if last_visitor:
item = {
'name': institution.name,
'slug': institution.slug,
'rank': institution.rank,
'last_updated': last_visitor.date.strftime('%Y-%m-%d'),
}
institution_stats.append(item)
print(f'{institution} last updated {item["last_updated"]}')
stats.updated_institutions = institution_stats
stats.save()
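# Note: this module subclasses BaseCommand, so it lives under <app>/management/commands/
# and would typically be invoked on a schedule (e.g. via cron) as:
#   python manage.py <command_name>
# (the command name is whatever this file is called; a placeholder is shown here).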
|
import asyncio
import datetime
import importlib
import itertools
import os
import random
import re
import subprocess
import sys
import discord
import psutil
from src import const
from src.algorithms import levenshtein_distance
from src.api.bot_instance import BotInstance
from src.backend.discord.embed import DiscordEmbed
from src.backend.discord.message import Msg
from src.backend.discord.voice import VoiceRoutine
from src.bc import DoNotUpdateFlag
from src.bot_cache import BotCache
from src.config import Command, Config, GuildSettings, SecretConfig, User, bc
from src.emoji import get_clock_emoji
from src.ff import FF
from src.info import BotInfo
from src.log import log
from src.mail import Mail
from src.reminder import Reminder
from src.utils import Util
class WalBot(discord.Client):
def __init__(self, name: str, config: Config, secret_config: SecretConfig, intents: discord.Intents) -> None:
super().__init__(intents=intents)
self.repl = None
bc.instance_name = self.instance_name = name
self.config = config
self.secret_config = secret_config
self.bot_cache = BotCache(True)
self.loop.create_task(self._process_reminders())
self.loop.create_task(VoiceRoutine(self.bot_cache).start())
bc.config = self.config
bc.commands = self.config.commands
bc.background_loop = self.loop
bc.latency = lambda: self.latency
bc.change_status = self._change_status
bc.change_presence = self.change_presence
bc.close = self.close
bc.secret_config = self.secret_config
bc.info = BotInfo()
bc.plugin_manager.register()
bc.fetch_channel = self.fetch_channel
if not bc.args.fast_start:
log.debug("Started Markov model checks...")
if bc.markov.check():
log.info("Markov model has passed all checks")
else:
log.info("Markov model has not passed checks, but all errors were fixed")
async def _bot_runner_task(self, *args, **kwargs):
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def run(self, *args, **kwargs):
        # Slightly patched implementation from discord.py discord.Client (parent) class
# Reference: https://github.com/Rapptz/discord.py/blob/master/discord/client.py
if sys.platform == "win32":
return super().run(*args, **kwargs)
loop = self.loop
        asyncio.ensure_future(self._bot_runner_task(*args, **kwargs), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
log.info('Received signal to terminate bot and event loop')
log.info("Shutting down the bot...")
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
for task in tasks:
if not task.cancelled():
log.error("Asynchronous task cancel failed!")
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(self._on_shutdown())
loop.close()
log.info("Bot is shut down!")
async def _on_shutdown(self) -> None:
if self.repl is not None:
self.repl.stop()
for event in bc.background_events:
event.cancel()
bc.background_loop = None
await bc.plugin_manager.unload_plugins()
@Mail.send_exception_info_to_admin_emails_async
async def _precompile(self) -> None:
log.debug("Started precompiling functions...")
levenshtein_distance("", "")
log.debug("Finished precompiling functions")
async def _change_status(self, string: str, type_: discord.ActivityType) -> None:
await self.change_presence(activity=discord.Activity(name=string, type=type_))
async def _config_autosave(self) -> None:
await self.wait_until_ready()
index = 1
while not self.is_closed():
await asyncio.sleep(self.config.saving["period"] * 60)
if index % self.config.saving["backup"]["period"] == 0:
self.config.backup(const.CONFIG_PATH, const.MARKOV_PATH)
self.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH)
index += 1
async def _process_reminders_iteration(self) -> None:
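        # One polling pass over the configured reminders:
        #   1) send any pre-reminders ("N minutes left") that are due,
        #   2) fire reminders matching the current minute (channel embed, optional
        #      direct messages and e-mails), scheduling a new entry for recurring ones,
        #   3) drop reminders that are already in the past,
        #   4) set the REMINDER do-not-update flag when a reminder is close enough
        #      that an update/restart could make the bot miss it.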
log.debug3("Reminder processing iteration has started")
now = datetime.datetime.now().replace(second=0).strftime(const.REMINDER_DATETIME_FORMAT)
to_remove = []
to_append = []
reminder_do_not_update_flag = False
for key, rem in self.config.reminders.items():
for i in range(len(rem.prereminders_list)):
prereminder = rem.prereminders_list[i]
used_prereminder = rem.used_prereminders_list[i]
if prereminder == 0 or used_prereminder:
continue
prereminder_time = (
datetime.datetime.now().replace(second=0) + datetime.timedelta(minutes=prereminder))
if rem == prereminder_time.strftime(const.REMINDER_DATETIME_FORMAT):
channel = self.get_channel(rem.channel_id)
e = DiscordEmbed()
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e.title(f"{prereminder} minutes left until reminder")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(
datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=prereminder))
e.footer(text=rem.author)
await channel.send("", embed=e.get())
rem.used_prereminders_list[i] = True
if rem == now:
channel = self.get_channel(rem.channel_id)
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e = DiscordEmbed()
e.title(f"{clock_emoji} You asked to remind")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(datetime.datetime.now(datetime.timezone.utc))
e.footer(text=rem.author)
await channel.send(' '.join(rem.ping_users if rem.ping_users else ""), embed=e.get())
for user_id in rem.whisper_users:
await Msg.send_direct_message(
self.get_user(user_id), f"You asked to remind at {now} -> {rem.message}", False)
if rem.email_users:
mail = Mail(self.secret_config)
mail.send(
rem.email_users,
f"Reminder: {rem.message}",
f"You asked to remind at {now} -> {rem.message}")
if rem.repeat_after > 0:
new_time = datetime.datetime.now().replace(second=0, microsecond=0) + rem.get_next_event_delta()
new_time = new_time.strftime(const.REMINDER_DATETIME_FORMAT)
to_append.append(
Reminder(str(new_time), rem.message, rem.channel_id, rem.author, rem.time_created))
to_append[-1].repeat_after = rem.repeat_after
to_append[-1].repeat_interval_measure = rem.repeat_interval_measure
to_append[-1].prereminders_list = rem.prereminders_list
to_append[-1].used_prereminders_list = [False] * len(rem.prereminders_list)
to_append[-1].notes = rem.notes
log.debug2(f"Scheduled renew of recurring reminder - old id: {key}")
to_remove.append(key)
elif rem < now:
log.debug2(f"Scheduled reminder with id {key} removal")
to_remove.append(key)
else:
prereminders_delay = 0
if rem.prereminders_list:
prereminders_delay = max(rem.prereminders_list)
if ((datetime.datetime.strptime(rem.time, const.REMINDER_DATETIME_FORMAT) - datetime.datetime.now())
< datetime.timedelta(minutes=(5 + prereminders_delay / 60))):
reminder_do_not_update_flag = True
bc.do_not_update[DoNotUpdateFlag.REMINDER] = reminder_do_not_update_flag
for key in to_remove:
self.config.reminders.pop(key)
for item in to_append:
key = self.config.ids["reminder"]
self.config.reminders[key] = item
self.config.ids["reminder"] += 1
log.debug3("Reminder processing iteration has finished")
@Mail.send_exception_info_to_admin_emails_async
async def _process_reminders(self) -> None:
await self.wait_until_ready()
while not self.is_closed():
await self._process_reminders_iteration()
await asyncio.sleep(const.REMINDER_POLLING_INTERVAL)
@Mail.send_exception_info_to_admin_emails_async
async def on_ready(self) -> None:
await bc.plugin_manager.load_plugins()
log.info(
f"Logged in as: {self.user.name} {self.user.id} ({self.__class__.__name__}), "
f"instance: {self.instance_name}")
self.bot_cache.update({
"ready": True,
})
self.bot_cache.dump_to_file()
bc.guilds = self.guilds
for guild in self.guilds:
if guild.id not in self.config.guilds.keys():
self.config.guilds[guild.id] = GuildSettings(guild.id)
bc.bot_user = self.user
self.loop.create_task(self._config_autosave())
self.loop.create_task(self._precompile())
@Mail.send_exception_info_to_admin_emails_async
async def on_message(self, message: discord.Message) -> None:
await bc.plugin_manager.broadcast_command("on_message", message)
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
else:
await self._process_regular_message(message)
await self._process_repetitions(message)
@Mail.send_exception_info_to_admin_emails_async
async def on_message_edit(self, old_message: discord.Message, message: discord.Message) -> None:
if message.content == old_message.content and message.embeds != old_message.embeds:
log.info(f"<{message.id}> (edit, embed update) {message.author} -> {message.content}")
return
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> (edit) {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
async def _process_repetitions(self, message: discord.Message) -> None:
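        # If the last three messages in this channel are identical, non-empty and none
        # of them was sent by the bot itself, chime in by repeating the same message.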
m = tuple(bc.message_buffer.get(message.channel.id, i) for i in range(3))
if (all(m) and m[0].content and m[0].content == m[1].content == m[2].content and
(m[0].author.id != self.user.id and
m[1].author.id != self.user.id and
m[2].author.id != self.user.id)):
await message.channel.send(m[0].content)
async def _process_regular_message(self, message: discord.Message) -> None:
channel_id = message.channel.id
if isinstance(message.channel, discord.Thread): # Inherit parent channel settings for threads
channel_id = message.channel.parent_id
if (self.user.mentioned_in(message) or self.user.id in [
member.id for member in list(
itertools.chain(*[role.members for role in message.role_mentions]))]):
if channel_id in self.config.guilds[message.channel.guild.id].markov_responses_whitelist:
result = await self.config.disable_pings_in_response(message, bc.markov.generate())
await message.channel.send(message.author.mention + ' ' + result)
elif channel_id in self.config.guilds[message.channel.guild.id].markov_logging_whitelist:
needs_to_be_added = True
if not FF.is_enabled("WALBOT_FEATURE_MARKOV_MONGO"):
for ignored_prefix in bc.markov.ignored_prefixes.values():
if message.content.startswith(ignored_prefix):
needs_to_be_added = False
break
if needs_to_be_added:
bc.markov.add_string(message.content)
if channel_id in self.config.guilds[message.channel.guild.id].responses_whitelist:
responses_count = 0
for response in self.config.responses.values():
if responses_count >= const.MAX_BOT_RESPONSES_ON_ONE_MESSAGE:
break
if re.search(response.regex, message.content):
text = await Command.process_subcommands(
response.text, message, self.config.users[message.author.id])
await Msg.reply(message, text, False)
responses_count += 1
if channel_id in self.config.guilds[message.channel.guild.id].reactions_whitelist:
for reaction in self.config.reactions.values():
if re.search(reaction.regex, message.content):
log.info("Added reaction " + reaction.emoji)
try:
await message.add_reaction(reaction.emoji)
except discord.HTTPException:
pass
async def _process_command(self, message: discord.Message) -> None:
command = message.content.split(' ')
command = list(filter(None, command))
command[0] = command[0][1:]
if not command[0]:
return log.debug("Ignoring empty command")
if command[0] not in self.config.commands.data.keys():
if command[0] in self.config.commands.aliases.keys():
command[0] = self.config.commands.aliases[command[0]]
else:
await message.channel.send(
f"Unknown command '{command[0]}', "
f"probably you meant '{self._suggest_similar_command(command[0])}'")
return
if command[0] not in (
"poll",
"stopwatch",
"timer",
"vqfpush",
"vqpush",
"disabletl",
):
timeout_error, _ = await Util.run_function_with_time_limit(
self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id]),
const.MAX_COMMAND_EXECUTION_TIME)
if command[0] not in (
"silent",
) and timeout_error:
await message.channel.send(f"Command '{' '.join(command)}' took too long to execute")
else:
await self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id])
def _suggest_similar_command(self, unknown_command: str) -> str:
min_dist = 100000
suggestion = ""
for command in self.config.commands.data.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
for command in self.config.commands.aliases.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
return suggestion
async def on_raw_message_edit(self, payload: discord.RawMessageUpdateEvent) -> None:
try:
log.info(f"<{payload.message_id}> (raw_edit) {payload.data['author']['username']}#"
f"{payload.data['author']['discriminator']} -> {payload.data['content']}")
except KeyError:
pass
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
log.info(f"<{payload.message_id}> (delete)")
class DiscordBotInstance(BotInstance):
def start(self, args, main_bot=True):
# Check whether bot is already running
bot_cache = BotCache(main_bot).parse()
if bot_cache is not None:
pid = bot_cache["pid"]
if pid is not None and psutil.pid_exists(pid):
return log.error("Bot is already running!")
# Some variable initializations
bc.restart_flag = False
bc.args = args
# Handle --nohup flag
if sys.platform in ("linux", "darwin") and args.nohup:
fd = os.open(const.NOHUP_FILE_PATH, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
log.info(f"Output is redirected to {const.NOHUP_FILE_PATH}")
os.dup2(fd, sys.stdout.fileno())
os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
os.close(fd)
# NOTE: Does not work when not in main thread
# signal.signal(signal.SIGHUP, signal.SIG_IGN)
        # Saving the application PID in order to safely stop it later
BotCache(main_bot).dump_to_file()
# Constructing bot instance
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
intents = discord.Intents.all()
if main_bot:
walbot = WalBot(args.name, bc.config, bc.secret_config, intents=intents)
else:
walbot = importlib.import_module("src.backend.discord.minibot").MiniWalBot(
args.name, bc.config, bc.secret_config, args.message, intents=intents)
# Starting the bot
try:
walbot.run(bc.secret_config.token)
except discord.PrivilegedIntentsRequired:
log.error("Privileged Gateway Intents are not enabled! Shutting down the bot...")
def stop(self, args, main_bot=True):
log.info("Bot is disconnected!")
if main_bot:
bc.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH, wait=True)
BotCache(main_bot).remove()
if bc.restart_flag:
cmd = f"'{sys.executable}' '{os.getcwd() + '/walbot.py'}' start"
log.info("Calling: " + cmd)
if sys.platform in ("linux", "darwin"):
fork = os.fork()
if fork == 0:
subprocess.call(cmd)
elif fork > 0:
log.info("Stopping current instance of the bot")
sys.exit(const.ExitStatus.NO_ERROR)
else:
subprocess.call(cmd)
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from argparse import ArgumentParser
import curses
import json
import time
from datetime import datetime
from pathlib import Path
from striptease import StripConnection, append_to_run_log
args = None
cur_json_procedure = []
DEFAULT_WAIT_TIME_S = 0.5
warnings = []
def warning(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(1))
warnings.append(msg)
stdscr.refresh()
def tagmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(2))
stdscr.refresh()
def logmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(3))
stdscr.refresh()
def commandmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(4))
stdscr.refresh()
def waitmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(5))
stdscr.refresh()
def prompt(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(6))
stdscr.refresh()
def readkey(stdscr):
stdscr.nodelay(False)
choice = stdscr.getkey()
stdscr.nodelay(True)
return choice
def close_tags(stdscr, conn):
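    # Ask the server for tags that are still open; either close them automatically
    # (when args.close_tags is set) or ask the user whether to quit. Returns True
    # when the caller should abort the run, a falsy value otherwise.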
tags = [x for x in conn.tag_query() if x["stop"] < 0.0]
if not tags:
return
if args.close_tags:
for cur_tag in tags:
conn.tag_stop(
cur_tag["tag"],
comment="Closed automatically by program_batch_runner.py",
)
else:
tags = ", ".join([('"' + x["tag"] + '"') for x in tags])
warning(stdscr, f"The tags {tags} are still open, do you want to quit (y/n)?")
choice = readkey(stdscr)
if choice.upper() == "Y":
return True
return False
def main(stdscr):
global args
global cur_json_procedure
curses.start_color()
curses.use_default_colors()
curses.cbreak()
curses.noecho()
# Warning
curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)
# Tag message
curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)
# Log message
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
# Command message
curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLACK)
# Wait message
curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_BLACK)
# User prompt
curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
stdscr.scrollok(True)
stdscr.idlok(True)
stdscr.nodelay(True) # Don't wait for keypresses
stdscr.keypad(True)
if not args.dry_run:
print(f"{len(cur_json_procedure)} commands ready to be executed, let's go!")
print("Going to establish a connection with the server…")
conn = StripConnection()
conn.login()
if close_tags(stdscr, conn):
return
print("…connection established")
if (not args.dry_run) and (not args.do_not_round):
conn.round_all_files()
else:
conn = None
open_tags = set([])
indent_level = 0
for cur_command in cur_json_procedure:
cmddict = cur_command["command"]
print_fn = None
indent_level_incr = 0
curpath = cur_command["path"]
if cur_command["kind"] == "tag":
print_fn = tagmsg
if cmddict["type"] == "START":
command_descr = f"start of tag {cmddict["tag"]}"
open_tags.add(cmddict["tag"])
indent_level_incr = 4
else:
if not cmddict["tag"] in open_tags:
msg = f"Tag {cmddict["tag"]} is being closed, but the tags currently open are {", ".join(open_tags)}"
warning(stdscr, msg)
else:
open_tags.discard(cmddict["tag"])
command_descr = f"end of tag {cmddict["tag"]}"
indent_level = -4
elif cur_command["kind"] == "log":
print_fn = logmsg
command_descr = f"log message '{cmddict["message"]}' ({cmddict["level"]})"
elif cur_command["kind"] == "command":
print_fn = commandmsg
method, base_addr, data = [
cmddict[x] for x in ("method", "base_addr", "data")
]
datastr = ", ".join([str(x) for x in data])
command_descr = f"command {method} {base_addr}, data={datastr}"
elif cur_command["kind"] == "wait":
print_fn = waitmsg
curpath = "/waitcmd"
command_descr = f"wait for {cur_command["command"]["wait_time_s"]} s"
else:
warning(
stdscr,
f"\"{cur_command["kind"]}\" is not recognized as a valid command type",
)
print_fn = prompt
print_fn(stdscr, " " * indent_level + f"{curpath}: {command_descr}")
try:
if cur_command["kind"] != "wait":
if not args.dry_run:
conn.post(cur_command["path"], message=cmddict)
time.sleep(args.wait_time)
else:
wait_time = cmddict["wait_time_s"]
if args.waitcmd_time is not None:
wait_time = args.waitcmd_time
time.sleep(wait_time)
except Exception as e:
if cur_command["kind"] == "tag":
warning(
stdscr, f"Error while submitting tag {cmddict["tag"]}, ignoring it"
)
else:
warning_msg = f"Error in \"{cur_command["kind"]}\" command: {e}"
warning(stdscr, warning_msg)
indent_level += indent_level_incr
if indent_level < 0:
indent_level = 0
# Check for keypresses
key = stdscr.getch()
if key != curses.ERR:
if key in [ord(" "), ord("p")]:
# Pause
curses.flash()
prompt(stdscr, "Paused, press any key to resume")
stdscr.nodelay(False)
stdscr.getch()
stdscr.nodelay(True)
elif key == ord("q"):
# Quit
curses.flash()
prompt(stdscr, "Are you sure you want to quit? (y/n)")
choice = readkey(stdscr)
if choice.upper() == "Y":
break
elif key == ord("l"):
# Log message
curses.flash()
prompt(stdscr, "Enter a log message:")
stdscr.nodelay(False)
curses.echo()
msg = stdscr.getstr()
curses.noecho()
stdscr.nodelay(True)
if not args.dry_run:
conn.log(message=msg)
logmsg(stdscr, f'Custom log message "{msg}" sent to the server')
if args.wait_at_end:
prompt(stdscr, "Execution completed, press a key to exit")
readkey(stdscr)
if not args.dry_run:
if conn and (not args.do_not_round):
conn.round_all_files()
conn.logout()
if warnings:
print("Here are the warning messages produced during the execution:")
for msg in warnings:
print(msg)
if __name__ == "__main__":
parser = ArgumentParser(
description="Run a STRIP test procedure.",
epilog="""
You can pause the execution with the keys SPACE or "p".
Pressing "l" allows the user to enter a log message.
Pressing "q" will halt the execution.
""",
)
parser.add_argument(
"--wait-at-end",
default=False,
action="store_true",
help="Wait a key before ending the procedure",
)
parser.add_argument(
"--wait-time",
metavar="SECONDS",
type=float,
default=DEFAULT_WAIT_TIME_S,
help=f"""
Specify the amount of time to wait before running the
next command. Default is {DEFAULT_WAIT_TIME_S}
""",
)
parser.add_argument(
"--waitcmd-time",
metavar="SECONDS",
type=float,
default=None,
help="Override the duration of wait commands in the script",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="Do not send any command to the server",
)
parser.add_argument(
"--close-tags",
action="store_true",
default=False,
help="Automatically close any tag that is open before the script starts running",
)
parser.add_argument(
"json_files",
metavar="JSON_FILE",
type=str,
nargs="+",
help="Name of the JSON files containing the test procedures. More "
"than one file can be provided. If no files are provided, the JSON record "
"will be read from the terminal.",
)
parser.add_argument(
"--do-not-round",
action="store_false",
default=True,
help="Avoid closing HDF5 files before and after the execution of the "
"script. (Default is forcing the server to keep all the data acquired "
"during the procedure in one HDF file.)",
)
args = parser.parse_args()
for cur_file in args.json_files:
with open(cur_file, "rt") as fp:
cur_json_procedure = json.load(fp)
start_time = datetime.now()
curses.wrapper(main)
end_time = datetime.now()
print(f"The command took {end_time - start_time} to complete")
if not args.dry_run:
append_to_run_log(
start_time=start_time,
end_time=end_time,
wait_time_s=args.wait_time,
wait_cmd_time_s=args.waitcmd_time,
full_path=str(Path(cur_file).absolute()),
procedure=cur_json_procedure,
)
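# --- Editorial sketch (hedged): shape of a JSON procedure consumed by this runner ---
# The loop in main() dispatches on each entry's "kind" field ("tag", "log",
# "command" or "wait") and posts the nested "command" dictionary to the entry's
# "path". Paths, addresses and data values below are placeholders, and any
# non-"START" tag type is treated as closing the tag.
#
# [
#   {"kind": "tag", "path": "/command", "command": {"type": "START", "tag": "DEMO_TAG"}},
#   {"kind": "log", "path": "/log", "command": {"message": "demo step", "level": "INFO"}},
#   {"kind": "command", "path": "/rest/demo",
#    "command": {"method": "SET", "base_addr": "DEMO_ADDR", "data": [100]}},
#   {"kind": "wait", "path": "/waitcmd", "command": {"wait_time_s": 2.0}},
#   {"kind": "tag", "path": "/command", "command": {"type": "STOP", "tag": "DEMO_TAG"}}
# ]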
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from argparse import ArgumentParser
import curses
import json
import time
from datetime import datetime
from pathlib import Path
from striptease import StripConnection, append_to_run_log
args = None
cur_json_procedure = []
DEFAULT_WAIT_TIME_S = 0.5
warnings = []
def warning(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(1))
warnings.append(msg)
stdscr.refresh()
def tagmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(2))
stdscr.refresh()
def logmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(3))
stdscr.refresh()
def commandmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(4))
stdscr.refresh()
def waitmsg(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(5))
stdscr.refresh()
def prompt(stdscr, msg):
stdscr.addstr(msg + "\n", curses.color_pair(6))
stdscr.refresh()
def readkey(stdscr):
stdscr.nodelay(False)
choice = stdscr.getkey()
stdscr.nodelay(True)
return choice
def close_tags(stdscr, conn):
tags = [x for x in conn.tag_query() if x["stop"] < 0.0]
if not tags:
return
if args.close_tags:
for cur_tag in tags:
conn.tag_stop(
cur_tag["tag"],
comment="Closed automatically by program_batch_runner.py",
)
else:
tags = ", ".join([('"' + x["tag"] + '"') for x in tags])
warning(stdscr, f"The tags {tags} are still open, do you want to quit (y/n)?")
choice = readkey(stdscr)
if choice.upper() == "Y":
return True
return False
def main(stdscr):
global args
global cur_json_procedure
curses.start_color()
curses.use_default_colors()
curses.cbreak()
curses.noecho()
# Warning
curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)
# Tag message
curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)
# Log message
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
# Command message
curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLACK)
# Wait message
curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_BLACK)
# User prompt
curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
stdscr.scrollok(True)
stdscr.idlok(True)
stdscr.nodelay(True) # Don't wait for keypresses
stdscr.keypad(True)
if not args.dry_run:
print(f"{len(cur_json_procedure)} commands ready to be executed, let's go!")
print("Going to establish a connection with the server…")
conn = StripConnection()
conn.login()
if close_tags(stdscr, conn):
return
print("…connection established")
if (not args.dry_run) and (not args.do_not_round):
conn.round_all_files()
else:
conn = None
open_tags = set([])
indent_level = 0
for cur_command in cur_json_procedure:
cmddict = cur_command["command"]
print_fn = None
indent_level_incr = 0
curpath = cur_command["path"]
if cur_command["kind"] == "tag":
print_fn = tagmsg
if cmddict["type"] == "START":
command_descr = f"start of tag {cmddict['tag']}"
open_tags.add(cmddict["tag"])
indent_level_incr = 4
else:
if not cmddict["tag"] in open_tags:
msg = f"Tag {cmddict['tag']} is being closed, but the tags currently open are {', '.join(open_tags)}"
warning(stdscr, msg)
else:
open_tags.discard(cmddict["tag"])
command_descr = f"end of tag {cmddict['tag']}"
indent_level = -4
elif cur_command["kind"] == "log":
print_fn = logmsg
command_descr = f"log message '{cmddict['message']}' ({cmddict['level']})"
elif cur_command["kind"] == "command":
print_fn = commandmsg
method, base_addr, data = [
cmddict[x] for x in ("method", "base_addr", "data")
]
datastr = ", ".join([str(x) for x in data])
command_descr = f"command {method} {base_addr}, data={datastr}"
elif cur_command["kind"] == "wait":
print_fn = waitmsg
curpath = "/waitcmd"
command_descr = f"wait for {cur_command['command']['wait_time_s']} s"
else:
warning(
stdscr,
f"\"{cur_command['kind']}\" is not recognized as a valid command type",
)
print_fn = prompt
print_fn(stdscr, " " * indent_level + f"{curpath}: {command_descr}")
try:
if cur_command["kind"] != "wait":
if not args.dry_run:
conn.post(cur_command["path"], message=cmddict)
time.sleep(args.wait_time)
else:
wait_time = cmddict["wait_time_s"]
if args.waitcmd_time is not None:
wait_time = args.waitcmd_time
time.sleep(wait_time)
except Exception as e:
if cur_command["kind"] == "tag":
warning(
stdscr, f"Error while submitting tag {cmddict['tag']}, ignoring it"
)
else:
warning_msg = f"Error in \"{cur_command['kind']}\" command: {e}"
warning(stdscr, warning_msg)
indent_level += indent_level_incr
if indent_level < 0:
indent_level = 0
# Check for keypresses
key = stdscr.getch()
if key != curses.ERR:
if key in [ord(" "), ord("p")]:
# Pause
curses.flash()
prompt(stdscr, "Paused, press any key to resume")
stdscr.nodelay(False)
stdscr.getch()
stdscr.nodelay(True)
elif key == ord("q"):
# Quit
curses.flash()
prompt(stdscr, "Are you sure you want to quit? (y/n)")
choice = readkey(stdscr)
if choice.upper() == "Y":
break
elif key == ord("l"):
# Log message
curses.flash()
prompt(stdscr, "Enter a log message:")
stdscr.nodelay(False)
curses.echo()
msg = stdscr.getstr()
curses.noecho()
stdscr.nodelay(True)
if not args.dry_run:
conn.log(message=msg)
logmsg(stdscr, f'Custom log message "{msg}" sent to the server')
if args.wait_at_end:
prompt(stdscr, "Execution completed, press a key to exit")
readkey(stdscr)
if not args.dry_run:
if conn and (not args.do_not_round):
conn.round_all_files()
conn.logout()
if warnings:
print("Here are the warning messages produced during the execution:")
for msg in warnings:
print(msg)
if __name__ == "__main__":
parser = ArgumentParser(
description="Run a STRIP test procedure.",
epilog="""
You can pause the execution with the keys SPACE or "p".
Pressing "l" allows the user to enter a log message.
Pressing "q" will halt the execution.
""",
)
parser.add_argument(
"--wait-at-end",
default=False,
action="store_true",
help="Wait a key before ending the procedure",
)
parser.add_argument(
"--wait-time",
metavar="SECONDS",
type=float,
default=DEFAULT_WAIT_TIME_S,
help=f"""
Specify the amount of time to wait before running the
next command. Default is {DEFAULT_WAIT_TIME_S}
""",
)
parser.add_argument(
"--waitcmd-time",
metavar="SECONDS",
type=float,
default=None,
help="Override the duration of wait commands in the script",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="Do not send any command to the server",
)
parser.add_argument(
"--close-tags",
action="store_true",
default=False,
help="Automatically close any tag that is open before the script starts running",
)
parser.add_argument(
"json_files",
metavar="JSON_FILE",
type=str,
nargs="+",
help="Name of the JSON files containing the test procedures. More "
"than one file can be provided. If no files are provided, the JSON record "
"will be read from the terminal.",
)
parser.add_argument(
"--do-not-round",
action="store_false",
default=True,
help="Avoid closing HDF5 files before and after the execution of the "
"script. (Default is forcing the server to keep all the data acquired "
"during the procedure in one HDF file.)",
)
args = parser.parse_args()
for cur_file in args.json_files:
with open(cur_file, "rt") as fp:
cur_json_procedure = json.load(fp)
start_time = datetime.now()
curses.wrapper(main)
end_time = datetime.now()
print(f"The command took {end_time - start_time} to complete")
if not args.dry_run:
append_to_run_log(
start_time=start_time,
end_time=end_time,
wait_time_s=args.wait_time,
wait_cmd_time_s=args.waitcmd_time,
full_path=str(Path(cur_file).absolute()),
procedure=cur_json_procedure,
)
|
import asyncio
from akinator.async_aki import Akinator as Akinator_
import discord
from discord.ext import commands
class Options:
YES = "✅"
NO = "❌"
IDK = "🤷"
PY = "🤔"
PN = "😔"
STOP = "⏹️"
class Akinator:
def __init__(self):
self.player = None
self.win_at = None
self.aki = Akinator_()
self.bar_emojis = (" ", "██")
self.guess = None
self.bar = ""
self.message = None
self.questions = 0
self.mapping = {
Options.YES: "y",
Options.NO : "n",
Options.IDK: "i",
Options.PY : "p",
Options.PN : "pn"
}
def build_bar(self) -> str:
prog = round(self.aki.progression/8)
emp, full = self.bar_emojis
self.bar = f"[`{full*prog}{emp*(10-prog)}`]"
return self.bar
async def build_embed(self) -> discord.Embed:
embed = discord.Embed(
title = "Guess your character!",
description = (
"```swift\n"
f"Question-Number : {self.questions}\n"
f"Progression-Level: {self.aki.progression}\n```\n"
f"{self.build_bar()}"
),
color = discord.Color.random()
)
embed.add_field(name= "- Question -", value= self.aki.question)
embed.set_footer(text= "Figuring out the next question | This may take a second")
return embed
async def win(self):
await self.aki.win()
self.guess = self.aki.first_guess
embed = discord.Embed(color=0x2F3136)
embed.title = "Character Guesser Engine Results"
embed.description = f"Total Questions: `{self.questions}`"
embed.add_field(name= "Character Guessed", value=f"\n**Name:** {self.guess["name"]}\n{self.guess["description"]}")
embed.set_image(url= self.guess['absolute_picture_path'])
embed.set_footer(text="Was I correct?")
return embed
async def start(self, ctx: commands.Context, remove_reaction_after: bool = False, win_at: int = 80, timeout: int = None, delete_button: bool = False, child_mode: bool = True, **kwargs):
self.player = ctx.author
self.win_at = win_at
await self.aki.start_game(child_mode=child_mode)
embed = await self.build_embed()
self.message = await ctx.send(embed=embed)
for button in self.mapping:
await self.message.add_reaction(button)
if delete_button:
await self.message.add_reaction(Options.STOP)
while self.aki.progression <= self.win_at:
def check(reaction, user):
if reaction.message == self.message and user == ctx.author:
return str(reaction.emoji) in self.mapping or str(reaction.emoji) == Options.STOP
try:
reaction, _ = await ctx.bot.wait_for('reaction_add', timeout=timeout, check=check)
except asyncio.TimeoutError:
return
emoji = str(reaction.emoji)
if emoji == Options.STOP:
await ctx.send("Session ended")
return await self.message.delete()
self.questions += 1
await self.aki.answer(self.mapping[emoji])
try:
await self.message.remove_reaction(emoji, ctx.author)
except discord.DiscordException:
pass
embed = await self.build_embed()
await self.message.edit(embed=embed)
embed = await self.win()
return await self.message.edit(embed=embed)
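# --- Editorial usage sketch (hedged, not part of the original module) ---
# One possible way to wire the Akinator helper into a discord.ext.commands bot.
# The prefix, command name and token below are placeholders, and the bot needs
# the message/reaction intents enabled for the reaction-based controls to work.
#
# bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
#
# @bot.command(name="aki")
# async def aki(ctx: commands.Context):
#     game = Akinator()
#     await game.start(ctx, win_at=80, timeout=120, delete_button=True)
#
# bot.run("YOUR_BOT_TOKEN")  # placeholder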
|
import asyncio
from akinator.async_aki import Akinator as Akinator_
import discord
from discord.ext import commands
class Options:
YES = "✅"
NO = "❌"
IDK = "🤷"
PY = "🤔"
PN = "😔"
STOP = "⏹️"
class Akinator:
def __init__(self):
self.player = None
self.win_at = None
self.aki = Akinator_()
self.bar_emojis = (" ", "██")
self.guess = None
self.bar = ""
self.message = None
self.questions = 0
self.mapping = {
Options.YES: "y",
Options.NO : "n",
Options.IDK: "i",
Options.PY : "p",
Options.PN : "pn"
}
def build_bar(self) -> str:
prog = round(self.aki.progression/8)
emp, full = self.bar_emojis
self.bar = f"[`{full*prog}{emp*(10-prog)}`]"
return self.bar
async def build_embed(self) -> discord.Embed:
embed = discord.Embed(
title = "Guess your character!",
description = (
"```swift\n"
f"Question-Number : {self.questions}\n"
f"Progression-Level: {self.aki.progression}\n```\n"
f"{self.build_bar()}"
),
color = discord.Color.random()
)
embed.add_field(name= "- Question -", value= self.aki.question)
embed.set_footer(text= "Figuring out the next question | This may take a second")
return embed
async def win(self):
await self.aki.win()
self.guess = self.aki.first_guess
embed = discord.Embed(color=0x2F3136)
embed.title = "Character Guesser Engine Results"
embed.description = f"Total Questions: `{self.questions}`"
embed.add_field(name= "Character Guessed", value=f"\n**Name:** {self.guess['name']}\n{self.guess['description']}")
embed.set_image(url= self.guess['absolute_picture_path'])
embed.set_footer(text="Was I correct?")
return embed
async def start(self, ctx: commands.Context, remove_reaction_after: bool = False, win_at: int = 80, timeout: int = None, delete_button: bool = False, child_mode: bool = True, **kwargs):
self.player = ctx.author
self.win_at = win_at
await self.aki.start_game(child_mode=child_mode)
embed = await self.build_embed()
self.message = await ctx.send(embed=embed)
for button in self.mapping:
await self.message.add_reaction(button)
if delete_button:
await self.message.add_reaction(Options.STOP)
while self.aki.progression <= self.win_at:
def check(reaction, user):
if reaction.message == self.message and user == ctx.author:
return str(reaction.emoji) in self.mapping or str(reaction.emoji) == Options.STOP
try:
reaction, _ = await ctx.bot.wait_for('reaction_add', timeout=timeout, check=check)
except asyncio.TimeoutError:
return
emoji = str(reaction.emoji)
if emoji == Options.STOP:
await ctx.send("Session ended")
return await self.message.delete()
self.questions += 1
await self.aki.answer(self.mapping[emoji])
try:
await self.message.remove_reaction(emoji, ctx.author)
except discord.DiscordException:
pass
embed = await self.build_embed()
await self.message.edit(embed=embed)
embed = await self.win()
return await self.message.edit(embed=embed)
|
"""Test setup for ASGI spec tests
Mock application used for testing ASGI standard compliance.
"""
from enum import Enum
from functools import partial
from sys import version_info as PY_VER # noqa
import pytest
class AppState(Enum):
PREINIT = 0
INIT = 1
READY = 2
SHUTDOWN = 3
class BaseMockApp(object):
"""A mock application object passed to TestClient for the tests"""
# Make it easy to override these for lifespan related test scenarios
lifespan_startup_message = {"type": "lifespan.startup.complete", "message": "OK"}
lifespan_shutdown_message = {"type": "lifespan.shutdown.complete", "message": "OK"}
use_lifespan = True
def __init__(self, **kwargs):
        for k, v in kwargs.items():
setattr(self, k, v)
self.state = AppState.PREINIT
async def lifespan_startup(self, scope, receive, send, msg):
if self.state == AppState.READY:
# Technically, this isn't explicitly forbidden in the spec.
# But I think it should not happen.
raise RuntimeError("Received more than one lifespan.startup")
self.state = AppState.READY
return await send(self.lifespan_startup_message)
async def lifespan_shutdown(self, scope, receive, send, msg):
if self.state == AppState.SHUTDOWN:
# Technically, this isn't explicitly forbidden in the spec.
# But I think it should not happen.
raise RuntimeError("Received more than one lifespan.shutdown")
self.state = AppState.SHUTDOWN
return await send(self.lifespan_shutdown_message)
async def lifespan(self, scope, receive, send):
if not self.use_lifespan:
raise RuntimeError(f"Type '{scope["type"]}' is not supported.")
while True:
try:
msg = await receive()
except RuntimeError as e:
if e.args == ("Event loop is closed",):
return
else:
raise
if msg["type"] == "lifespan.startup":
await self.lifespan_startup(scope, receive, send, msg)
elif msg["type"] == "lifespan.shutdown":
await self.lifespan_shutdown(scope, receive, send, msg)
else:
raise RuntimeError(f"Received unknown message type '{msg["type"]}")
if self.state == AppState.SHUTDOWN:
return
async def http_request(self, scope, receive, send, msg):
# Default behaviour, just send a minimal response with OK to any request
await send({"type": "http.response.start", "headers": [], "status": 200})
await send({"type": "http.response.body", "body": b"OK"})
async def http_disconnect(self, scope, receive, send, msg):
raise RuntimeError(f"Received http.disconnect message {msg}")
async def http(self, scope, receive, send):
msg = []
# Receive http.requests until http.disconnect or more_body = False
while True:
msg.append(await receive())
if msg[-1]["type"] == "http.disconnect" or not msg[-1].get(
"more_body", False
):
break
if msg[0]["type"] == "http.disconnect":
# Honestly this shouldn't really happen, but it's allowed in spec, so check.
return await self.http_disconnect(scope, receive, send, msg)
else:
return await self.http_request(scope, receive, send, msg)
async def websocket_connect(self, scope, receive, send, msg, msg_history):
await send({"type": "websocket.accept"})
return True
async def websocket_receive(self, scope, receive, send, msg, msg_history):
return True
async def websocket_disconnect(self, scope, receive, send, msg, msg_history):
return False
async def websocket(self, scope, receive, send):
msg_history = []
while True:
msg = await receive()
# Send websocket events to a handler
func = getattr(
self, msg["type"].replace(".", "_").replace("-", "__"), "handle_unknown"
)
res = await func(scope, receive, send, msg, msg_history)
msg_history.append(msg)
# If the event handler returns false, assume we closed the socket.
if msg["type"] == "websocket.disconnect" or not res:
return
async def handle_unknown(self, scope, receive, send):
if self.state != AppState.READY:
raise RuntimeError(
"Received another request before lifespan.startup.complete sent"
)
raise RuntimeError(f"Type '{scope["type"]}' is not supported.")
async def handle_all(self, scope, receive, send):
# Do nothing unless something monkeypatches us
pass
async def asgi_call(self, scope, receive, send):
# Initial catch-all, for testing things like scope type itself
await self.handle_all(scope, receive, send)
if self.state == AppState.PREINIT:
if self.use_lifespan:
self.state = AppState.INIT
else:
self.state = AppState.READY
if self.state == AppState.SHUTDOWN:
raise RuntimeError(f"Got message after shutting down: {scope}")
# call hooks based on scope type, so we can monkeypatch them in tests
# the lifespan, http, and websocket protocol types all have simple methods already
# implemented.
func = getattr(
self, scope["type"].replace(".", "_").replace("-", "__"), "handle_unknown"
)
return await func(scope, receive, send)
class MockApp(BaseMockApp):
"""Modern ASGI single-callable app"""
async def __call__(self, scope, receive, send):
return await super().asgi_call(scope, receive, send)
class LegacyMockApp(BaseMockApp):
"""Legacy ASGI 'two-callable' app"""
def __call__(self, scope):
return partial(super().asgi_call, scope)
@pytest.fixture(scope="function")
def mock_app():
"""Create a mock ASGI App to test the TestClient against"""
return MockApp()
@pytest.fixture(scope="function")
def legacy_mock_app():
"""Create a mock legacy ASGI App to test the TestClient against"""
return LegacyMockApp()
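# --- Editorial sketch (hedged): driving the mock app through the lifespan handshake ---
# Not part of the original test suite. Under the assumption of a simple queue-based
# receive/send pair, this shows how MockApp walks through the ASGI lifespan
# startup/shutdown messages handled above.
if __name__ == "__main__":
    import asyncio
    async def _lifespan_demo():
        app = MockApp()
        inbox = asyncio.Queue()   # messages the server would deliver to the app
        outbox = asyncio.Queue()  # messages the app sends back
        await inbox.put({"type": "lifespan.startup"})
        await inbox.put({"type": "lifespan.shutdown"})
        async def receive():
            return await inbox.get()
        async def send(message):
            await outbox.put(message)
        await app({"type": "lifespan"}, receive, send)
        print(await outbox.get())  # expected: {"type": "lifespan.startup.complete", ...}
        print(await outbox.get())  # expected: {"type": "lifespan.shutdown.complete", ...}
    asyncio.run(_lifespan_demo())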
|
"""Test setup for ASGI spec tests
Mock application used for testing ASGI standard compliance.
"""
from enum import Enum
from functools import partial
from sys import version_info as PY_VER # noqa
import pytest
class AppState(Enum):
PREINIT = 0
INIT = 1
READY = 2
SHUTDOWN = 3
class BaseMockApp(object):
"""A mock application object passed to TestClient for the tests"""
# Make it easy to override these for lifespan related test scenarios
lifespan_startup_message = {"type": "lifespan.startup.complete", "message": "OK"}
lifespan_shutdown_message = {"type": "lifespan.shutdown.complete", "message": "OK"}
use_lifespan = True
def __init__(self, **kwargs):
        for k, v in kwargs.items():
setattr(self, k, v)
self.state = AppState.PREINIT
async def lifespan_startup(self, scope, receive, send, msg):
if self.state == AppState.READY:
# Technically, this isn't explicitly forbidden in the spec.
# But I think it should not happen.
raise RuntimeError("Received more than one lifespan.startup")
self.state = AppState.READY
return await send(self.lifespan_startup_message)
async def lifespan_shutdown(self, scope, receive, send, msg):
if self.state == AppState.SHUTDOWN:
# Technically, this isn't explicitly forbidden in the spec.
# But I think it should not happen.
raise RuntimeError("Received more than one lifespan.shutdown")
self.state = AppState.SHUTDOWN
return await send(self.lifespan_shutdown_message)
async def lifespan(self, scope, receive, send):
if not self.use_lifespan:
raise RuntimeError(f"Type '{scope['type']}' is not supported.")
while True:
try:
msg = await receive()
except RuntimeError as e:
if e.args == ("Event loop is closed",):
return
else:
raise
if msg["type"] == "lifespan.startup":
await self.lifespan_startup(scope, receive, send, msg)
elif msg["type"] == "lifespan.shutdown":
await self.lifespan_shutdown(scope, receive, send, msg)
else:
raise RuntimeError(f"Received unknown message type '{msg['type']}")
if self.state == AppState.SHUTDOWN:
return
async def http_request(self, scope, receive, send, msg):
# Default behaviour, just send a minimal response with OK to any request
await send({"type": "http.response.start", "headers": [], "status": 200})
await send({"type": "http.response.body", "body": b"OK"})
async def http_disconnect(self, scope, receive, send, msg):
raise RuntimeError(f"Received http.disconnect message {msg}")
async def http(self, scope, receive, send):
msg = []
# Receive http.requests until http.disconnect or more_body = False
while True:
msg.append(await receive())
if msg[-1]["type"] == "http.disconnect" or not msg[-1].get(
"more_body", False
):
break
if msg[0]["type"] == "http.disconnect":
# Honestly this shouldn't really happen, but it's allowed in spec, so check.
return await self.http_disconnect(scope, receive, send, msg)
else:
return await self.http_request(scope, receive, send, msg)
async def websocket_connect(self, scope, receive, send, msg, msg_history):
await send({"type": "websocket.accept"})
return True
async def websocket_receive(self, scope, receive, send, msg, msg_history):
return True
async def websocket_disconnect(self, scope, receive, send, msg, msg_history):
return False
async def websocket(self, scope, receive, send):
msg_history = []
while True:
msg = await receive()
# Send websocket events to a handler
func = getattr(
self, msg["type"].replace(".", "_").replace("-", "__"), "handle_unknown"
)
res = await func(scope, receive, send, msg, msg_history)
msg_history.append(msg)
# If the event handler returns false, assume we closed the socket.
if msg["type"] == "websocket.disconnect" or not res:
return
async def handle_unknown(self, scope, receive, send):
if self.state != AppState.READY:
raise RuntimeError(
"Received another request before lifespan.startup.complete sent"
)
raise RuntimeError(f"Type '{scope['type']}' is not supported.")
async def handle_all(self, scope, receive, send):
# Do nothing unless something monkeypatches us
pass
async def asgi_call(self, scope, receive, send):
# Initial catch-all, for testing things like scope type itself
await self.handle_all(scope, receive, send)
if self.state == AppState.PREINIT:
if self.use_lifespan:
self.state = AppState.INIT
else:
self.state = AppState.READY
if self.state == AppState.SHUTDOWN:
raise RuntimeError(f"Got message after shutting down: {scope}")
# call hooks based on scope type, so we can monkeypatch them in tests
# the lifespan, http, and websocket protocol types all have simple methods already
# implemented.
func = getattr(
self, scope["type"].replace(".", "_").replace("-", "__"), "handle_unknown"
)
return await func(scope, receive, send)
class MockApp(BaseMockApp):
"""Modern ASGI single-callable app"""
async def __call__(self, scope, receive, send):
return await super().asgi_call(scope, receive, send)
class LegacyMockApp(BaseMockApp):
"""Legacy ASGI 'two-callable' app"""
def __call__(self, scope):
return partial(super().asgi_call, scope)
@pytest.fixture(scope="function")
def mock_app():
"""Create a mock ASGI App to test the TestClient against"""
return MockApp()
@pytest.fixture(scope="function")
def legacy_mock_app():
"""Create a mock legacy ASGI App to test the TestClient against"""
return LegacyMockApp()
|
import logging
import requests
import jimi
######### --------- API --------- #########
if jimi.api.webServer:
if not jimi.api.webServer.got_first_request:
if jimi.api.webServer.name == "jimi_core":
@jimi.api.webServer.route(jimi.api.base+"admin/clearCache/", methods=["GET"])
@jimi.auth.adminEndpoint
def api_clearCache():
jimi.cache.globalCache.clearCache("ALL")
results = [{ "system" : jimi.cluster.getSystemId(), "status_code" : 200 }]
apiToken = jimi.auth.generateSystemSession()
headers = { "X-api-token" : apiToken }
for systemIndex in jimi.cluster.systemIndexes:
url = systemIndex["apiAddress"]
apiEndpoint = "admin/clearCache/"
try:
response = requests.get("{0}{1}{2}".format(url,jimi.api.base,apiEndpoint),headers=headers, timeout=10)
if response.status_code == 200:
results.append({ "system" : jimi.cluster.getSystemId(), "index" : systemIndex["systemIndex"], "status_code" : response.status_code })
except:
logging.warning("Unable to access {0}{1}{2}".format(url,jimi.api.base,apiEndpoint))
return { "results" : results }, 200
@jimi.api.webServer.route(jimi.api.base+"admin/clearStartChecks/", methods=["GET"])
@jimi.auth.adminEndpoint
def api_clearStartChecks():
from system import install
install.resetTriggers()
return { "result" : True }, 200
if jimi.api.webServer.name == "jimi_worker":
@jimi.api.webServer.route(jimi.api.base+"admin/clearCache/", methods=["GET"])
@jimi.auth.systemEndpoint
def api_clearCache():
jimi.cache.globalCache.clearCache("ALL")
return { "result" : True }, 200
if jimi.api.webServer.name == "jimi_web":
from flask import Flask, request, render_template
# --- User editor --- #
@jimi.api.webServer.route("/admin/users/", methods=["GET"])
@jimi.auth.adminEndpoint
def listUsers():
#Get groups to match against
groups = {}
groupsList = []
foundGroups = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for group in foundGroups:
groups[group._id] = group
groupsList.append(group)
users = []
foundUsers = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for user in foundUsers:
if user.primaryGroup in groups:
user.primaryGroupName = groups[user.primaryGroup].name
users.append(user)
return render_template("users.html",users=users,groups=groupsList,CSRF=jimi.api.g.sessionData["CSRF"])
@jimi.api.webServer.route("/admin/users/edit/", methods=["GET"])
@jimi.auth.adminEndpoint
def editUser():
#Get group data for later
groups = []
foundGroups = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for group in foundGroups:
groups.append(group)
#Get user details based on ID
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"_id":jimi.db.ObjectId(request.args.get("id"))})
if foundUser:
foundUser = foundUser[0]
#Get friendly names for groups
for group in groups:
if group._id == foundUser.primaryGroup:
foundUser.primaryGroupName = group.name
return render_template("userDetailed.html",user=foundUser,groups=groups,CSRF=jimi.api.g.sessionData["CSRF"])
return 404
@jimi.api.webServer.route("/admin/users/edit/", methods=["PUT"])
@jimi.auth.adminEndpoint
def updateUser():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not update the user" },403)
userData = request.json
if userData["enabled"] == "No":
userData["enabled"] = False
else:
userData["enabled"] = True
#Get user details based on username
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"username":userData["username"]})
if foundUser:
foundUser = foundUser[0]
updateList = []
for item in userData:
if item != "CSRF" and userData[item] != foundUser.getAttribute(item,sessionData=jimi.api.g.sessionData):
foundUser.setAttribute(item,userData[item],sessionData=jimi.api.g.sessionData)
updateList.append(item)
if any(updateList):
foundUser.update(updateList,sessionData=jimi.api.g.sessionData)
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "User updated successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Nothing to update" },200)
return response
@jimi.api.webServer.route("/admin/users/create/", methods=["POST"])
@jimi.auth.adminEndpoint
def createUser():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Please provide a username" },403)
userData = request.json
#Check user ID is new and valid
if userData["username"]:
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"username":userData["username"]})
if foundUser:
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Username already in use" },403)
#Check password provided
if userData["password"]:
if not jimi.auth.meetsPasswordPolicy(userData["password"]):
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Password does not meet minimum requirements" },403)
#If no name provided, use the username
if len(userData["name"]) == 0:
userData["name"] = userData["username"]
#Create a new user
if jimi.auth._user().new(userData["name"],userData["username"],userData["password"]):
user = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"username":userData["username"]})[0]
#Enable the user?
if userData["active"] == "No":
user.setAttribute("enabled",False,sessionData=jimi.api.g.sessionData)
#Define the users primary group
user.setAttribute("primaryGroup",userData["group"],sessionData=jimi.api.g.sessionData)
group = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,id=userData["group"])
if len(group) == 1:
group = group[0]
group.members.append(user._id)
group.update(["members"])
else:
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not find group!" },403)
#Set email if it exists
if userData["email"]:
user.setAttribute("email",userData["email"],sessionData=jimi.api.g.sessionData)
#Set user login type
user.setAttribute("loginType",userData["loginType"],sessionData=jimi.api.g.sessionData)
user.update(["email","primaryGroup","loginType"],sessionData=jimi.api.g.sessionData)
#Check for sandbox creation
if userData["sandbox"] == "Yes":
#Create a sandbox conduct using the user's name
                                sandboxConduct = jimi.conduct._conduct().new(f"{userData['name']} - Sandbox")
                                sandboxConduct = jimi.conduct._conduct().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":f"{userData['name']} - Sandbox"})[0]
sandboxConduct.acl = {"ids":[{"accessID":group._id,"delete":True,"read":True,"write":True}]}
sandboxConduct.comment = f"Sandbox for {userData["name"]} (auto-generated)"
sandboxConduct.update(["acl","comment"])
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "User created successfully" },201)
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Please provide a password" },403)
return response
@jimi.api.webServer.route("/admin/users/edit/", methods=["DELETE"])
@jimi.auth.adminEndpoint
def deleteUser():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not delete user" },403)
#Get user details based on username
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,id=request.args.get("id"))
if foundUser:
foundUser = foundUser[0]
#Cannot delete the root user
if foundUser.username != "root":
if jimi.auth._user().api_delete(id=foundUser._id):
#Remove group membership
group = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,id=foundUser.primaryGroup)
if len(group) == 1:
group = group[0]
group.members = [x for x in group.members if x != foundUser._id]
group.update(["members"])
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "User deleted successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Cannot delete root user" },403)
return response
# --- Group editor --- #
@jimi.api.webServer.route("/admin/groups/", methods=["GET"])
@jimi.auth.adminEndpoint
def listGroups():
#Get groups
groups = []
foundGroups = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for group in foundGroups:
group.userCount = len(group.members)
groups.append(group)
return render_template("groups.html",groups=groups,CSRF=jimi.api.g.sessionData["CSRF"])
@jimi.api.webServer.route("/admin/groups/edit/", methods=["GET"])
@jimi.auth.adminEndpoint
def editGroups():
#Get group details based on ID
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"_id":jimi.db.ObjectId(request.args.get("id"))})
if foundGroup:
foundGroup = foundGroup[0]
#Get ACL info about each conduct
conductList = []
conducts = jimi.conduct._conduct().getAsClass(sessionData=jimi.api.g.sessionData,query={})
for conduct in conducts:
if "ids" in conduct.acl:
matches = [item for item in conduct.acl["ids"] if item["accessID"] == foundGroup._id]
if any(matches):
conductList.append({"id":conduct._id, "name":conduct.name, "acl":matches[0], "enabled":True})
else:
conductList.append({"id":conduct._id, "name":conduct.name, "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
else:
conductList.append({"id":conduct._id, "name":conduct.name, "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
#Get ACL info about each model
modelList = []
models = jimi.model.getModelExtra("model")[0]["results"]
for model in models:
if "ids" in model["acl"]:
matches = [item for item in model["acl"]["ids"] if item["accessID"] == foundGroup._id]
if any(matches):
modelList.append({"id":model["_id"], "name":model["name"], "acl":matches[0], "enabled":True})
else:
modelList.append({"id":model["_id"], "name":model["name"], "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
else:
modelList.append({"id":model["_id"], "name":model["name"], "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
return render_template("groupDetailed.html",group=foundGroup,conductList=conductList,modelList=modelList,CSRF=jimi.api.g.sessionData["CSRF"])
return 404
@jimi.api.webServer.route("/admin/groups/create/", methods=["POST"])
@jimi.auth.adminEndpoint
def createGroup():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Please provide a name" },403)
groupData = request.json
#Check group name is new and valid
if groupData["name"]:
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":groupData["name"]})
if foundGroup:
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group name already in use" },403)
#Create a new group
if jimi.auth._group().new(groupData["name"]):
group = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":groupData["name"]})[0]
#Enable the group?
if groupData["active"] == "No":
group.setAttribute("enabled",False,sessionData=jimi.api.g.sessionData)
#Set description
group.setAttribute("description",groupData["description"],sessionData=jimi.api.g.sessionData)
group.update(["description"],sessionData=jimi.api.g.sessionData)
#Check for sandbox creation
if groupData["sandbox"] == "Yes":
#Create a sandbox conduct using the group's name
                            sandboxConduct = jimi.conduct._conduct().new(f"{groupData['name']} - Sandbox")
#TODO: Set group as owner of sandbox
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group created successfully" },201)
return response
@jimi.api.webServer.route("/admin/groups/edit/", methods=["PUT"])
@jimi.auth.adminEndpoint
def updateGroup():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not update the group" },403)
groupData = request.json
if groupData["enabled"] == "No":
groupData["enabled"] = False
else:
groupData["enabled"] = True
#Get group details based on group name
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":groupData["name"]})
if foundGroup:
foundGroup = foundGroup[0]
updateList = []
for item in groupData:
if item != "CSRF" and groupData[item] != foundGroup.getAttribute(item,sessionData=jimi.api.g.sessionData):
foundGroup.setAttribute(item,groupData[item],sessionData=jimi.api.g.sessionData)
updateList.append(item)
if any(updateList):
foundGroup.update(updateList,sessionData=jimi.api.g.sessionData)
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group updated successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Nothing to update" },200)
return response
@jimi.api.webServer.route("/admin/groups/edit/", methods=["DELETE"])
@jimi.auth.adminEndpoint
def deleteGroup():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not delete group" },403)
#Get user details based on username
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"_id":jimi.db.ObjectId(request.args.get("id"))})
if foundGroup:
foundGroup = foundGroup[0]
#Cannot delete the root user
if foundGroup.name != "admin":
if jimi.auth._group().api_delete(query={"_id":jimi.db.ObjectId(request.args.get("id"))}):
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group deleted successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Cannot delete admin group" },403)
return response
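# --- Editorial sketch (hedged, not part of jimi itself) ---
# The "jimi_core" clearCache endpoint above propagates the request to every other
# cluster member with a GET on "<apiAddress><api.base>admin/clearCache/",
# authenticated by a system session token in the "X-api-token" header. A manual
# call would therefore look roughly like this (the address is a placeholder):
#
#   apiToken = jimi.auth.generateSystemSession()
#   response = requests.get(
#       "{0}{1}{2}".format("http://127.0.0.1:5000/", jimi.api.base, "admin/clearCache/"),
#       headers={"X-api-token": apiToken},
#       timeout=10,
#   )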
|
import logging
import requests
import jimi
######### --------- API --------- #########
if jimi.api.webServer:
if not jimi.api.webServer.got_first_request:
if jimi.api.webServer.name == "jimi_core":
@jimi.api.webServer.route(jimi.api.base+"admin/clearCache/", methods=["GET"])
@jimi.auth.adminEndpoint
def api_clearCache():
jimi.cache.globalCache.clearCache("ALL")
results = [{ "system" : jimi.cluster.getSystemId(), "status_code" : 200 }]
apiToken = jimi.auth.generateSystemSession()
headers = { "X-api-token" : apiToken }
for systemIndex in jimi.cluster.systemIndexes:
url = systemIndex["apiAddress"]
apiEndpoint = "admin/clearCache/"
try:
response = requests.get("{0}{1}{2}".format(url,jimi.api.base,apiEndpoint),headers=headers, timeout=10)
if response.status_code == 200:
results.append({ "system" : jimi.cluster.getSystemId(), "index" : systemIndex["systemIndex"], "status_code" : response.status_code })
except:
logging.warning("Unable to access {0}{1}{2}".format(url,jimi.api.base,apiEndpoint))
return { "results" : results }, 200
@jimi.api.webServer.route(jimi.api.base+"admin/clearStartChecks/", methods=["GET"])
@jimi.auth.adminEndpoint
def api_clearStartChecks():
from system import install
install.resetTriggers()
return { "result" : True }, 200
if jimi.api.webServer.name == "jimi_worker":
@jimi.api.webServer.route(jimi.api.base+"admin/clearCache/", methods=["GET"])
@jimi.auth.systemEndpoint
def api_clearCache():
jimi.cache.globalCache.clearCache("ALL")
return { "result" : True }, 200
if jimi.api.webServer.name == "jimi_web":
from flask import Flask, request, render_template
# --- User editor --- #
@jimi.api.webServer.route("/admin/users/", methods=["GET"])
@jimi.auth.adminEndpoint
def listUsers():
#Get groups to match against
groups = {}
groupsList = []
foundGroups = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for group in foundGroups:
groups[group._id] = group
groupsList.append(group)
users = []
foundUsers = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for user in foundUsers:
if user.primaryGroup in groups:
user.primaryGroupName = groups[user.primaryGroup].name
users.append(user)
return render_template("users.html",users=users,groups=groupsList,CSRF=jimi.api.g.sessionData["CSRF"])
@jimi.api.webServer.route("/admin/users/edit/", methods=["GET"])
@jimi.auth.adminEndpoint
def editUser():
#Get group data for later
groups = []
foundGroups = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for group in foundGroups:
groups.append(group)
#Get user details based on ID
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"_id":jimi.db.ObjectId(request.args.get("id"))})
if foundUser:
foundUser = foundUser[0]
#Get friendly names for groups
for group in groups:
if group._id == foundUser.primaryGroup:
foundUser.primaryGroupName = group.name
return render_template("userDetailed.html",user=foundUser,groups=groups,CSRF=jimi.api.g.sessionData["CSRF"])
return 404
@jimi.api.webServer.route("/admin/users/edit/", methods=["PUT"])
@jimi.auth.adminEndpoint
def updateUser():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not update the user" },403)
userData = request.json
if userData["enabled"] == "No":
userData["enabled"] = False
else:
userData["enabled"] = True
#Get user details based on username
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"username":userData["username"]})
if foundUser:
foundUser = foundUser[0]
updateList = []
for item in userData:
if item != "CSRF" and userData[item] != foundUser.getAttribute(item,sessionData=jimi.api.g.sessionData):
foundUser.setAttribute(item,userData[item],sessionData=jimi.api.g.sessionData)
updateList.append(item)
if any(updateList):
foundUser.update(updateList,sessionData=jimi.api.g.sessionData)
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "User updated successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Nothing to update" },200)
return response
@jimi.api.webServer.route("/admin/users/create/", methods=["POST"])
@jimi.auth.adminEndpoint
def createUser():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Please provide a username" },403)
userData = request.json
#Check user ID is new and valid
if userData["username"]:
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"username":userData["username"]})
if foundUser:
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Username already in use" },403)
#Check password provided
if userData["password"]:
if not jimi.auth.meetsPasswordPolicy(userData["password"]):
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Password does not meet minimum requirements" },403)
#If no name provided, use the username
if len(userData["name"]) == 0:
userData["name"] = userData["username"]
#Create a new user
if jimi.auth._user().new(userData["name"],userData["username"],userData["password"]):
user = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,query={"username":userData["username"]})[0]
#Enable the user?
if userData["active"] == "No":
user.setAttribute("enabled",False,sessionData=jimi.api.g.sessionData)
#Define the users primary group
user.setAttribute("primaryGroup",userData["group"],sessionData=jimi.api.g.sessionData)
group = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,id=userData["group"])
if len(group) == 1:
group = group[0]
group.members.append(user._id)
group.update(["members"])
else:
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not find group!" },403)
#Set email if it exists
if userData["email"]:
user.setAttribute("email",userData["email"],sessionData=jimi.api.g.sessionData)
#Set user login type
user.setAttribute("loginType",userData["loginType"],sessionData=jimi.api.g.sessionData)
user.update(["email","primaryGroup","loginType"],sessionData=jimi.api.g.sessionData)
#Check for sandbox creation
if userData["sandbox"] == "Yes":
#Create a sandbox conduct using the user's name
sandboxConduct = jimi.conduct._conduct().new(f"{userData['name']} - Sandbox")
sandboxConduct = jimi.conduct._conduct().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":f"{userData['name']} - Sandbox"})[0]
sandboxConduct.acl = {"ids":[{"accessID":group._id,"delete":True,"read":True,"write":True}]}
sandboxConduct.comment = f"Sandbox for {userData['name']} (auto-generated)"
sandboxConduct.update(["acl","comment"])
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "User created successfully" },201)
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Please provide a password" },403)
return response
@jimi.api.webServer.route("/admin/users/edit/", methods=["DELETE"])
@jimi.auth.adminEndpoint
def deleteUser():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not delete user" },403)
#Get user details based on username
foundUser = jimi.auth._user().getAsClass(sessionData=jimi.api.g.sessionData,id=request.args.get("id"))
if foundUser:
foundUser = foundUser[0]
#Cannot delete the root user
if foundUser.username != "root":
if jimi.auth._user().api_delete(id=foundUser._id):
#Remove group membership
group = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,id=foundUser.primaryGroup)
if len(group) == 1:
group = group[0]
group.members = [x for x in group.members if x != foundUser._id]
group.update(["members"])
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "User deleted successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Cannot delete root user" },403)
return response
# --- Group editor --- #
@jimi.api.webServer.route("/admin/groups/", methods=["GET"])
@jimi.auth.adminEndpoint
def listGroups():
#Get groups
groups = []
foundGroups = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={ })
for group in foundGroups:
group.userCount = len(group.members)
groups.append(group)
return render_template("groups.html",groups=groups,CSRF=jimi.api.g.sessionData["CSRF"])
@jimi.api.webServer.route("/admin/groups/edit/", methods=["GET"])
@jimi.auth.adminEndpoint
def editGroups():
#Get group details based on ID
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"_id":jimi.db.ObjectId(request.args.get("id"))})
if foundGroup:
foundGroup = foundGroup[0]
#Get ACL info about each conduct
conductList = []
conducts = jimi.conduct._conduct().getAsClass(sessionData=jimi.api.g.sessionData,query={})
for conduct in conducts:
if "ids" in conduct.acl:
matches = [item for item in conduct.acl["ids"] if item["accessID"] == foundGroup._id]
if any(matches):
conductList.append({"id":conduct._id, "name":conduct.name, "acl":matches[0], "enabled":True})
else:
conductList.append({"id":conduct._id, "name":conduct.name, "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
else:
conductList.append({"id":conduct._id, "name":conduct.name, "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
#Get ACL info about each model
modelList = []
models = jimi.model.getModelExtra("model")[0]["results"]
for model in models:
if "ids" in model["acl"]:
matches = [item for item in model["acl"]["ids"] if item["accessID"] == foundGroup._id]
if any(matches):
modelList.append({"id":model["_id"], "name":model["name"], "acl":matches[0], "enabled":True})
else:
modelList.append({"id":model["_id"], "name":model["name"], "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
else:
modelList.append({"id":model["_id"], "name":model["name"], "acl":{"accessID":foundGroup._id,"read":False,"write":False,"delete":False}, "enabled":False})
return render_template("groupDetailed.html",group=foundGroup,conductList=conductList,modelList=modelList,CSRF=jimi.api.g.sessionData["CSRF"])
return 404
@jimi.api.webServer.route("/admin/groups/create/", methods=["POST"])
@jimi.auth.adminEndpoint
def createGroup():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Please provide a name" },403)
groupData = request.json
#Check group name is new and valid
if groupData["name"]:
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":groupData["name"]})
if foundGroup:
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group name already in use" },403)
#Create a new group
if jimi.auth._group().new(groupData["name"]):
group = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":groupData["name"]})[0]
#Enable the group?
if groupData["active"] == "No":
group.setAttribute("enabled",False,sessionData=jimi.api.g.sessionData)
#Set description
group.setAttribute("description",groupData["description"],sessionData=jimi.api.g.sessionData)
group.update(["description"],sessionData=jimi.api.g.sessionData)
#Check for sandbox creation
if groupData["sandbox"] == "Yes":
#Create a sandbox conduct using the group's name
sandboxConduct = jimi.conduct._conduct().new(f"{groupData['name']} - Sandbox")
#TODO: Set group as owner of sandbox
return jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group created successfully" },201)
return response
@jimi.api.webServer.route("/admin/groups/edit/", methods=["PUT"])
@jimi.auth.adminEndpoint
def updateGroup():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not update the group" },403)
groupData = request.json
if groupData["enabled"] == "No":
groupData["enabled"] = False
else:
groupData["enabled"] = True
#Get group details based on group name
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"name":groupData["name"]})
if foundGroup:
foundGroup = foundGroup[0]
updateList = []
for item in groupData:
if item != "CSRF" and groupData[item] != foundGroup.getAttribute(item,sessionData=jimi.api.g.sessionData):
foundGroup.setAttribute(item,groupData[item],sessionData=jimi.api.g.sessionData)
updateList.append(item)
if any(updateList):
foundGroup.update(updateList,sessionData=jimi.api.g.sessionData)
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group updated successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Nothing to update" },200)
return response
@jimi.api.webServer.route("/admin/groups/edit/", methods=["DELETE"])
@jimi.auth.adminEndpoint
def deleteGroup():
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Could not delete group" },403)
#Get user details based on username
foundGroup = jimi.auth._group().getAsClass(sessionData=jimi.api.g.sessionData,query={"_id":jimi.db.ObjectId(request.args.get("id"))})
if foundGroup:
foundGroup = foundGroup[0]
#Cannot delete the root user
if foundGroup.name != "admin":
if jimi.auth._group().api_delete(query={"_id":jimi.db.ObjectId(request.args.get("id"))}):
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Group deleted successfully" },201)
else:
response = jimi.api.make_response({ "CSRF" : jimi.api.g.sessionData["CSRF"], "message" : "Cannot delete admin group" },403)
return response
|
#!/usr/bin/env python
from __future__ import unicode_literals
import openpyxl
import youtube_dl
START_COL = 'N'
START_ROW = 10
def get_youtube_links(workbook_name, start_row, start_col):
base_col = openpyxl.utils.column_index_from_string(start_col)
workbook = openpyxl.load_workbook(workbook_name)
worksheet = workbook[workbook.sheetnames[0]]
    for row in range(start_row, worksheet.max_row + 1):  # include the last populated row
ytb_link = worksheet.cell(column=base_col, row=row).value
if ytb_link is not None:
yield ytb_link
links = get_youtube_links('test.xlsx', START_ROW, START_COL)
class SilentLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
print(msg)
def my_hook(d):
if d['status'] == 'finished':
        print(f'downloaded song={d["filename"]}, now converting ...')
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'logger': SilentLogger(),
'progress_hooks': [my_hook],
'outtmpl': '%(autonumber)02d-%(title)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download(list(links))
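# --- Editorial note (hedged): expected spreadsheet layout ---
# get_youtube_links() reads the first worksheet of 'test.xlsx' and yields every
# non-empty cell of column N from row 10 down to the last populated row, one
# YouTube URL per cell, e.g. (placeholder values):
#
#   N10: https://www.youtube.com/watch?v=<video-id-1>
#   N11: https://www.youtube.com/watch?v=<video-id-2>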
|
#!/usr/bin/env python
from __future__ import unicode_literals
import openpyxl
import youtube_dl
START_COL = 'N'
START_ROW = 10
def get_youtube_links(workbook_name, start_row, start_col):
base_col = openpyxl.utils.column_index_from_string(start_col)
workbook = openpyxl.load_workbook(workbook_name)
worksheet = workbook[workbook.sheetnames[0]]
for row in range(start_row, worksheet.max_row):
ytb_link = worksheet.cell(column=base_col, row=row).value
if ytb_link is not None:
yield ytb_link
links = get_youtube_links('test.xlsx', START_ROW, START_COL)
class SilentLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
print(msg)
def my_hook(d):
if d['status'] == 'finished':
print(f'downloaded song={d["filename"]}, now converting ...')
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'logger': SilentLogger(),
'progress_hooks': [my_hook],
'outtmpl': '%(autonumber)02d-%(title)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download(list(links))
|
from typing import Tuple
import numpy
from matchms.typing import SpectrumType
from .BaseSimilarity import BaseSimilarity
from .spectrum_similarity_functions import collect_peak_pairs
from .spectrum_similarity_functions import score_best_matches
class CosineGreedy(BaseSimilarity):
"""Calculate 'cosine similarity score' between two spectra.
The cosine score aims at quantifying the similarity between two mass spectra.
The score is calculated by finding best possible matches between peaks
of two spectra. Two peaks are considered a potential match if their
m/z ratios lie within the given 'tolerance'.
The underlying peak assignment problem is here solved in a 'greedy' way.
This can perform notably faster, but does occasionally deviate slightly from
a fully correct solution (as with the Hungarian algorithm, see
:class:`~matchms.similarity.CosineHungarian`). In practice this will rarely
affect similarity scores notably, in particular for smaller tolerances.
For example
.. testcode::
import numpy as np
from matchms import Spectrum
from matchms.similarity import CosineGreedy
reference = Spectrum(mz=np.array([100, 150, 200.]),
intensities=np.array([0.7, 0.2, 0.1]))
query = Spectrum(mz=np.array([100, 140, 190.]),
intensities=np.array([0.4, 0.2, 0.1]))
# Use factory to construct a similarity function
cosine_greedy = CosineGreedy(tolerance=0.2)
score = cosine_greedy.pair(reference, query)
        print(f"Cosine score is {score['score']:.2f} with {score['matches']} matched peaks")
Should output
.. testoutput::
Cosine score is 0.83 with 1 matched peaks
"""
# Set key characteristics as class attributes
is_commutative = True
# Set output data type, e.g. ("score", "float") or [("score", "float"), ("matches", "int")]
score_datatype = [("score", numpy.float64), ("matches", "int")]
def __init__(self, tolerance: float = 0.1, mz_power: float = 0.0,
intensity_power: float = 1.0):
"""
Parameters
----------
tolerance:
Peaks will be considered a match when <= tolerance apart. Default is 0.1.
mz_power:
The power to raise m/z to in the cosine function. The default is 0, in which
case the peak intensity products will not depend on the m/z ratios.
intensity_power:
The power to raise intensity to in the cosine function. The default is 1.
"""
self.tolerance = tolerance
self.mz_power = mz_power
self.intensity_power = intensity_power
def pair(self, reference: SpectrumType, query: SpectrumType) -> Tuple[float, int]:
"""Calculate cosine score between two spectra.
Parameters
----------
reference
Single reference spectrum.
query
Single query spectrum.
Returns
-------
Score
Tuple with cosine score and number of matched peaks.
"""
def get_matching_pairs():
"""Get pairs of peaks that match within the given tolerance."""
matching_pairs = collect_peak_pairs(spec1, spec2, self.tolerance,
shift=0.0, mz_power=self.mz_power,
intensity_power=self.intensity_power)
if matching_pairs is None:
return None
matching_pairs = matching_pairs[numpy.argsort(matching_pairs[:, 2])[::-1], :]
return matching_pairs
spec1 = reference.peaks.to_numpy
spec2 = query.peaks.to_numpy
matching_pairs = get_matching_pairs()
if matching_pairs is None:
return numpy.asarray((float(0), 0), dtype=self.score_datatype)
score = score_best_matches(matching_pairs, spec1, spec2,
self.mz_power, self.intensity_power)
return numpy.asarray(score, dtype=self.score_datatype)
|
from typing import Tuple
import numpy
from matchms.typing import SpectrumType
from .BaseSimilarity import BaseSimilarity
from .spectrum_similarity_functions import collect_peak_pairs
from .spectrum_similarity_functions import score_best_matches
class CosineGreedy(BaseSimilarity):
"""Calculate 'cosine similarity score' between two spectra.
The cosine score aims at quantifying the similarity between two mass spectra.
The score is calculated by finding best possible matches between peaks
of two spectra. Two peaks are considered a potential match if their
m/z ratios lie within the given 'tolerance'.
The underlying peak assignment problem is here solved in a 'greedy' way.
This can perform notably faster, but does occasionally deviate slightly from
a fully correct solution (as with the Hungarian algorithm, see
:class:`~matchms.similarity.CosineHungarian`). In practice this will rarely
affect similarity scores notably, in particular for smaller tolerances.
For example
.. testcode::
import numpy as np
from matchms import Spectrum
from matchms.similarity import CosineGreedy
reference = Spectrum(mz=np.array([100, 150, 200.]),
intensities=np.array([0.7, 0.2, 0.1]))
query = Spectrum(mz=np.array([100, 140, 190.]),
intensities=np.array([0.4, 0.2, 0.1]))
# Use factory to construct a similarity function
cosine_greedy = CosineGreedy(tolerance=0.2)
score = cosine_greedy.pair(reference, query)
print(f"Cosine score is {score['score']:.2f} with {score['matches']} matched peaks")
Should output
.. testoutput::
Cosine score is 0.83 with 1 matched peaks
"""
# Set key characteristics as class attributes
is_commutative = True
# Set output data type, e.g. ("score", "float") or [("score", "float"), ("matches", "int")]
score_datatype = [("score", numpy.float64), ("matches", "int")]
def __init__(self, tolerance: float = 0.1, mz_power: float = 0.0,
intensity_power: float = 1.0):
"""
Parameters
----------
tolerance:
Peaks will be considered a match when <= tolerance apart. Default is 0.1.
mz_power:
The power to raise m/z to in the cosine function. The default is 0, in which
case the peak intensity products will not depend on the m/z ratios.
intensity_power:
The power to raise intensity to in the cosine function. The default is 1.
"""
self.tolerance = tolerance
self.mz_power = mz_power
self.intensity_power = intensity_power
def pair(self, reference: SpectrumType, query: SpectrumType) -> Tuple[float, int]:
"""Calculate cosine score between two spectra.
Parameters
----------
reference
Single reference spectrum.
query
Single query spectrum.
Returns
-------
Score
Tuple with cosine score and number of matched peaks.
"""
def get_matching_pairs():
"""Get pairs of peaks that match within the given tolerance."""
matching_pairs = collect_peak_pairs(spec1, spec2, self.tolerance,
shift=0.0, mz_power=self.mz_power,
intensity_power=self.intensity_power)
if matching_pairs is None:
return None
matching_pairs = matching_pairs[numpy.argsort(matching_pairs[:, 2])[::-1], :]
return matching_pairs
spec1 = reference.peaks.to_numpy
spec2 = query.peaks.to_numpy
matching_pairs = get_matching_pairs()
if matching_pairs is None:
return numpy.asarray((float(0), 0), dtype=self.score_datatype)
score = score_best_matches(matching_pairs, spec1, spec2,
self.mz_power, self.intensity_power)
return numpy.asarray(score, dtype=self.score_datatype)
|
"""Metric data needed for notifications."""
class MetricNotificationData: # pylint: disable=too-few-public-methods
"""Handle metric data needed for notifications."""
def __init__(self, metric, data_model, reason: str) -> None:
"""Initialise the Notification with metric data."""
        self.metric_name = metric["name"] or f'{data_model["metrics"][metric["type"]]["name"]}'
        self.metric_unit = metric["unit"] or f'{data_model["metrics"][metric["type"]]["unit"]}'
recent_measurements = metric["recent_measurements"]
scale = metric["scale"]
self.new_metric_value = None
self.old_metric_value = None
self.new_metric_status = self.__user_friendly_status(data_model, None)
self.old_metric_status = self.__user_friendly_status(data_model, None)
if len(recent_measurements) >= 1:
self.new_metric_value = recent_measurements[-1][scale]["value"]
self.new_metric_status = self.__user_friendly_status(data_model, recent_measurements[-1][scale]["status"])
if len(recent_measurements) >= 2:
self.old_metric_value = recent_measurements[-2][scale]["value"]
self.old_metric_status = self.__user_friendly_status(data_model, recent_measurements[-2][scale]["status"])
self.reason = reason
@staticmethod
def __user_friendly_status(data_model, metric_status) -> str:
"""Get the user friendly status name from the data model."""
statuses = data_model["sources"]["quality_time"]["parameters"]["status"]["api_values"]
inverted_statuses = {statuses[key]: key for key in statuses}
human_readable_status, color = (
str(inverted_statuses.get(metric_status, "unknown (white)")).strip(")").split(" (")
)
return f"{color} ({human_readable_status})"
|
"""Metric data needed for notifications."""
class MetricNotificationData: # pylint: disable=too-few-public-methods
"""Handle metric data needed for notifications."""
def __init__(self, metric, data_model, reason: str) -> None:
"""Initialise the Notification with metric data."""
self.metric_name = metric["name"] or f'{data_model["metrics"][metric["type"]]["name"]}'
self.metric_unit = metric["unit"] or f'{data_model["metrics"][metric["type"]]["unit"]}'
recent_measurements = metric["recent_measurements"]
scale = metric["scale"]
self.new_metric_value = None
self.old_metric_value = None
self.new_metric_status = self.__user_friendly_status(data_model, None)
self.old_metric_status = self.__user_friendly_status(data_model, None)
if len(recent_measurements) >= 1:
self.new_metric_value = recent_measurements[-1][scale]["value"]
self.new_metric_status = self.__user_friendly_status(data_model, recent_measurements[-1][scale]["status"])
if len(recent_measurements) >= 2:
self.old_metric_value = recent_measurements[-2][scale]["value"]
self.old_metric_status = self.__user_friendly_status(data_model, recent_measurements[-2][scale]["status"])
self.reason = reason
@staticmethod
def __user_friendly_status(data_model, metric_status) -> str:
"""Get the user friendly status name from the data model."""
statuses = data_model["sources"]["quality_time"]["parameters"]["status"]["api_values"]
inverted_statuses = {statuses[key]: key for key in statuses}
human_readable_status, color = (
str(inverted_statuses.get(metric_status, "unknown (white)")).strip(")").split(" (")
)
return f"{color} ({human_readable_status})"
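if __name__ == "__main__":
    # A minimal, hypothetical sketch of the data shapes this class expects,
    # inferred from the attribute access above (not taken from a real data model).
    example_data_model = {
        "metrics": {"violations": {"name": "Violations", "unit": "violations"}},
        "sources": {
            "quality_time": {
                "parameters": {"status": {"api_values": {"target met (green)": "target_met"}}},
            },
        },
    }
    example_metric = {
        "name": "",  # empty, so the name falls back to the data model
        "unit": "",
        "type": "violations",
        "scale": "count",
        "recent_measurements": [
            {"count": {"value": "10", "status": "target_met"}},
            {"count": {"value": "12", "status": "target_met"}},
        ],
    }
    notification = MetricNotificationData(example_metric, example_data_model, reason="status changed")
    print(notification.metric_name, notification.new_metric_value, notification.new_metric_status)
    # -> Violations 12 green (target met)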
|
# Copyright 2020 ChainLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import gc
import os
import time
import random
import numpy as np
import pytz
from BlockchainFormation.utils.utils import *
from src.Experiment_Handler import *
from src.benchmarking.utils import *
from DAppFormation import DApp_Handler
from BlockchainFormation import Node_Handler
import logging
utc = pytz.utc
import matplotlib
import matplotlib.pyplot as plt
from BlockchainFormation.Node_Handler import Node_Handler
from scipy import stats
from src.utils.csv_handling import *
from paramiko import SSHException
from src.benchmarking import *
def crashmarking(experiment_handler, experiment_config):
"""
    Run the crash benchmarking repetitions and collect their results.
    :param experiment_handler: handler bundling the blockchain/client configs, logger and DApp handler
    :param experiment_config: experiment settings (repetition_runs, exp_dir, method/mode/shape, ...)
    :return: dict mapping result keys to the values collected per repetition
"""
blockchain_config = experiment_handler.blockchain_formation_config
client_config = experiment_handler.client_config
logger = experiment_handler.logger
keys = get_keys(blockchain_config)
result = {key: [] for key in keys}
res_result_mapping = get_res_result_mapping()
set_delays(experiment_handler, experiment_config)
for r in range(1, experiment_config['repetition_runs'] + 1):
logger.info(f"New repetition: r = {r}")
test_config = {}
        test_config['exp_dir'] = f"{experiment_config['exp_dir']}/{experiment_config['method']}_{experiment_config['mode']}_{experiment_config['shape']}_{r}"
        test_config['frequency'] = experiment_handler['frequency']
        logger.info(f"Performing {experiment_config['method']}_{experiment_config['mode']}_{experiment_config['shape']}_{r} experiment (crashtest)")
try:
            os.makedirs(f"{test_config['exp_dir']}/logs")
            os.mkdir(f"{test_config['exp_dir']}/data")
            os.mkdir(f"{test_config['exp_dir']}/evaluation")
except Exception as e:
logger.exception(e)
total_config = dict()
total_config['blockchain_formation_settings'] = blockchain_config
total_config['client_settings'] = client_config
total_config['experiment_settings'] = experiment_config
        with open(f"{experiment_config['exp_dir']}/config.json", 'w+') as outfile:
json.dump(total_config, outfile, default=datetimeconverter, indent=4)
        logger.info(f"Starting new crashtest with frequency {experiment_handler['frequency']}")
try:
res, ramp = crash_test(experiment_handler, experiment_config, test_config)
except Exception as e:
logger.exception(e)
logger.info("This run failed - repeating once")
try:
res, ramp = crash_test(experiment_handler, experiment_config, test_config)
except Exception as e:
raise Exception("Second time something does not work - abort")
try:
for key in keys:
result[key].append(res[res_result_mapping[key]])
except Exception as e:
raise Exception("Something went wrong with the result")
gc.collect()
logger.debug(f"GC stats:{gc.get_stats()}")
return result
def crash_test(experiment_handler, experiment_config, test_config):
"""
    Run a single crash test: start resource and benchmarking measurements, then collect and evaluate the data.
    :param experiment_handler: handler bundling the blockchain/client configs, logger and DApp handler
    :param experiment_config: experiment settings (duration, delta_max_time, ...)
    :param test_config: per-run settings (exp_dir, frequency, repetitions, ...)
    :return: the evaluated result of the run (consumed as ``res, ramp`` by the caller)
"""
blockchain_config = experiment_handler.blockchain_formation_config
client_config = experiment_handler.client_config
logger = experiment_handler.logger
experiment_handler.dapp_handler.create_ssh_scp_clients()
result = result_init(blockchain_config)
    # counters for the retries and blockchain restarts within this test
retries = 0
restarts = 0
while retries < test_config['repetitions']:
# the total frequency
frequency = test_config['frequency']
# the frequency per client
freq = (test_config['frequency']) / len(client_config['priv_ips'])
# the frequency for naming the logs
test_config['freq'] = f"{round(freq, 1)}"
max_time = experiment_config['duration'] + experiment_config['delta_max_time']
logger.info("")
        logger.info(f"New benchmarking run started @ frequency {test_config['freq']} and max_time {max_time}")
# waiting such that all i/o from the last run is over
time.sleep(7)
try:
start_resources_measurements_blockchain(experiment_handler, experiment_config, test_config)
start_resources_measurements_clients(experiment_handler, experiment_config, test_config)
            # wait so that the CPU and ping measurements also cover some time before the test starts
time.sleep(7)
if not start_benchmarking_measurements(experiment_handler, experiment_config, test_config, max_time, frequency):
retries = retries + 1
ramp = ramp - 1
logger.info("Timeout - trying again with the same specification")
time.sleep(7)
continue
time.sleep(7)
get_benchmarking_data(experiment_handler, experiment_config, test_config)
exception_indicator = False
res, r_value = evaluate_benchmarking_test(experiment_handler, experiment_config, test_config, False, True)
except SSHException:
experiment_handler.dapp_handler.refresh_ssh_scp_clients
exception_indicator = True
except BlockchainNotRespondingError as e:
logger.exception(e)
restart_blockchain(experiment_handler, experiment_config)
restarts = restarts + 1
exception_indicator = True
except Exception as e:
logger.exception(e)
exception_indicator = True
experiment_handler.dapp_handler.close_ssh_scp_clients
|
# Copyright 2020 ChainLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import gc
import os
import time
import random
import numpy as np
import pytz
from BlockchainFormation.utils.utils import *
from src.Experiment_Handler import *
from src.benchmarking.utils import *
from DAppFormation import DApp_Handler
from BlockchainFormation import Node_Handler
import logging
utc = pytz.utc
import matplotlib
import matplotlib.pyplot as plt
from BlockchainFormation.Node_Handler import Node_Handler
from scipy import stats
from src.utils.csv_handling import *
from paramiko import SSHException
from src.benchmarking import *
def crashmarking(experiment_handler, experiment_config):
"""
    Run the crash benchmarking repetitions and collect their results.
    :param experiment_handler: handler bundling the blockchain/client configs, logger and DApp handler
    :param experiment_config: experiment settings (repetition_runs, exp_dir, method/mode/shape, ...)
    :return: dict mapping result keys to the values collected per repetition
"""
blockchain_config = experiment_handler.blockchain_formation_config
client_config = experiment_handler.client_config
logger = experiment_handler.logger
keys = get_keys(blockchain_config)
result = {key: [] for key in keys}
res_result_mapping = get_res_result_mapping()
set_delays(experiment_handler, experiment_config)
for r in range(1, experiment_config['repetition_runs'] + 1):
logger.info(f"New repetition: r = {r}")
test_config = {}
test_config['exp_dir'] = f"{experiment_config['exp_dir']}/{experiment_config['method']}_{experiment_config['mode']}_{experiment_config['shape']}_{r}"
test_config['frequency'] = experiment_handler['frequency']
logger.info(f"Performing {experiment_config['method']}_{experiment_config['mode']}_{experiment_config['shape']}_{r} experiment (crashtest)")
try:
os.makedirs(f"{test_config['exp_dir']}/logs")
os.mkdir(f"{test_config['exp_dir']}/data")
os.mkdir(f"{test_config['exp_dir']}/evaluation")
except Exception as e:
logger.exception(e)
total_config = dict()
total_config['blockchain_formation_settings'] = blockchain_config
total_config['client_settings'] = client_config
total_config['experiment_settings'] = experiment_config
with open(f"{experiment_config['exp_dir']}/config.json", 'w+') as outfile:
json.dump(total_config, outfile, default=datetimeconverter, indent=4)
logger.info(f"Starting new crashtest with frequency {experiment_handler['frequency']}")
try:
res, ramp = crash_test(experiment_handler, experiment_config, test_config)
except Exception as e:
logger.exception(e)
logger.info("This run failed - repeating once")
try:
res, ramp = crash_test(experiment_handler, experiment_config, test_config)
except Exception as e:
raise Exception("Second time something does not work - abort")
try:
for key in keys:
result[key].append(res[res_result_mapping[key]])
except Exception as e:
raise Exception("Something went wrong with the result")
gc.collect()
logger.debug(f"GC stats:{gc.get_stats()}")
return result
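# Note: each repetition above lays out its artefacts as (paths shown schematically)
#   {exp_dir}/{method}_{mode}_{shape}_{r}/logs
#   {exp_dir}/{method}_{mode}_{shape}_{r}/data
#   {exp_dir}/{method}_{mode}_{shape}_{r}/evaluation
# plus a top-level {exp_dir}/config.json holding the merged configuration.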
def crash_test(experiment_handler, experiment_config, test_config):
"""
    Run a single crash test: start resource and benchmarking measurements, then collect and evaluate the data.
    :param experiment_handler: handler bundling the blockchain/client configs, logger and DApp handler
    :param experiment_config: experiment settings (duration, delta_max_time, ...)
    :param test_config: per-run settings (exp_dir, frequency, repetitions, ...)
    :return: the evaluated result of the run (consumed as ``res, ramp`` by the caller)
"""
blockchain_config = experiment_handler.blockchain_formation_config
client_config = experiment_handler.client_config
logger = experiment_handler.logger
experiment_handler.dapp_handler.create_ssh_scp_clients()
result = result_init(blockchain_config)
    # counters for the retries and blockchain restarts within this test
retries = 0
restarts = 0
while retries < test_config['repetitions']:
# the total frequency
frequency = test_config['frequency']
# the frequency per client
freq = (test_config['frequency']) / len(client_config['priv_ips'])
# the frequency for naming the logs
test_config['freq'] = f"{round(freq, 1)}"
max_time = experiment_config['duration'] + experiment_config['delta_max_time']
logger.info("")
logger.info(f"New benchmarking run started @ frequency {test_config['freq']} and max_time {max_time}")
# waiting such that all i/o from the last run is over
time.sleep(7)
try:
start_resources_measurements_blockchain(experiment_handler, experiment_config, test_config)
start_resources_measurements_clients(experiment_handler, experiment_config, test_config)
            # wait so that the CPU and ping measurements also cover some time before the test starts
time.sleep(7)
if not start_benchmarking_measurements(experiment_handler, experiment_config, test_config, max_time, frequency):
retries = retries + 1
ramp = ramp - 1
logger.info("Timeout - trying again with the same specification")
time.sleep(7)
continue
time.sleep(7)
get_benchmarking_data(experiment_handler, experiment_config, test_config)
exception_indicator = False
res, r_value = evaluate_benchmarking_test(experiment_handler, experiment_config, test_config, False, True)
except SSHException:
experiment_handler.dapp_handler.refresh_ssh_scp_clients
exception_indicator = True
except BlockchainNotRespondingError as e:
logger.exception(e)
restart_blockchain(experiment_handler, experiment_config)
restarts = restarts + 1
exception_indicator = True
except Exception as e:
logger.exception(e)
exception_indicator = True
experiment_handler.dapp_handler.close_ssh_scp_clients
|
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from enum import Enum
from functools import wraps
from pathlib import Path
from subprocess import PIPE, STDOUT
from urllib.parse import unquote, unquote_plus
from http.server import HTTPServer, SimpleHTTPRequestHandler
import contextlib
import difflib
import hashlib
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import stat
import string
import subprocess
import sys
import tempfile
import time
import webbrowser
import unittest
import clang_native
import jsrun
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import get_canonical_temp_dir, try_delete, path_from_root
from tools.utils import MACOS, WINDOWS, read_file, read_binary, write_file, write_binary
from tools import shared, line_endings, building, config
logger = logging.getLogger('common')
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser.
# There are two special values that can be used here if running in an actual
# browser is not desired:
# EMTEST_BROWSER=0 : This will disable the actual running of the test and simply
# verify that it compiles and links.
# EMTEST_BROWSER=node : This will attempt to run the browser test under node.
# For most browser tests this does not work, but it can
# be useful for running pthread tests under node.
EMTEST_BROWSER = None
EMTEST_DETECT_TEMPFILE_LEAKS = None
EMTEST_SAVE_DIR = None
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = None
EMTEST_SKIP_SLOW = None
EMTEST_LACKS_NATIVE_CLANG = None
EMTEST_VERBOSE = None
EMTEST_REBASELINE = None
EMTEST_FORCE64 = None
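# A hedged illustration of how these toggles are typically supplied: they are
# expected to come from same-named environment variables when the test driver
# is launched (the command below is an example, not part of this module):
#   EMTEST_BROWSER=node EMTEST_SKIP_SLOW=1 EMTEST_ALL_ENGINES=1 <test runner command>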
# Special value for passing to assert_returncode which means we expect the program
# to fail with a non-zero return code, but we don't care specifically which one.
NON_ZERO = -1
TEST_ROOT = path_from_root('tests')
WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools/webidl_binder'))
EMBUILDER = shared.bat_suffix(path_from_root('embuilder'))
EMMAKE = shared.bat_suffix(path_from_root('emmake'))
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
# TODO(sbc): Should we make try_delete have a stronger guarantee?
assert not os.path.exists(os.path.join(pathname, entry))
def test_file(*path_components):
"""Construct a path relative to the emscripten "tests" directory."""
return str(Path(TEST_ROOT, *path_components))
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
def compiler_for(filename, force_c=False):
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
return EMXX
else:
return EMCC
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
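# A hedged usage sketch of skip_if (the helper name below is hypothetical and not
# part of this file): build a decorator that skips a test whenever the test class
# is not targeting wasm, by negating the is_wasm() check defined on RunnerCore.
def _skip_unless_wasm_example(note=''):
  assert not callable(note)
  def decorated(f):
    return skip_if(f, 'is_wasm', note, negate=True)
  return decorated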
def needs_dylink(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
self.check_dylink()
return func(self, *args, **kwargs)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def require_node(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_node()
return func(self, *args, **kwargs)
return decorated
def require_v8(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_v8()
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
def decorated(self, *args, **kwargs):
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
f(self, *args, **kwargs)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
  # Setting a value to None means clear the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
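# Example use of env_modify (illustrative values): inside the block FOO is set
# and BAR is removed; the previous environment is restored on exit.
#   with env_modify({'FOO': '1', 'BAR': None}):
#     ...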
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
def ensure_dir(dirname):
dirname = Path(dirname)
dirname.mkdir(parents=True, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_file(name, contents, binary=False):
name = Path(name)
assert not name.is_absolute()
if binary:
name.write_bytes(contents)
else:
name.write_text(contents)
def make_executable(name):
Path(name).chmod(stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dylink(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dynamic linking with memory growth (without wasm)')
if not self.is_wasm():
self.skipTest('no dynamic linking support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic linking support in ASan yet')
if '-fsanitize=leak' in self.emcc_args:
self.skipTest('no dynamic linking support in LSan yet')
def require_v8(self):
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
if 'EMTEST_SKIP_V8' in os.environ:
self.skipTest('test requires v8 and EMTEST_SKIP_V8 is set')
else:
self.fail('d8 required to run this test. Use EMTEST_SKIP_V8 to skip')
self.js_engines = [config.V8_ENGINE]
self.emcc_args.append('-sENVIRONMENT=shell')
def require_node(self):
if not config.NODE_JS or config.NODE_JS not in config.JS_ENGINES:
if 'EMTEST_SKIP_NODE' in os.environ:
self.skipTest('test requires node and EMTEST_SKIP_NODE is set')
else:
self.fail('node required to run this test. Use EMTEST_SKIP_NODE to skip')
self.js_engines = [config.NODE_JS]
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super().setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super().setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
# We want to be strict about closure warnings in our test code.
# TODO(sbc): Remove this if we make it the default for `-Werror`:
# https://github.com/emscripten-core/emscripten/issues/16205):
self.ldflags = ['-sCLOSURE_WARNINGS=error']
self.node_args = [
      # Increase stack trace limit to maximise usefulness of test failure reports
'--stack-trace-limit=50',
# Opt in to node v15 default behaviour:
# https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode
'--unhandled-rejections=throw',
      # Include backtrace for all uncaught exceptions (not just Error).
'--trace-uncaught',
]
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = config.JS_ENGINES.copy()
self.wasm_engines = config.WASM_ENGINES.copy()
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
# Even when --save-dir is used we still try to start with an empty directory as many tests
# expect this. --no-clean can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key, default=None):
return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret.append(f'-s{key}')
elif type(value) == list:
        ret.append(f'-s{key}={",".join(value)}')
else:
ret.append(f'-s{key}={value}')
return ret
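  # For example (illustrative settings): {'WASM': 0, 'EXPORTED_FUNCTIONS': ['_main', '_foo']}
  # is serialized by the method above to ['-sWASM=0', '-sEXPORTED_FUNCTIONS=_main,_foo'].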
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False, ldflags=True):
args = self.serialize_settings() + self.emcc_args
if ldflags:
args += self.ldflags
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
    # use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
es_check_env = os.environ.copy()
es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
try:
# es-check prints the details of the errors to stdout, but it also prints
# stuff in the case there are no errors:
# ES-Check: there were no ES version matching errors!
# pipe stdout and stderr so that we can choose if/when to print this
# output and avoid spamming stdout when tests are successful.
shared.run_process(es_check + ['es5', os.path.abspath(filename)], stdout=PIPE, stderr=STDOUT, env=es_check_env)
except subprocess.CalledProcessError as e:
print(e.stdout)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False, js_outfile=True, emcc_args=[], output_basename=None):
suffix = '.js' if js_outfile else '.wasm'
compiler = [compiler_for(filename, force_c)]
if compiler[0] == EMCC:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# For historical reasons emcc compiles and links as C++ by default.
# However we want to run our tests in a more strict manner. We can
# remove this if the issue above is ever fixed.
compiler.append('-sNO_DEFAULT_TO_CXX')
if force_c:
compiler.append('-xc')
if output_basename:
output = output_basename + suffix
else:
basename = os.path.basename(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + emcc_args + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + str(include) for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and self.uses_memory_init_file():
src = read_file(output)
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
js = read_file(javascript_file)
blob = "".join(js.splitlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def measure_wasm_code_lines(self, wasm):
wat_lines = self.get_wasm_text(wasm).splitlines()
non_data_lines = [line for line in wat_lines if '(data ' not in line]
return len(non_data_lines)
def run_js(self, filename, engine=None, args=[],
output_nicerizer=None,
assert_returncode=0,
interleaved_output=True):
# use files, as PIPE can get too full and hang us
stdout_file = self.in_dir('stdout')
stderr_file = None
if interleaved_output:
stderr = STDOUT
else:
stderr_file = self.in_dir('stderr')
stderr = open(stderr_file, 'w')
error = None
timeout_error = None
if not engine:
engine = self.js_engines[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout_file, 'w'),
stderr=stderr,
assert_returncode=assert_returncode)
except subprocess.TimeoutExpired as e:
timeout_error = e
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
ret = read_file(stdout_file)
if not interleaved_output:
ret += read_file(stderr_file)
if output_nicerizer:
ret = output_nicerizer(ret)
if error or timeout_error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(read_file(stdout_file), end='')
print('-- end program output --')
if not interleaved_output:
print('-- begin program stderr --')
print(read_file(stderr_file), end='')
print('-- end program stderr --')
if timeout_error:
raise timeout_error
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with --verbose.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertFileContents(self, filename, contents):
if EMTEST_VERBOSE:
print(f'Comparing results contents of file: {filename}')
contents = contents.replace('\r', '')
if EMTEST_REBASELINE:
with open(filename, 'w') as f:
f.write(contents)
return
if not os.path.exists(filename):
self.fail('Test expectation file not found: ' + filename + '.\n' +
'Run with --rebaseline to generate.')
expected_content = read_file(filename)
message = "Run with --rebaseline to automatically update expectations"
self.assertTextDataIdentical(expected_content, contents, message,
filename, filename + '.new')
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s'" % (limit_size(value), limit_size(string)))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(read_binary(file1),
read_binary(file2))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init=None, cache_name_extra='', native=False):
if env_init is None:
env_init = {}
if make_args is None:
make_args = ['-j', str(shared.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
# get_library() is used to compile libraries, and not link executables,
# so we don't want to pass linker flags here (emscripten warns if you
# try to pass linker settings when compiling).
emcc_args = self.get_emcc_args(ldflags=False)
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
write_binary(bc_file, contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
if configure is not None:
# Avoid += so we don't mutate the default arg
configure = configure + configure_args
cflags = ' '.join(emcc_args)
env_init.setdefault('CFLAGS', cflags)
env_init.setdefault('CXXFLAGS', cflags)
return build_library(name, build_dir, output_dir, generated_libs, configure,
make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native)
def clear(self):
delete_contents(self.get_dir())
if EMSCRIPTEN_TEMP_DIR:
delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than error'ing.
    # In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
print(e.stdout)
print(e.stderr)
self.fail(f'subprocess exited with non-zero return code({e.returncode}): `{shared.shlex_join(cmd)}`')
def emcc(self, filename, args=[], output_filename=None, **kwargs):
if output_filename is None:
output_filename = filename + '.o'
try_delete(output_filename)
self.run_process([compiler_for(filename), filename] + args + ['-o', output_filename], **kwargs)
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
    # However, on Windows a Python traceback can sometimes happen randomly,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
  # exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
  # when run under a browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_file('libb.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc() {
afunc("b");
}
''')
create_file('libc.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-sSIDE_MODULE'] + self.get_emcc_args()
cmdv += linkto
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.c', ['liba' + so])
ccshared('libc.c', ['liba' + so])
self.set_setting('MAIN_MODULE')
extra_args = ['-L.', 'libb' + so, 'libc' + so]
do_run(r'''
#ifdef __cplusplus
extern "C" {
#endif
void bfunc();
void cfunc();
#ifdef __cplusplus
}
#endif
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args)
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc_ptr)(), (*cfunc_ptr)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc_ptr = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc_ptr != NULL);
cfunc_ptr = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc_ptr != NULL);
bfunc_ptr();
cfunc_ptr();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = self.js_engines
for engine in js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
write_file(filename, src)
self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
return self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
self._build_and_run(filename, read_file(expected_output_filename), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = test_file(*path)
out_suffix = kwargs.pop('out_suffix', '')
outfile = shared.unsuffixed(srcfile) + out_suffix + '.out'
expected = read_file(outfile)
self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
js_engines=None, libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False, emcc_args=[],
interleaved_output=True,
regex=False,
output_basename=None):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
js_file = self.build(filename, libraries=libraries, includes=includes,
force_c=force_c, emcc_args=emcc_args,
output_basename=output_basename)
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.replace_suffix(js_file, '.wasm.c')
executable = shared.replace_suffix(js_file, '.exe')
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args,
output_nicerizer=output_nicerizer,
assert_returncode=assert_returncode,
interleaved_output=interleaved_output)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all or len(expected_output) == 1:
for o in expected_output:
if regex:
self.assertTrue(re.search(o, js_output), 'Expected regex "%s" to match on:\n%s' % (o, js_output))
else:
self.assertContained(o, js_output)
else:
if regex:
match_any = any(re.search(o, js_output) for o in expected_output)
self.assertTrue(match_any, 'Expected at least one of "%s" to match on:\n%s' % (expected_output, js_output))
else:
self.assertContained(expected_output, js_output)
if assert_returncode == 0 and check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
return js_output
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + test_file('third_party/freetype/include'),
'-I' + test_file('third_party/poppler/include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=['cmake', '.'],
make=['cmake', '--build', '.'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way allows the page to close() itself when done.
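# A rough sketch of the protocol implemented below (illustrative summary only;
# the authoritative behaviour is the do_GET handler itself):
#   1. The test runner puts (url, dir) on in_queue; the harness page polls
#      GET /check until it receives 'COMMAND:<url>'.
#   2. The harness opens <url>, and the test page reports its result via
#      GET /report_result?<value>, which the handler pushes onto out_queue.
#   3. run_browser() (in BrowserCore) reads out_queue and compares the value
#      against the expected result.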
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(read_binary(test_file('browser_harness.html')))
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
# Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
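# Illustrative usage from a hypothetical test body (not part of this file):
#   self.compile_btest(['test.c', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
# would include only the JS reporting helpers, while the default Reporting.FULL
# also compiles in report_result.c for tests that use REPORT_RESULT.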
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.also_wasm2js = int(os.getenv('EMTEST_BROWSER_ALSO_WASM2JS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
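# Illustrative call with hypothetical values: a page built to out.html that is
# expected to report the value 0 could be driven with
#   self.run_browser('out.html', 'expecting 0', '/report_result?0')
# which is essentially what btest() below does for each expected value.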
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
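# Illustrative use with a hypothetical reference image: a GL test can compare
# the canvas against a reference with
#   self.reftest(test_file('my_reference.png'), manually_trigger=True)
# and then call doReftest() from its own JS once rendering is complete.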
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
reporting = read_file(test_file('browser_reporting.js'))
write_file('reftest.js', '''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
/** @suppress {uselessCode} */
function setupRefTest() {
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
/** @suppress{checkTypes} */
window.requestAnimationFrame = function(func) {
return realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
}
setupRefTest();
''' % (reporting, basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
# Inject support code for reporting results. This adds an include path and force-includes a
# header so testcases can use REPORT_RESULT, and also adds a C file to be compiled alongside
# the testcase, which contains the implementation of REPORT_RESULT (we can't just include that
# implementation in the header as there may be multiple files being compiled here).
args += ['-sIN_TEST_HARNESS']
if reporting != Reporting.NONE:
# For basic reporting we inject JS helper functions to report results back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. the REPORT_RESULT macro) is required,
# also compile in report_result.c and force-include report_result.h
args += ['-I' + TEST_ROOT,
'-include', test_file('report_result.h'),
test_file('report_result.c')]
if EMTEST_BROWSER == 'node':
args.append('-DEMTEST_NODE')
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
assert('reporting' not in kwargs)
assert('expected' not in kwargs)
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
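# Run a browser test: compile `filename` into test.html, serve it through the
# harness server and check the reported result. A test supplies either
# `expected` (one or more values the page may report) or `reference` (an image
# to compare the canvas against, with `reference_slack` controlling how much
# average pixel difference is tolerated). `also_proxied` re-runs the test with
# --proxy-to-worker, and also_wasm2js re-runs it with -sWASM=0.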
def btest(self, filename, expected=None, reference=None,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_wasm2js=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
original_args = args
args = args.copy()
if not os.path.exists(filename):
filename = test_file(filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-sGL_TESTING']
outfile = 'test.html'
args += [filename, '-o', outfile]
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
if EMTEST_BROWSER == 'node':
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
output = self.run_js('test.js')
self.assertContained('RESULT: ' + expected[0], output)
else:
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in original_args and (also_wasm2js or self.also_wasm2js):
print('WASM=0')
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['-sWASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-sGL_TESTING'], message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure,
make,
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers.) This cache only lives for the duration of the test
runner. There is a different concept of caching as well; see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = test_file(name.replace('_native', ''))
project_dir = Path(build_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
# When debugging, it is sometimes useful to comment this out, along with the two lines above.
shutil.copytree(source_dir, project_dir)
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = os.environ.copy()
env.update(env_init)
if not native:
# Inject emcmake, emconfigure or emmake accordingly, but only if we are
# cross compiling.
if configure:
if configure[0] == 'cmake':
configure = [EMCMAKE] + configure
else:
configure = [EMCONFIGURE] + configure
else:
make = [EMMAKE] + make
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
print('-- configure stdout --')
print(read_file(Path(project_dir, 'configure_out')))
print('-- end configure stdout --')
print('-- configure stderr --')
print(read_file(Path(project_dir, 'configure_err')))
print('-- end configure stderr --')
raise
# if we run configure or cmake we don't then need any kind
# of special env when we run make below
env = None
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, read_binary(f)))
return generated_libs
|
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from enum import Enum
from functools import wraps
from pathlib import Path
from subprocess import PIPE, STDOUT
from urllib.parse import unquote, unquote_plus
from http.server import HTTPServer, SimpleHTTPRequestHandler
import contextlib
import difflib
import hashlib
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import stat
import string
import subprocess
import sys
import tempfile
import time
import webbrowser
import unittest
import clang_native
import jsrun
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import get_canonical_temp_dir, try_delete, path_from_root
from tools.utils import MACOS, WINDOWS, read_file, read_binary, write_file, write_binary
from tools import shared, line_endings, building, config
logger = logging.getLogger('common')
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser.
# There are two special values that can be used here if running in an actual
# browser is not desired:
# EMTEST_BROWSER=0 : This will disable the actual running of the test and simply
# verify that it compiles and links.
# EMTEST_BROWSER=node : This will attempt to run the browser test under node.
# For most browser tests this does not work, but it can
# be useful for running pthread tests under node.
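# Illustrative settings (the paths/commands below are hypothetical):
#   EMTEST_BROWSER=firefox                        # a webbrowser.get() type
#   EMTEST_BROWSER="/path/to/chrome --incognito"  # a command with arguments
#   EMTEST_BROWSER=0                              # compile/link only, don't run
#   EMTEST_BROWSER=node                           # try to run under node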
EMTEST_BROWSER = None
EMTEST_DETECT_TEMPFILE_LEAKS = None
EMTEST_SAVE_DIR = None
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = None
EMTEST_SKIP_SLOW = None
EMTEST_LACKS_NATIVE_CLANG = None
EMTEST_VERBOSE = None
EMTEST_REBASELINE = None
EMTEST_FORCE64 = None
# Special value for passing to assert_returncode which means we expect the program
# to fail with a non-zero return code, but we don't care specifically which one.
NON_ZERO = -1
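# Illustrative use with a hypothetical test file: expect a program to fail with
# some non-zero exit code, without caring which one:
#   self.do_runf('aborts.c', 'expected error text', assert_returncode=NON_ZERO)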
TEST_ROOT = path_from_root('tests')
WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools/webidl_binder'))
EMBUILDER = shared.bat_suffix(path_from_root('embuilder'))
EMMAKE = shared.bat_suffix(path_from_root('emmake'))
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
# TODO(sbc): Should we make try_delete have a stronger guarantee?
assert not os.path.exists(os.path.join(pathname, entry))
def test_file(*path_components):
"""Construct a path relative to the emscripten "tests" directory."""
return str(Path(TEST_ROOT, *path_components))
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
def compiler_for(filename, force_c=False):
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
return EMXX
else:
return EMCC
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
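# Illustrative use, with is_wasm (a real method on the test class) as the
# condition (the wrapping itself is hypothetical):
#   my_test = skip_if(my_test, 'is_wasm', 'not compatible with wasm')
# skips the test whenever self.is_wasm() returns True; pass negate=True to
# invert the check.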
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
def needs_dylink(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
self.check_dylink()
return func(self, *args, **kwargs)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def require_node(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_node()
return func(self, *args, **kwargs)
return decorated
def require_v8(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_v8()
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
def decorated(self, *args, **kwargs):
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
f(self, *args, **kwargs)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
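# Illustrative use with hypothetical variable values: set one variable and
# clear another for the duration of the block:
#   with env_modify({'EMCC_DEBUG': '1', 'EMCC_CFLAGS': None}):
#     ...  # anything run here sees the modified os.environ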
old_env = os.environ.copy()
print("env_modify: " + str(updates))
# Setting a value to None clears that environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
def ensure_dir(dirname):
dirname = Path(dirname)
dirname.mkdir(parents=True, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_file(name, contents, binary=False):
name = Path(name)
assert not name.is_absolute()
if binary:
name.write_bytes(contents)
else:
name.write_text(contents)
def make_executable(name):
Path(name).chmod(stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dylink(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dynamic linking with memory growth (without wasm)')
if not self.is_wasm():
self.skipTest('no dynamic linking support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic linking support in ASan yet')
if '-fsanitize=leak' in self.emcc_args:
self.skipTest('no dynamic linking support in LSan yet')
def require_v8(self):
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
if 'EMTEST_SKIP_V8' in os.environ:
self.skipTest('test requires v8 and EMTEST_SKIP_V8 is set')
else:
self.fail('d8 required to run this test. Use EMTEST_SKIP_V8 to skip')
self.js_engines = [config.V8_ENGINE]
self.emcc_args.append('-sENVIRONMENT=shell')
def require_node(self):
if not config.NODE_JS or config.NODE_JS not in config.JS_ENGINES:
if 'EMTEST_SKIP_NODE' in os.environ:
self.skipTest('test requires node and EMTEST_SKIP_NODE is set')
else:
self.fail('node required to run this test. Use EMTEST_SKIP_NODE to skip')
self.js_engines = [config.NODE_JS]
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super().setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super().setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
# We want to be strict about closure warnings in our test code.
# TODO(sbc): Remove this if we make it the default for `-Werror`:
# https://github.com/emscripten-core/emscripten/issues/16205):
self.ldflags = ['-sCLOSURE_WARNINGS=error']
self.node_args = [
# Increase the stack trace limit to maximise the usefulness of test failure reports
'--stack-trace-limit=50',
# Opt in to node v15 default behaviour:
# https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode
'--unhandled-rejections=throw',
# Include a backtrace for all uncaught exceptions (not just Error).
'--trace-uncaught',
]
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = config.JS_ENGINES.copy()
self.wasm_engines = config.WASM_ENGINES.copy()
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
# Even when --save-dir is used we still try to start with an empty directory as many tests
# expect this. --no-clean can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key, default=None):
return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
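# Convert the accumulated settings_mods into -s flags for emcc. Illustrative
# mapping (the setting names below are just examples):
#   {'EXIT_RUNTIME': 1, 'EXPORTED_FUNCTIONS': ['_main', '_foo'], 'INITIAL_MEMORY': '32mb'}
#   -> ['-sEXIT_RUNTIME', '-sEXPORTED_FUNCTIONS=_main,_foo', '-sINITIAL_MEMORY=32mb']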
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret.append(f'-s{key}')
elif type(value) == list:
ret.append(f'-s{key}={",".join(value)}')
else:
ret.append(f'-s{key}={value}')
return ret
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# @param main_file: whether this is the main file of the test. Some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False, ldflags=True):
args = self.serialize_settings() + self.emcc_args
if ldflags:
args += self.ldflags
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
# use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
es_check_env = os.environ.copy()
es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
try:
# es-check prints the details of the errors to stdout, but it also prints
# stuff in the case there are no errors:
# ES-Check: there were no ES version matching errors!
# pipe stdout and stderr so that we can choose if/when to print this
# output and avoid spamming stdout when tests are successful.
shared.run_process(es_check + ['es5', os.path.abspath(filename)], stdout=PIPE, stderr=STDOUT, env=es_check_env)
except subprocess.CalledProcessError as e:
print(e.stdout)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False, js_outfile=True, emcc_args=[], output_basename=None):
suffix = '.js' if js_outfile else '.wasm'
compiler = [compiler_for(filename, force_c)]
if compiler[0] == EMCC:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# For historical reasons emcc compiles and links as C++ by default.
# However we want to run our tests in a more strict manner. We can
# remove this if the issue above is ever fixed.
compiler.append('-sNO_DEFAULT_TO_CXX')
if force_c:
compiler.append('-xc')
if output_basename:
output = output_basename + suffix
else:
basename = os.path.basename(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + emcc_args + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + str(include) for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and self.uses_memory_init_file():
src = read_file(output)
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
js = read_file(javascript_file)
blob = "".join(js.splitlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def measure_wasm_code_lines(self, wasm):
wat_lines = self.get_wasm_text(wasm).splitlines()
non_data_lines = [line for line in wat_lines if '(data ' not in line]
return len(non_data_lines)
def run_js(self, filename, engine=None, args=[],
output_nicerizer=None,
assert_returncode=0,
interleaved_output=True):
# use files, as PIPE can get too full and hang us
stdout_file = self.in_dir('stdout')
stderr_file = None
if interleaved_output:
stderr = STDOUT
else:
stderr_file = self.in_dir('stderr')
stderr = open(stderr_file, 'w')
error = None
timeout_error = None
if not engine:
engine = self.js_engines[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout_file, 'w'),
stderr=stderr,
assert_returncode=assert_returncode)
except subprocess.TimeoutExpired as e:
timeout_error = e
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
ret = read_file(stdout_file)
if not interleaved_output:
ret += read_file(stderr_file)
if output_nicerizer:
ret = output_nicerizer(ret)
if error or timeout_error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(read_file(stdout_file), end='')
print('-- end program output --')
if not interleaved_output:
print('-- begin program stderr --')
print(read_file(stderr_file), end='')
print('-- end program stderr --')
if timeout_error:
raise timeout_error
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with --verbose.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertFileContents(self, filename, contents):
if EMTEST_VERBOSE:
print(f'Comparing results contents of file: {filename}')
contents = contents.replace('\r', '')
if EMTEST_REBASELINE:
with open(filename, 'w') as f:
f.write(contents)
return
if not os.path.exists(filename):
self.fail('Test expectation file not found: ' + filename + '.\n' +
'Run with --rebaseline to generate.')
expected_content = read_file(filename)
message = "Run with --rebaseline to automatically update expectations"
self.assertTextDataIdentical(expected_content, contents, message,
filename, filename + '.new')
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s'" % (limit_size(value), limit_size(string)))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(read_binary(file1),
read_binary(file2))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init=None, cache_name_extra='', native=False):
if env_init is None:
env_init = {}
if make_args is None:
make_args = ['-j', str(shared.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
# get_library() is used to compile libraries, and not link executables,
# so we don't want to pass linker flags here (emscripten warns if you
# try to pass linker settings when compiling).
emcc_args = self.get_emcc_args(ldflags=False)
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
write_binary(bc_file, contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
if configure is not None:
# Avoid += so we don't mutate the default arg
configure = configure + configure_args
cflags = ' '.join(emcc_args)
env_init.setdefault('CFLAGS', cflags)
env_init.setdefault('CXXFLAGS', cflags)
return build_library(name, build_dir, output_dir, generated_libs, configure,
make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native)
def clear(self):
delete_contents(self.get_dir())
if EMSCRIPTEN_TEMP_DIR:
delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than error'ing.
# In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
print(e.stdout)
print(e.stderr)
self.fail(f'subprocess exited with non-zero return code({e.returncode}): `{shared.shlex_join(cmd)}`')
def emcc(self, filename, args=[], output_filename=None, **kwargs):
if output_filename is None:
output_filename = filename + '.o'
try_delete(output_filename)
self.run_process([compiler_for(filename), filename] + args + ['-o', output_filename], **kwargs)
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
# exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run in the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_file('libb.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc() {
afunc("b");
}
''')
create_file('libc.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can potentially be called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-sSIDE_MODULE'] + self.get_emcc_args()
cmdv += linkto
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.c', ['liba' + so])
ccshared('libc.c', ['liba' + so])
self.set_setting('MAIN_MODULE')
extra_args = ['-L.', 'libb' + so, 'libc' + so]
do_run(r'''
#ifdef __cplusplus
extern "C" {
#endif
void bfunc();
void cfunc();
#ifdef __cplusplus
}
#endif
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args)
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc_ptr)(), (*cfunc_ptr)();
// FIXME: with RTLD_LOCAL, binding symbols to the loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc_ptr = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc_ptr != NULL);
cfunc_ptr = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc_ptr != NULL);
bfunc_ptr();
cfunc_ptr();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = self.js_engines
for engine in js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
write_file(filename, src)
self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
return self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
self._build_and_run(filename, read_file(expected_output_filename), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = test_file(*path)
out_suffix = kwargs.pop('out_suffix', '')
outfile = shared.unsuffixed(srcfile) + out_suffix + '.out'
expected = read_file(outfile)
self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
js_engines=None, libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False, emcc_args=[],
interleaved_output=True,
regex=False,
output_basename=None):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
js_file = self.build(filename, libraries=libraries, includes=includes,
force_c=force_c, emcc_args=emcc_args,
output_basename=output_basename)
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.replace_suffix(js_file, '.wasm.c')
executable = shared.replace_suffix(js_file, '.exe')
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args,
output_nicerizer=output_nicerizer,
assert_returncode=assert_returncode,
interleaved_output=interleaved_output)
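# normalize Windows line endings before comparing against the expected output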
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all or len(expected_output) == 1:
for o in expected_output:
if regex:
self.assertTrue(re.search(o, js_output), 'Expected regex "%s" to match on:\n%s' % (o, js_output))
else:
self.assertContained(o, js_output)
else:
if regex:
match_any = any(re.search(o, js_output) for o in expected_output)
self.assertTrue(match_any, 'Expected at least one of "%s" to match on:\n%s' % (expected_output, js_output))
else:
self.assertContained(expected_output, js_output)
if assert_returncode == 0 and check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
return js_output
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + test_file('third_party/freetype/include'),
'-I' + test_file('third_party/poppler/include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warning. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=['cmake', '.'],
make=['cmake', '--build', '.'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(read_binary(test_file('browser_harness.html')))
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
# Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.also_wasm2js = int(os.getenv('EMTEST_BROWSER_ALSO_WASM2JS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
reporting = read_file(test_file('browser_reporting.js'))
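# reftest.js loads the expected image and the current canvas contents into 2D contexts, sums the
# absolute per-channel differences, and reports the average difference per pixel back to the harness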
write_file('reftest.js', '''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
/** @suppress {uselessCode} */
function setupRefTest() {
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
/** @suppress{checkTypes} */
window.requestAnimationFrame = function(func) {
return realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
}
setupRefTest();
''' % (reporting, basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
# Inject support code for reporting results. This adds an include a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-sIN_TEST_HARNESS']
if reporting != Reporting.NONE:
# For basic reporting we inject JS helper functions to report the result back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. REPORT_RESULT macro) is required
# also compile in report_result.c and force-include report_result.h
args += ['-I' + TEST_ROOT,
'-include', test_file('report_result.h'),
test_file('report_result.c')]
if EMTEST_BROWSER == 'node':
args.append('-DEMTEST_NODE')
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
assert('reporting' not in kwargs)
assert('expected' not in kwargs)
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_wasm2js=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
original_args = args
args = args.copy()
if not os.path.exists(filename):
filename = test_file(filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-sGL_TESTING']
outfile = 'test.html'
args += [filename, '-o', outfile]
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
if EMTEST_BROWSER == 'node':
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
output = self.run_js('test.js')
self.assertContained('RESULT: ' + expected[0], output)
else:
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in original_args and (also_wasm2js or self.also_wasm2js):
print('WASM=0')
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['-sWASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-sGL_TESTING'], message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure,
make,
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = test_file(name.replace('_native', ''))
project_dir = Path(build_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
# Useful in debugging sometimes to comment this out, and two lines above
shutil.copytree(source_dir, project_dir)
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = os.environ.copy()
env.update(env_init)
if not native:
# Inject emcmake, emconfigure or emmake accordingly, but only if we are
# cross compiling.
if configure:
if configure[0] == 'cmake':
configure = [EMCMAKE] + configure
else:
configure = [EMCONFIGURE] + configure
else:
make = [EMMAKE] + make
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
print('-- configure stdout --')
print(read_file(Path(project_dir, 'configure_out')))
print('-- end configure stdout --')
print('-- configure stderr --')
print(read_file(Path(project_dir, 'configure_err')))
print('-- end configure stderr --')
raise
# if we run configure or cmake we don't then need any kind
# of special env when we run make below
env = None
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, read_binary(f)))
return generated_libs
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 Rugged Bytes IT-Services GmbH
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import json
import os
import click
import elementstx # noqa: F401
from bitcointx import select_chain_params
from bitcointx.core import (
CMutableTransaction,
CTransaction,
CMutableTxIn,
CMutableTxInWitness,
b2lx,
b2x,
x,
)
from bitcointx.core.script import CScript
from bitcointx.core.key import CKey
from bitcointx.rpc import JSONRPCError
from bitcointx.wallet import CCoinExtKey
from elementstx.wallet import CCoinConfidentialAddress
from cli_common import (
load_data_with_checking_hash, save_to_json_with_hash, read_plandata,
network_option
)
from lib.constants import (
CONTRACT_COLLATERAL_INP_INDEX,
CONTRACT_PRINCIPAL_INP_INDEX
)
from lib.loan_utils import create_loan_transaction
from lib.rpc_utils import calculate_fee, get_fee_utxo, get_bitcoin_asset
from lib.types import (
CreditorLoanStartInfo,
DebtorLoanStartInfo,
RPCPathParamType,
ElementsRPCCaller
)
CONTRACT_FEE_INP_INDEX = 2
@click.group()
def facilitator() -> None:
...
@facilitator.command()
@click.option(
"-r",
"--rpc",
type=RPCPathParamType(),
help="config dir path",
required=True,
)
@click.option(
"-p",
"--plan",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
help="path to plan data",
)
@click.option(
"-l",
"--loan",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to principal info",
)
@click.option(
"-c",
"--collateral",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to collateral info",
)
@click.option(
"-oc",
"--output-creditor",
type=click.Path(
file_okay=True,
dir_okay=False,
resolve_path=True,
allow_dash=False,
),
required=True,
help="path to creditor info",
)
@click.option(
"-od",
"--output-debtor",
type=click.Path(
file_okay=True,
dir_okay=False,
resolve_path=True,
allow_dash=False,
),
required=True,
help="path to debtor info",
)
@click.option(
"-ot",
"--output-tx",
type=click.Path(
file_okay=True,
dir_okay=False,
resolve_path=True,
allow_dash=True,
),
required=True,
default="-",
help="path to tx data",
)
@click.option(
"--min-output",
"min_output",
type=int,
default=1,
help="path to write transaction data",
)
@click.option(
"--contract-start-delay",
"contract_start_delay",
type=int,
required=True,
help="Delay in blocks to the start of the contract from the current block",
)
@network_option
def make(
rpc: ElementsRPCCaller,
plan: str,
loan: str,
collateral: str,
output_creditor: str,
output_debtor: str,
output_tx: str,
min_output: int,
contract_start_delay: int,
network: str
) -> None:
select_chain_params(network)
if contract_start_delay < 0:
raise click.UsageError(f"contract_start_delay must be positive")
repayment_plan = read_plandata(plan).to_repayment_plan(min_output)
debtor_start_data = load_data_with_checking_hash(collateral)
try:
txstr = rpc.getrawtransaction(debtor_start_data["txid"])
except JSONRPCError as e:
raise click.UsageError(
f"Can't get transaction {debtor_start_data["txid"]}"
f" expected to contain the principal: {e}"
)
tx = CTransaction.deserialize(x(txstr))
assert isinstance(debtor_start_data["vout_index"], int)
debtor_start_info = DebtorLoanStartInfo(
tx=tx, vout_index=int(debtor_start_data["vout_index"]),
blinding_key=CKey(x(debtor_start_data["blinding_key"])),
control_addr=CCoinConfidentialAddress(
debtor_start_data["control_addr"]),
receive_addr=CCoinConfidentialAddress(
debtor_start_data["receive_addr"]),
collateral_change_addr=CCoinConfidentialAddress(
debtor_start_data["collateral_change_addr"]),
plan=repayment_plan
)
creditor_start_data = load_data_with_checking_hash(loan)
try:
txstr = rpc.getrawtransaction(creditor_start_data["txid"])
except JSONRPCError as e:
raise click.UsageError(
f"Can't get transaction {creditor_start_data["txid"]}"
f" expected to contain collateral: {e}"
)
tx = CTransaction.deserialize(x(txstr))
creditor_start_info = CreditorLoanStartInfo(
tx=tx, vout_index=int(creditor_start_data["vout_index"]),
blinding_key=CKey(x(creditor_start_data["blinding_key"])),
control_addr=CCoinConfidentialAddress(
creditor_start_data["control_addr"]),
principal_change_addr=CCoinConfidentialAddress(
creditor_start_data["principal_change_addr"]),
plan=repayment_plan
)
fee_amount = calculate_fee(rpc)
print(f"Calculated fee amount for the transaction: {fee_amount}")
bitcoin_asset = get_bitcoin_asset(rpc)
fee_utxo_info = get_fee_utxo(rpc, fee_amount, bitcoin_asset)
fee_cout = fee_utxo_info.outpoint
# Lock this utxo
rpc.lockunspent(False, [{"txid": b2lx(fee_cout.hash), "vout": fee_cout.n}])
shared_blinding_xkey = CCoinExtKey.from_seed(os.urandom(32))
start_block_num = rpc.getblockcount() + contract_start_delay
fee_change_addr = CCoinConfidentialAddress(rpc.getnewaddress())
tx, creditor_ctl_asset, debtor_ctl_asset = create_loan_transaction(
repayment_plan,
creditor_start_info,
debtor_start_info,
shared_blinding_xkey,
fee_utxo_info,
fee_change_addr,
bitcoin_asset,
start_block_num=start_block_num,
fee_amount=fee_amount,
)
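# mask Bob's (debtor) collateral input in the copy sent to Alice (creditor)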
tx_for_alice = tx.clone()
tx_for_alice.vin[CONTRACT_COLLATERAL_INP_INDEX] = CMutableTxIn()
creditor_info = {
"tx": b2x(tx_for_alice.to_immutable().serialize()),
"shared-blinding-xkey": str(shared_blinding_xkey),
"debtor-control-asset": debtor_ctl_asset.to_hex(),
"bitcoin-asset": bitcoin_asset.to_hex(),
"start-block-num": start_block_num,
}
save_to_json_with_hash(output_creditor, creditor_info)
# mask Alice's input when sending to Bob
tx_for_bob = tx.clone()
tx_for_bob.vin[CONTRACT_PRINCIPAL_INP_INDEX] = CMutableTxIn()
debtor_info = {
"tx": b2x(tx_for_bob.to_immutable().serialize()),
"shared-blinding-xkey": str(shared_blinding_xkey),
"creditor-control-asset": creditor_ctl_asset.to_hex(),
"bitcoin-asset": bitcoin_asset.to_hex(),
"start-block-num": start_block_num,
}
save_to_json_with_hash(output_debtor, debtor_info)
with click.open_file(output_tx, mode="x") as f:
f.write(b2x(tx.to_immutable().serialize()))
print(f"Contract transaction was saved to {output_tx}")
print(f"The transaction can not be broadcast until block "
f"{start_block_num}")
@facilitator.command()
@click.option(
"-r",
"--rpc",
type=RPCPathParamType(),
help="config dir path",
required=True,
)
@click.option(
"-t",
"--tx",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
help="path to loan transaction",
)
@click.option(
"-c",
"--creditor-witness",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to creditor witness",
)
@click.option(
"-d",
"--debtor-witness",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to debtor witness",
)
@click.option(
"-o",
"--output",
type=click.Path(
dir_okay=False,
resolve_path=True,
allow_dash=True,
),
default="-",
help="path to output transaction",
)
@network_option
def sign(rpc: ElementsRPCCaller, tx: str,
creditor_witness: str, debtor_witness: str, output: str,
network: str) -> None:
select_chain_params(network)
with click.open_file(tx) as f:
loan_tx = CMutableTransaction.deserialize(x(f.read()))
with click.open_file(creditor_witness) as f:
data_alice = json.loads(f.read())
with click.open_file(debtor_witness) as f:
data_bob = json.loads(f.read())
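# each party supplies the witness and scriptSig for their own contract input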
w_sign_bob = CMutableTxInWitness.deserialize(
x(data_bob["witnessscript"]))
w_sign_alice = CMutableTxInWitness.deserialize(
x(data_alice["witnessscript"]))
s_sign_bob = CScript(x(data_bob["signscript"]))
s_sign_alice = CScript(x(data_alice["signscript"]))
loan_tx.wit.vtxinwit[CONTRACT_COLLATERAL_INP_INDEX] = w_sign_bob
loan_tx.wit.vtxinwit[CONTRACT_PRINCIPAL_INP_INDEX] = w_sign_alice
loan_tx.vin[CONTRACT_COLLATERAL_INP_INDEX].scriptSig = s_sign_bob
loan_tx.vin[CONTRACT_PRINCIPAL_INP_INDEX].scriptSig = s_sign_alice
result = rpc.signrawtransactionwithwallet(b2x(loan_tx.serialize()))
if not result["complete"]:
raise click.UsageError(
f"Can't sign the transaction: {result["errors"]}"
)
with click.open_file(output, mode="x") as f:
f.write(result["hex"])
# Unlock the fee utxo that was locked in the previous step
rpc.lockunspent(
True,
[
{
"txid": b2lx(loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.hash),
"vout": loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.n,
}
],
)
print(f"Signed contract transaction was saved to {output}")
print(f"UTXO {b2lx(loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.hash)}:"
f"{loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.n} was locked")
if __name__ == "__main__":
facilitator()
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 Rugged Bytes IT-Services GmbH
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import json
import os
import click
import elementstx # noqa: F401
from bitcointx import select_chain_params
from bitcointx.core import (
CMutableTransaction,
CTransaction,
CMutableTxIn,
CMutableTxInWitness,
b2lx,
b2x,
x,
)
from bitcointx.core.script import CScript
from bitcointx.core.key import CKey
from bitcointx.rpc import JSONRPCError
from bitcointx.wallet import CCoinExtKey
from elementstx.wallet import CCoinConfidentialAddress
from cli_common import (
load_data_with_checking_hash, save_to_json_with_hash, read_plandata,
network_option
)
from lib.constants import (
CONTRACT_COLLATERAL_INP_INDEX,
CONTRACT_PRINCIPAL_INP_INDEX
)
from lib.loan_utils import create_loan_transaction
from lib.rpc_utils import calculate_fee, get_fee_utxo, get_bitcoin_asset
from lib.types import (
CreditorLoanStartInfo,
DebtorLoanStartInfo,
RPCPathParamType,
ElementsRPCCaller
)
CONTRACT_FEE_INP_INDEX = 2
@click.group()
def facilitator() -> None:
...
@facilitator.command()
@click.option(
"-r",
"--rpc",
type=RPCPathParamType(),
help="config dir path",
required=True,
)
@click.option(
"-p",
"--plan",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
help="path to plan data",
)
@click.option(
"-l",
"--loan",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to principal info",
)
@click.option(
"-c",
"--collateral",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to collateral info",
)
@click.option(
"-oc",
"--output-creditor",
type=click.Path(
file_okay=True,
dir_okay=False,
resolve_path=True,
allow_dash=False,
),
required=True,
help="path to creditor info",
)
@click.option(
"-od",
"--output-debtor",
type=click.Path(
file_okay=True,
dir_okay=False,
resolve_path=True,
allow_dash=False,
),
required=True,
help="path to debtor info",
)
@click.option(
"-ot",
"--output-tx",
type=click.Path(
file_okay=True,
dir_okay=False,
resolve_path=True,
allow_dash=True,
),
required=True,
default="-",
help="path to tx data",
)
@click.option(
"--min-output",
"min_output",
type=int,
default=1,
help="path to write transaction data",
)
@click.option(
"--contract-start-delay",
"contract_start_delay",
type=int,
required=True,
help="Delay in blocks to the start of the contract from the current block",
)
@network_option
def make(
rpc: ElementsRPCCaller,
plan: str,
loan: str,
collateral: str,
output_creditor: str,
output_debtor: str,
output_tx: str,
min_output: int,
contract_start_delay: int,
network: str
) -> None:
select_chain_params(network)
if contract_start_delay < 0:
raise click.UsageError(f"contract_start_delay must be positive")
repayment_plan = read_plandata(plan).to_repayment_plan(min_output)
debtor_start_data = load_data_with_checking_hash(collateral)
try:
txstr = rpc.getrawtransaction(debtor_start_data["txid"])
except JSONRPCError as e:
raise click.UsageError(
f"Can't get transaction {debtor_start_data['txid']}"
f" expected to contain the principal: {e}"
)
tx = CTransaction.deserialize(x(txstr))
assert isinstance(debtor_start_data["vout_index"], int)
debtor_start_info = DebtorLoanStartInfo(
tx=tx, vout_index=int(debtor_start_data["vout_index"]),
blinding_key=CKey(x(debtor_start_data["blinding_key"])),
control_addr=CCoinConfidentialAddress(
debtor_start_data["control_addr"]),
receive_addr=CCoinConfidentialAddress(
debtor_start_data["receive_addr"]),
collateral_change_addr=CCoinConfidentialAddress(
debtor_start_data["collateral_change_addr"]),
plan=repayment_plan
)
creditor_start_data = load_data_with_checking_hash(loan)
try:
txstr = rpc.getrawtransaction(creditor_start_data["txid"])
except JSONRPCError as e:
raise click.UsageError(
f"Can't get transaction {creditor_start_data['txid']}"
f" expected to contain collateral: {e}"
)
tx = CTransaction.deserialize(x(txstr))
creditor_start_info = CreditorLoanStartInfo(
tx=tx, vout_index=int(creditor_start_data["vout_index"]),
blinding_key=CKey(x(creditor_start_data["blinding_key"])),
control_addr=CCoinConfidentialAddress(
creditor_start_data["control_addr"]),
principal_change_addr=CCoinConfidentialAddress(
creditor_start_data["principal_change_addr"]),
plan=repayment_plan
)
fee_amount = calculate_fee(rpc)
print(f"Calculated fee amount for the transaction: {fee_amount}")
bitcoin_asset = get_bitcoin_asset(rpc)
fee_utxo_info = get_fee_utxo(rpc, fee_amount, bitcoin_asset)
fee_cout = fee_utxo_info.outpoint
# Lock this utxo
rpc.lockunspent(False, [{"txid": b2lx(fee_cout.hash), "vout": fee_cout.n}])
shared_blinding_xkey = CCoinExtKey.from_seed(os.urandom(32))
start_block_num = rpc.getblockcount() + contract_start_delay
fee_change_addr = CCoinConfidentialAddress(rpc.getnewaddress())
tx, creditor_ctl_asset, debtor_ctl_asset = create_loan_transaction(
repayment_plan,
creditor_start_info,
debtor_start_info,
shared_blinding_xkey,
fee_utxo_info,
fee_change_addr,
bitcoin_asset,
start_block_num=start_block_num,
fee_amount=fee_amount,
)
tx_for_alice = tx.clone()
tx_for_alice.vin[CONTRACT_COLLATERAL_INP_INDEX] = CMutableTxIn()
creditor_info = {
"tx": b2x(tx_for_alice.to_immutable().serialize()),
"shared-blinding-xkey": str(shared_blinding_xkey),
"debtor-control-asset": debtor_ctl_asset.to_hex(),
"bitcoin-asset": bitcoin_asset.to_hex(),
"start-block-num": start_block_num,
}
save_to_json_with_hash(output_creditor, creditor_info)
# mask Alice's input when sending to Bob
tx_for_bob = tx.clone()
tx_for_bob.vin[CONTRACT_PRINCIPAL_INP_INDEX] = CMutableTxIn()
debtor_info = {
"tx": b2x(tx_for_bob.to_immutable().serialize()),
"shared-blinding-xkey": str(shared_blinding_xkey),
"creditor-control-asset": creditor_ctl_asset.to_hex(),
"bitcoin-asset": bitcoin_asset.to_hex(),
"start-block-num": start_block_num,
}
save_to_json_with_hash(output_debtor, debtor_info)
with click.open_file(output_tx, mode="x") as f:
f.write(b2x(tx.to_immutable().serialize()))
print(f"Contract transaction was saved to {output_tx}")
print(f"The transaction can not be broadcast until block "
f"{start_block_num}")
@facilitator.command()
@click.option(
"-r",
"--rpc",
type=RPCPathParamType(),
help="config dir path",
required=True,
)
@click.option(
"-t",
"--tx",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
help="path to loan transaction",
)
@click.option(
"-c",
"--creditor-witness",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to creditor witness",
)
@click.option(
"-d",
"--debtor-witness",
type=click.Path(
exists=True,
dir_okay=False,
resolve_path=True,
),
required=True,
help="path to debtor witness",
)
@click.option(
"-o",
"--output",
type=click.Path(
dir_okay=False,
resolve_path=True,
allow_dash=True,
),
default="-",
help="path to output transaction",
)
@network_option
def sign(rpc: ElementsRPCCaller, tx: str,
creditor_witness: str, debtor_witness: str, output: str,
network: str) -> None:
select_chain_params(network)
with click.open_file(tx) as f:
loan_tx = CMutableTransaction.deserialize(x(f.read()))
with click.open_file(creditor_witness) as f:
data_alice = json.loads(f.read())
with click.open_file(debtor_witness) as f:
data_bob = json.loads(f.read())
w_sign_bob = CMutableTxInWitness.deserialize(
x(data_bob["witnessscript"]))
w_sign_alice = CMutableTxInWitness.deserialize(
x(data_alice["witnessscript"]))
s_sign_bob = CScript(x(data_bob["signscript"]))
s_sign_alice = CScript(x(data_alice["signscript"]))
loan_tx.wit.vtxinwit[CONTRACT_COLLATERAL_INP_INDEX] = w_sign_bob
loan_tx.wit.vtxinwit[CONTRACT_PRINCIPAL_INP_INDEX] = w_sign_alice
loan_tx.vin[CONTRACT_COLLATERAL_INP_INDEX].scriptSig = s_sign_bob
loan_tx.vin[CONTRACT_PRINCIPAL_INP_INDEX].scriptSig = s_sign_alice
result = rpc.signrawtransactionwithwallet(b2x(loan_tx.serialize()))
if not result["complete"]:
raise click.UsageError(
f"Can't sign the transaction: {result['errors']}"
)
with click.open_file(output, mode="x") as f:
f.write(result["hex"])
# Unlock the fee utxo that was locked in the previous step
rpc.lockunspent(
True,
[
{
"txid": b2lx(loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.hash),
"vout": loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.n,
}
],
)
print(f"Signed contract transaction was saved to {output}")
print(f"UTXO {b2lx(loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.hash)}:"
f"{loan_tx.vin[CONTRACT_FEE_INP_INDEX].prevout.n} was locked")
if __name__ == "__main__":
facilitator()
|
"""Validate router configuration variables."""
# Standard Library
import os
import re
from typing import Any, Dict, List, Union, Optional
from pathlib import Path
from ipaddress import IPv4Address, IPv6Address
# Third Party
from pydantic import StrictInt, StrictStr, StrictBool, validator, root_validator
# Project
from hyperglass.log import log
from hyperglass.util import validate_nos, resolve_hostname
from hyperglass.constants import SCRAPE_HELPERS, SUPPORTED_STRUCTURED_OUTPUT
from hyperglass.exceptions import ConfigError, UnsupportedDevice
# Local
from .ssl import Ssl
from .vrf import Vrf, Info
from ..main import HyperglassModel, HyperglassModelExtra
from .proxy import Proxy
from .network import Network
from .credential import Credential
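# Default VRF definition (global routing table), used when a device does not define any VRFs.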
_default_vrf = {
"name": "default",
"display_name": "Global",
"info": Info(),
"ipv4": {
"source_address": None,
"access_list": [
{"network": "0.0.0.0/0", "action": "permit", "ge": 0, "le": 32}
],
},
"ipv6": {
"source_address": None,
"access_list": [{"network": "::/0", "action": "permit", "ge": 0, "le": 128}],
},
}
class Device(HyperglassModel):
"""Validation model for per-router config in devices.yaml."""
name: StrictStr
address: Union[IPv4Address, IPv6Address, StrictStr]
network: Network
credential: Credential
proxy: Optional[Proxy]
display_name: StrictStr
port: StrictInt
ssl: Optional[Ssl]
nos: StrictStr
commands: Optional[StrictStr]
vrfs: List[Vrf] = [_default_vrf]
display_vrfs: List[StrictStr] = []
vrf_names: List[StrictStr] = []
structured_output: Optional[StrictBool]
def __hash__(self) -> int:
"""Make device object hashable so the object can be deduplicated with set()."""
return hash((self.name,))
def __eq__(self, other: Any) -> bool:
"""Make device object comparable so the object can be deduplicated with set()."""
result = False
if isinstance(other, HyperglassModel):
result = self.name == other.name
return result
@property
def _target(self):
return str(self.address)
@validator("address")
def validate_address(cls, value, values):
"""Ensure a hostname is resolvable."""
if not isinstance(value, (IPv4Address, IPv6Address)):
if not any(resolve_hostname(value)):
raise ConfigError(
"Device '{d}' has an address of '{a}', which is not resolvable.",
d=values["name"],
a=value,
)
return value
@validator("structured_output", pre=True, always=True)
def validate_structured_output(cls, value, values):
"""Validate structured output is supported on the device & set a default.
Raises:
ConfigError: Raised if true on a device that doesn't support structured output.
Returns:
{bool} -- True if hyperglass should return structured output for this device.
"""
if value is True and values["nos"] not in SUPPORTED_STRUCTURED_OUTPUT:
raise ConfigError(
"The 'structured_output' field is set to 'true' on device '{d}' with "
+ "NOS '{n}', which does not support structured output",
d=values["name"],
n=values["nos"],
)
elif value is None and values["nos"] in SUPPORTED_STRUCTURED_OUTPUT:
value = True
else:
value = False
return value
@validator("ssl")
def validate_ssl(cls, value, values):
"""Set default cert file location if undefined.
Arguments:
value {object} -- SSL object
values {dict} -- Other already-validated fields
Returns:
{object} -- SSL configuration
"""
if value is not None:
if value.enable and value.cert is None:
app_path = Path(os.environ["hyperglass_directory"])
cert_file = app_path / "certs" / f'{values['name']}.pem'
if not cert_file.exists():
log.warning("No certificate found for device {d}", d=values["name"])
cert_file.touch()
value.cert = cert_file
return value
@root_validator(pre=True)
def validate_nos_commands(cls, values: "Device") -> "Device":
"""Validate & rewrite NOS, set default commands."""
nos = values.get("nos", "")
if not nos:
# Ensure nos is defined.
raise ValueError(
f'Device {values["name"]} is missing a `nos` (Network Operating System).'
)
if nos in SCRAPE_HELPERS.keys():
# Rewrite NOS to helper value if needed.
nos = SCRAPE_HELPERS[nos]
# Verify NOS is supported by hyperglass.
supported, _ = validate_nos(nos)
if not supported:
raise UnsupportedDevice('"{nos}" is not supported.', nos=nos)
values["nos"] = nos
commands = values.get("commands")
if commands is None:
# If no commands are defined, set commands to the NOS.
inferred = values["nos"]
# If the _telnet prefix is added, remove it from the command
# profile so the commands are the same regardless of
# protocol.
if "_telnet" in inferred:
inferred = inferred.replace("_telnet", "")
values["commands"] = inferred
return values
@validator("vrfs", pre=True)
def validate_vrfs(cls, value, values):
"""Validate VRF definitions.
- Ensures source IP addresses are set for the default VRF
(global routing table).
- Initializes the default VRF with the DefaultVRF() class so
that specific defaults can be set for the global routing
table.
- If the 'display_name' is not set for a non-default VRF, try
to make one that looks pretty based on the 'name'.
Arguments:
value {list} -- List of VRFs
values {dict} -- Other already-validated fields
Raises:
ConfigError: Raised if the VRF is missing a source address
Returns:
{list} -- List of valid VRFs
"""
vrfs = []
for vrf in value:
vrf_name = vrf.get("name")
for afi in ("ipv4", "ipv6"):
vrf_afi = vrf.get(afi)
# If AFI is actually defined (enabled), and if the
# source_address field is not set, raise an error
if vrf_afi is not None and vrf_afi.get("source_address") is None:
raise ConfigError(
(
"VRF '{vrf}' in router '{router}' is missing a source "
"{afi} address."
),
vrf=vrf.get("name"),
router=values.get("name"),
afi=afi.replace("ip", "IP"),
)
# If no display_name is set for a non-default VRF, try
# to make one by replacing non-alphanumeric characters
# with whitespaces and using str.title() to make each
# word look "pretty".
if vrf_name != "default" and not isinstance(
vrf.get("display_name"), StrictStr
):
new_name = vrf["name"]
new_name = re.sub(r"[^a-zA-Z0-9]", " ", new_name)
new_name = re.split(" ", new_name)
vrf["display_name"] = " ".join([w.title() for w in new_name])
log.debug(
f'Field "display_name" for VRF "{vrf['name']}" was not set. '
f"Generated '{vrf["display_name"]}'"
)
elif vrf_name == "default" and vrf.get("display_name") is None:
vrf["display_name"] = "Global"
# Validate the non-default VRF against the standard
# Vrf() class.
vrf = Vrf(**vrf)
vrfs.append(vrf)
return vrfs
class Devices(HyperglassModelExtra):
"""Validation model for device configurations."""
hostnames: List[StrictStr] = []
vrfs: List[StrictStr] = []
display_vrfs: List[StrictStr] = []
vrf_objects: List[Vrf] = []
objects: List[Device] = []
all_nos: List[StrictStr] = []
default_vrf: Vrf = Vrf(name="default", display_name="Global")
def __init__(self, input_params: List[Dict]) -> None:
"""Import loaded YAML, initialize per-network definitions.
Remove unsupported characters from device names, dynamically
set attributes for the devices class. Builds lists of common
attributes for easy access in other modules.
Arguments:
input_params {dict} -- Unvalidated router definitions
Returns:
{object} -- Validated routers object
"""
vrfs = set()
display_vrfs = set()
vrf_objects = set()
all_nos = set()
objects = set()
hostnames = set()
init_kwargs = {}
for definition in input_params:
# Validate each router config against Router() model/schema
device = Device(**definition)
# Add router-level attributes (assumed to be unique) to
# class lists, e.g. so all hostnames can be accessed as a
# list with `devices.hostnames`, same for all router
# classes, for when iteration over all routers is required.
hostnames.add(device.name)
objects.add(device)
all_nos.add(device.commands)
for vrf in device.vrfs:
# For each configured router VRF, add its name and
# display_name to a class set (for automatic de-duping).
vrfs.add(vrf.name)
display_vrfs.add(vrf.display_name)
# Also add the names to a router-level list so each
# router's VRFs and display VRFs can be easily accessed.
device.display_vrfs.append(vrf.display_name)
device.vrf_names.append(vrf.name)
# Add a 'default_vrf' attribute to the devices class
# which contains the configured default VRF display name.
if vrf.name == "default" and not hasattr(self, "default_vrf"):
init_kwargs["default_vrf"] = Vrf(
name=vrf.name, display_name=vrf.display_name
)
# Add the native VRF objects to a set (for automatic
# de-duping), but exclude device-specific fields.
vrf_objects.add(
vrf.copy(
deep=True,
exclude={
"ipv4": {"source_address"},
"ipv6": {"source_address"},
},
)
)
# Convert the de-duplicated sets to a standard list, add lists
# as class attributes. Sort router list by router name attribute
init_kwargs["hostnames"] = list(hostnames)
init_kwargs["all_nos"] = list(all_nos)
init_kwargs["vrfs"] = list(vrfs)
init_kwargs["display_vrfs"] = list(vrfs)
init_kwargs["vrf_objects"] = list(vrf_objects)
init_kwargs["objects"] = sorted(objects, key=lambda x: x.display_name)
super().__init__(**init_kwargs)
def __getitem__(self, accessor: str) -> Device:
"""Get a device by its name."""
for device in self.objects:
if device.name == accessor:
return device
raise AttributeError(f"No device named '{accessor}'")
|
"""Validate router configuration variables."""
# Standard Library
import os
import re
from typing import Any, Dict, List, Union, Optional
from pathlib import Path
from ipaddress import IPv4Address, IPv6Address
# Third Party
from pydantic import StrictInt, StrictStr, StrictBool, validator, root_validator
# Project
from hyperglass.log import log
from hyperglass.util import validate_nos, resolve_hostname
from hyperglass.constants import SCRAPE_HELPERS, SUPPORTED_STRUCTURED_OUTPUT
from hyperglass.exceptions import ConfigError, UnsupportedDevice
# Local
from .ssl import Ssl
from .vrf import Vrf, Info
from ..main import HyperglassModel, HyperglassModelExtra
from .proxy import Proxy
from .network import Network
from .credential import Credential
_default_vrf = {
"name": "default",
"display_name": "Global",
"info": Info(),
"ipv4": {
"source_address": None,
"access_list": [
{"network": "0.0.0.0/0", "action": "permit", "ge": 0, "le": 32}
],
},
"ipv6": {
"source_address": None,
"access_list": [{"network": "::/0", "action": "permit", "ge": 0, "le": 128}],
},
}
class Device(HyperglassModel):
"""Validation model for per-router config in devices.yaml."""
name: StrictStr
address: Union[IPv4Address, IPv6Address, StrictStr]
network: Network
credential: Credential
proxy: Optional[Proxy]
display_name: StrictStr
port: StrictInt
ssl: Optional[Ssl]
nos: StrictStr
commands: Optional[StrictStr]
vrfs: List[Vrf] = [_default_vrf]
display_vrfs: List[StrictStr] = []
vrf_names: List[StrictStr] = []
structured_output: Optional[StrictBool]
def __hash__(self) -> int:
"""Make device object hashable so the object can be deduplicated with set()."""
return hash((self.name,))
def __eq__(self, other: Any) -> bool:
"""Make device object comparable so the object can be deduplicated with set()."""
result = False
if isinstance(other, HyperglassModel):
result = self.name == other.name
return result
@property
def _target(self):
return str(self.address)
@validator("address")
def validate_address(cls, value, values):
"""Ensure a hostname is resolvable."""
if not isinstance(value, (IPv4Address, IPv6Address)):
if not any(resolve_hostname(value)):
raise ConfigError(
"Device '{d}' has an address of '{a}', which is not resolvable.",
d=values["name"],
a=value,
)
return value
@validator("structured_output", pre=True, always=True)
def validate_structured_output(cls, value, values):
"""Validate structured output is supported on the device & set a default.
Raises:
ConfigError: Raised if true on a device that doesn't support structured output.
Returns:
{bool} -- True if hyperglass should return structured output for this device.
"""
if value is True and values["nos"] not in SUPPORTED_STRUCTURED_OUTPUT:
raise ConfigError(
"The 'structured_output' field is set to 'true' on device '{d}' with "
+ "NOS '{n}', which does not support structured output",
d=values["name"],
n=values["nos"],
)
elif value is None and values["nos"] in SUPPORTED_STRUCTURED_OUTPUT:
value = True
else:
value = False
return value
@validator("ssl")
def validate_ssl(cls, value, values):
"""Set default cert file location if undefined.
Arguments:
value {object} -- SSL object
values {dict} -- Other already-validated fields
Returns:
{object} -- SSL configuration
"""
if value is not None:
if value.enable and value.cert is None:
app_path = Path(os.environ["hyperglass_directory"])
cert_file = app_path / "certs" / f'{values["name"]}.pem'
if not cert_file.exists():
log.warning("No certificate found for device {d}", d=values["name"])
cert_file.touch()
value.cert = cert_file
return value
@root_validator(pre=True)
def validate_nos_commands(cls, values: "Device") -> "Device":
"""Validate & rewrite NOS, set default commands."""
nos = values.get("nos", "")
if not nos:
# Ensure nos is defined.
raise ValueError(
f'Device {values["name"]} is missing a `nos` (Network Operating System).'
)
if nos in SCRAPE_HELPERS.keys():
# Rewrite NOS to helper value if needed.
nos = SCRAPE_HELPERS[nos]
# Verify NOS is supported by hyperglass.
supported, _ = validate_nos(nos)
if not supported:
raise UnsupportedDevice('"{nos}" is not supported.', nos=nos)
values["nos"] = nos
commands = values.get("commands")
if commands is None:
# If no commands are defined, set commands to the NOS.
inferred = values["nos"]
# If the _telnet prefix is added, remove it from the command
# profile so the commands are the same regardless of
# protocol.
if "_telnet" in inferred:
inferred = inferred.replace("_telnet", "")
values["commands"] = inferred
return values
@validator("vrfs", pre=True)
def validate_vrfs(cls, value, values):
"""Validate VRF definitions.
- Ensures source IP addresses are set for the default VRF
(global routing table).
- Initializes the default VRF with the DefaultVRF() class so
that specific defaults can be set for the global routing
table.
- If the 'display_name' is not set for a non-default VRF, try
to make one that looks pretty based on the 'name'.
Arguments:
value {list} -- List of VRFs
values {dict} -- Other already-validated fields
Raises:
ConfigError: Raised if the VRF is missing a source address
Returns:
{list} -- List of valid VRFs
"""
vrfs = []
for vrf in value:
vrf_name = vrf.get("name")
for afi in ("ipv4", "ipv6"):
vrf_afi = vrf.get(afi)
# If AFI is actually defined (enabled), and if the
# source_address field is not set, raise an error
if vrf_afi is not None and vrf_afi.get("source_address") is None:
raise ConfigError(
(
"VRF '{vrf}' in router '{router}' is missing a source "
"{afi} address."
),
vrf=vrf.get("name"),
router=values.get("name"),
afi=afi.replace("ip", "IP"),
)
# If no display_name is set for a non-default VRF, try
# to make one by replacing non-alphanumeric characters
# with whitespaces and using str.title() to make each
# word look "pretty".
if vrf_name != "default" and not isinstance(
vrf.get("display_name"), StrictStr
):
new_name = vrf["name"]
new_name = re.sub(r"[^a-zA-Z0-9]", " ", new_name)
new_name = re.split(" ", new_name)
vrf["display_name"] = " ".join([w.title() for w in new_name])
log.debug(
f'Field "display_name" for VRF "{vrf["name"]}" was not set. '
f"Generated '{vrf['display_name']}'"
)
elif vrf_name == "default" and vrf.get("display_name") is None:
vrf["display_name"] = "Global"
# Validate the non-default VRF against the standard
# Vrf() class.
vrf = Vrf(**vrf)
vrfs.append(vrf)
return vrfs
class Devices(HyperglassModelExtra):
"""Validation model for device configurations."""
hostnames: List[StrictStr] = []
vrfs: List[StrictStr] = []
display_vrfs: List[StrictStr] = []
vrf_objects: List[Vrf] = []
objects: List[Device] = []
all_nos: List[StrictStr] = []
default_vrf: Vrf = Vrf(name="default", display_name="Global")
def __init__(self, input_params: List[Dict]) -> None:
"""Import loaded YAML, initialize per-network definitions.
Remove unsupported characters from device names, dynamically
set attributes for the devices class. Builds lists of common
attributes for easy access in other modules.
Arguments:
input_params {dict} -- Unvalidated router definitions
Returns:
{object} -- Validated routers object
"""
vrfs = set()
display_vrfs = set()
vrf_objects = set()
all_nos = set()
objects = set()
hostnames = set()
init_kwargs = {}
for definition in input_params:
# Validate each router config against Router() model/schema
device = Device(**definition)
# Add router-level attributes (assumed to be unique) to
# class lists, e.g. so all hostnames can be accessed as a
# list with `devices.hostnames`, same for all router
# classes, for when iteration over all routers is required.
hostnames.add(device.name)
objects.add(device)
all_nos.add(device.commands)
for vrf in device.vrfs:
# For each configured router VRF, add its name and
# display_name to a class set (for automatic de-duping).
vrfs.add(vrf.name)
display_vrfs.add(vrf.display_name)
# Also add the names to a router-level list so each
# router's VRFs and display VRFs can be easily accessed.
device.display_vrfs.append(vrf.display_name)
device.vrf_names.append(vrf.name)
# Add a 'default_vrf' attribute to the devices class
# which contains the configured default VRF display name.
if vrf.name == "default" and not hasattr(self, "default_vrf"):
init_kwargs["default_vrf"] = Vrf(
name=vrf.name, display_name=vrf.display_name
)
# Add the native VRF objects to a set (for automatic
# de-duping), but exclude device-specific fields.
vrf_objects.add(
vrf.copy(
deep=True,
exclude={
"ipv4": {"source_address"},
"ipv6": {"source_address"},
},
)
)
# Convert the de-duplicated sets to a standard list, add lists
# as class attributes. Sort router list by router name attribute
init_kwargs["hostnames"] = list(hostnames)
init_kwargs["all_nos"] = list(all_nos)
init_kwargs["vrfs"] = list(vrfs)
init_kwargs["display_vrfs"] = list(vrfs)
init_kwargs["vrf_objects"] = list(vrf_objects)
init_kwargs["objects"] = sorted(objects, key=lambda x: x.display_name)
super().__init__(**init_kwargs)
def __getitem__(self, accessor: str) -> Device:
"""Get a device by its name."""
for device in self.objects:
if device.name == accessor:
return device
raise AttributeError(f"No device named '{accessor}'")
|
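# A minimal, self-contained sketch (not part of the snippet above) of the
# display-name prettification used in `validate_vrfs`: non-alphanumeric
# characters become spaces and each word is title-cased. The helper name
# `prettify_vrf_name` is hypothetical and exists only for illustration.
import re

def prettify_vrf_name(name: str) -> str:
    """Turn a VRF name such as 'customer_a-prod' into 'Customer A Prod'."""
    words = re.split(" ", re.sub(r"[^a-zA-Z0-9]", " ", name))
    return " ".join(w.title() for w in words)

# prettify_vrf_name("customer_a-prod")  -> "Customer A Prod"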
from __future__ import annotations
import inspect
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any
from magicgui._type_wrapper import resolve_forward_refs
from magicgui.application import use_app
from magicgui.events import Signal
from magicgui.widgets import _protocols
BUILDING_DOCS = sys.argv[-2:] == ["build", "docs"]
if BUILDING_DOCS:
import numpy as np
if TYPE_CHECKING:
from weakref import ReferenceType
import numpy as np # noqa
from magicgui.widgets._concrete import _LabeledWidget
class Widget:
"""Basic Widget, wrapping a class that implements WidgetProtocol.
Parameters
----------
widget_type : Type[WidgetProtocol]
A class implementing a widget protocol. Will be instantiated during __init__.
name : str, optional
The name of the parameter represented by this widget. by default ""
annotation : Any, optional
The type annotation for the parameter represented by the widget, by default
``None``
label : str
A string to use for an associated Label widget (if this widget is being
shown in a :class:`~magicgui.widgets.Container` widget, and labels are on).
By default, ``name`` will be used. Note: ``name`` refers to the name of the
parameter, as might be used in a signature, whereas label is just the label
for that widget in the GUI.
tooltip : str, optional
A tooltip to display when hovering over the widget.
visible : bool, optional
Whether the widget is visible, by default ``True``.
backend_kwargs : dict, optional
keyword argument to pass to the backend widget constructor.
"""
_widget: _protocols.WidgetProtocol
# if this widget becomes owned by a labeled widget
_labeled_widget_ref: ReferenceType[_LabeledWidget] | None = None
parent_changed = Signal(object)
label_changed = Signal(str)
def __init__(
self,
widget_type: type[_protocols.WidgetProtocol],
name: str = "",
annotation: Any = None,
label: str = None,
tooltip: str | None = None,
visible: bool | None = None,
enabled: bool = True,
gui_only=False,
backend_kwargs=dict(),
**extra,
):
# for ipywidgets API compatibility
label = label or extra.pop("description", None)
if extra:
raise TypeError(
f"{type(self).__name__} got an unexpected "
f"keyword argument: {", ".join(extra)}"
)
for m in self.__class__.__mro__[:-1]:
_prot = m.__annotations__.get("_widget")
if _prot:
break
else:
raise TypeError(
f"Widget type {self.__class__} declared no _widget annotation"
)
if not isinstance(_prot, str):
_prot = _prot.__name__
prot = getattr(_protocols, _prot.replace("_protocols.", ""))
_protocols.assert_protocol(widget_type, prot)
self.__magicgui_app__ = use_app()
assert self.__magicgui_app__.native
self._widget = widget_type(**backend_kwargs)
self.name: str = name
self.param_kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
self._label = label
self.tooltip = tooltip
self.enabled = enabled
self.annotation: Any = annotation
self.gui_only = gui_only
self._widget._mgui_bind_parent_change_callback(self._emit_parent)
# put the magicgui widget on the native object...may cause error on some backend
self.native._magic_widget = self
self._post_init()
self._visible: bool = False
self._explicitly_hidden: bool = False
if visible is not None:
self.visible = visible
@property
def annotation(self):
"""Return type annotation for the parameter represented by the widget.
ForwardRefs will be resolved when setting the annotation.
"""
return self._annotation
@annotation.setter
def annotation(self, value):
self._annotation = resolve_forward_refs(value)
@property
def param_kind(self) -> inspect._ParameterKind:
"""Return :attr:`inspect.Parameter.kind` represented by this widget.
Used in building signatures from multiple widgets, by default
:attr:`~inspect.Parameter.POSITIONAL_OR_KEYWORD`
"""
return self._param_kind
@param_kind.setter
def param_kind(self, kind: str | inspect._ParameterKind):
if isinstance(kind, str):
kind = inspect._ParameterKind[kind.upper()]
if not isinstance(kind, inspect._ParameterKind):
raise TypeError(
"'param_kind' must be either a string or a inspect._ParameterKind."
)
self._param_kind: inspect._ParameterKind = kind
def _post_init(self):
pass
@property
def options(self) -> dict:
"""Return options currently being used in this widget."""
return {"enabled": self.enabled, "visible": self.visible}
@property
def native(self):
"""Return native backend widget."""
return self._widget._mgui_get_native_widget()
@property
def enabled(self) -> bool:
"""Whether widget is enabled (editable)."""
return self._widget._mgui_get_enabled()
@enabled.setter
def enabled(self, value: bool):
self._widget._mgui_set_enabled(value)
@property
def parent(self) -> Widget:
"""Return the parent widget."""
return self._widget._mgui_get_parent()
@parent.setter
def parent(self, value: Widget):
self._widget._mgui_set_parent(value)
@property
def widget_type(self) -> str:
"""Return type of widget."""
return self.__class__.__name__
@property
def label(self):
"""Return a label to use for this widget when present in Containers."""
if self._label is None:
return self.name.replace("_", " ")
return self._label
@label.setter
def label(self, value):
self._label = value
self.label_changed.emit(value)
@property
def width(self) -> int:
"""Return the current width of the widget."""
return self._widget._mgui_get_width()
@width.setter
def width(self, value: int) -> None:
"""Set the minimum allowable width of the widget."""
self._widget._mgui_set_width(value)
@property
def min_width(self) -> int:
"""Get the minimum width of the widget."""
return self._widget._mgui_get_min_width()
@min_width.setter
def min_width(self, value: int) -> None:
"""Set the minimum width of the widget."""
self._widget._mgui_set_min_width(value)
@property
def max_width(self) -> int:
"""Get the maximum width of the widget."""
return self._widget._mgui_get_max_width()
@max_width.setter
def max_width(self, value: int) -> None:
"""Set the maximum width of the widget."""
self._widget._mgui_set_max_width(value)
@property
def height(self) -> int:
"""Return the current height of the widget."""
return self._widget._mgui_get_height()
@height.setter
def height(self, value: int) -> None:
"""Set the minimum allowable height of the widget."""
self._widget._mgui_set_height(value)
@property
def min_height(self) -> int:
"""Get the minimum height of the widget."""
return self._widget._mgui_get_min_height()
@min_height.setter
def min_height(self, value: int) -> None:
"""Set the minimum height of the widget."""
self._widget._mgui_set_min_height(value)
@property
def max_height(self) -> int:
"""Get the maximum height of the widget."""
return self._widget._mgui_get_max_height()
@max_height.setter
def max_height(self, value: int) -> None:
"""Set the maximum height of the widget."""
self._widget._mgui_set_max_height(value)
@property
def tooltip(self) -> str | None:
"""Get the tooltip for this widget."""
return self._widget._mgui_get_tooltip() or None
@tooltip.setter
def tooltip(self, value: str | None) -> None:
"""Set the tooltip for this widget."""
return self._widget._mgui_set_tooltip(value)
def _labeled_widget(self) -> _LabeledWidget | None:
"""Return _LabeledWidget container, if applicable."""
return self._labeled_widget_ref() if self._labeled_widget_ref else None
@property
def visible(self) -> bool:
"""Return whether widget is visible."""
return self._widget._mgui_get_visible()
@visible.setter
def visible(self, value: bool):
"""Set widget visibility.
``widget.show()`` is an alias for ``widget.visible = True``
``widget.hide()`` is an alias for ``widget.visible = False``
"""
if value is None:
return
self._widget._mgui_set_visible(value)
self._explicitly_hidden = not value
labeled_widget = self._labeled_widget()
if labeled_widget is not None:
labeled_widget.visible = value
def show(self, run=False):
"""Show widget.
alias for ``widget.visible = True``
Parameters
----------
run : bool, optional
Whether to start the application event loop, by default False
"""
self.visible = True
if run:
self.__magicgui_app__.run()
return self # useful for generating repr in sphinx
@contextmanager
def shown(self):
"""Context manager to show the widget."""
try:
self.show()
yield self.__magicgui_app__.__enter__()
finally:
self.__magicgui_app__.__exit__()
def hide(self):
"""Hide widget.
alias for ``widget.visible = False``
"""
self.visible = False
def close(self) -> None:
"""Close widget."""
self._widget._mgui_close_widget()
def render(self) -> np.ndarray:
"""Return an RGBA (MxNx4) numpy array bitmap of the rendered widget."""
return self._widget._mgui_render()
def __repr__(self) -> str:
"""Return representation of widget of instsance."""
return f"{self.widget_type}(annotation={self.annotation!r}, name={self.name!r})"
def _repr_png_(self):
"""Return PNG representation of the widget for QtConsole."""
from io import BytesIO
try:
from imageio import imsave
except ImportError:
print(
"(For a nicer magicgui widget representation in "
"Jupyter, please `pip install imageio`)"
)
return None
with BytesIO() as file_obj:
imsave(file_obj, self.render(), format="png")
file_obj.seek(0)
return file_obj.read()
def _emit_parent(self, *_):
self.parent_changed.emit(self.parent)
|
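# Standalone toy (independent of magicgui) illustrating the MRO walk performed
# in `Widget.__init__` above: scan the class hierarchy, excluding `object`, for
# the first `_widget` annotation, which names the backend protocol. The classes
# below are hypothetical and exist only for this sketch.
class _BaseWidget:
    _widget: "ButtonProtocol"  # string annotation, as in the real pattern

class _FancyButton(_BaseWidget):
    pass

def _find_widget_annotation(cls) -> str:
    for m in cls.__mro__[:-1]:  # skip `object`, which declares no annotations
        prot = m.__dict__.get("__annotations__", {}).get("_widget")
        if prot:
            return prot if isinstance(prot, str) else prot.__name__
    raise TypeError(f"Widget type {cls} declared no _widget annotation")

# _find_widget_annotation(_FancyButton) -> "ButtonProtocol"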
from __future__ import annotations
import inspect
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any
from magicgui._type_wrapper import resolve_forward_refs
from magicgui.application import use_app
from magicgui.events import Signal
from magicgui.widgets import _protocols
BUILDING_DOCS = sys.argv[-2:] == ["build", "docs"]
if BUILDING_DOCS:
import numpy as np
if TYPE_CHECKING:
from weakref import ReferenceType
import numpy as np # noqa
from magicgui.widgets._concrete import _LabeledWidget
class Widget:
"""Basic Widget, wrapping a class that implements WidgetProtocol.
Parameters
----------
widget_type : Type[WidgetProtocol]
A class implementing a widget protocol. Will be instantiated during __init__.
name : str, optional
The name of the parameter represented by this widget. by default ""
annotation : Any, optional
The type annotation for the parameter represented by the widget, by default
``None``
label : str
A string to use for an associated Label widget (if this widget is being
shown in a :class:`~magicgui.widgets.Container` widget, and labels are on).
By default, ``name`` will be used. Note: ``name`` refers to the name of the
parameter, as might be used in a signature, whereas label is just the label
for that widget in the GUI.
tooltip : str, optional
A tooltip to display when hovering over the widget.
visible : bool, optional
Whether the widget is visible, by default ``True``.
backend_kwargs : dict, optional
keyword argument to pass to the backend widget constructor.
"""
_widget: _protocols.WidgetProtocol
# if this widget becomes owned by a labeled widget
_labeled_widget_ref: ReferenceType[_LabeledWidget] | None = None
parent_changed = Signal(object)
label_changed = Signal(str)
def __init__(
self,
widget_type: type[_protocols.WidgetProtocol],
name: str = "",
annotation: Any = None,
label: str = None,
tooltip: str | None = None,
visible: bool | None = None,
enabled: bool = True,
gui_only=False,
backend_kwargs=dict(),
**extra,
):
# for ipywidgets API compatibility
label = label or extra.pop("description", None)
if extra:
raise TypeError(
f"{type(self).__name__} got an unexpected "
f"keyword argument: {', '.join(extra)}"
)
for m in self.__class__.__mro__[:-1]:
_prot = m.__annotations__.get("_widget")
if _prot:
break
else:
raise TypeError(
f"Widget type {self.__class__} declared no _widget annotation"
)
if not isinstance(_prot, str):
_prot = _prot.__name__
prot = getattr(_protocols, _prot.replace("_protocols.", ""))
_protocols.assert_protocol(widget_type, prot)
self.__magicgui_app__ = use_app()
assert self.__magicgui_app__.native
self._widget = widget_type(**backend_kwargs)
self.name: str = name
self.param_kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
self._label = label
self.tooltip = tooltip
self.enabled = enabled
self.annotation: Any = annotation
self.gui_only = gui_only
self._widget._mgui_bind_parent_change_callback(self._emit_parent)
# put the magicgui widget on the native object...may cause error on some backend
self.native._magic_widget = self
self._post_init()
self._visible: bool = False
self._explicitly_hidden: bool = False
if visible is not None:
self.visible = visible
@property
def annotation(self):
"""Return type annotation for the parameter represented by the widget.
ForwardRefs will be resolved when setting the annotation.
"""
return self._annotation
@annotation.setter
def annotation(self, value):
self._annotation = resolve_forward_refs(value)
@property
def param_kind(self) -> inspect._ParameterKind:
"""Return :attr:`inspect.Parameter.kind` represented by this widget.
Used in building signatures from multiple widgets, by default
:attr:`~inspect.Parameter.POSITIONAL_OR_KEYWORD`
"""
return self._param_kind
@param_kind.setter
def param_kind(self, kind: str | inspect._ParameterKind):
if isinstance(kind, str):
kind = inspect._ParameterKind[kind.upper()]
if not isinstance(kind, inspect._ParameterKind):
raise TypeError(
"'param_kind' must be either a string or a inspect._ParameterKind."
)
self._param_kind: inspect._ParameterKind = kind
def _post_init(self):
pass
@property
def options(self) -> dict:
"""Return options currently being used in this widget."""
return {"enabled": self.enabled, "visible": self.visible}
@property
def native(self):
"""Return native backend widget."""
return self._widget._mgui_get_native_widget()
@property
def enabled(self) -> bool:
"""Whether widget is enabled (editable)."""
return self._widget._mgui_get_enabled()
@enabled.setter
def enabled(self, value: bool):
self._widget._mgui_set_enabled(value)
@property
def parent(self) -> Widget:
"""Return the parent widget."""
return self._widget._mgui_get_parent()
@parent.setter
def parent(self, value: Widget):
self._widget._mgui_set_parent(value)
@property
def widget_type(self) -> str:
"""Return type of widget."""
return self.__class__.__name__
@property
def label(self):
"""Return a label to use for this widget when present in Containers."""
if self._label is None:
return self.name.replace("_", " ")
return self._label
@label.setter
def label(self, value):
self._label = value
self.label_changed.emit(value)
@property
def width(self) -> int:
"""Return the current width of the widget."""
return self._widget._mgui_get_width()
@width.setter
def width(self, value: int) -> None:
"""Set the minimum allowable width of the widget."""
self._widget._mgui_set_width(value)
@property
def min_width(self) -> int:
"""Get the minimum width of the widget."""
return self._widget._mgui_get_min_width()
@min_width.setter
def min_width(self, value: int) -> None:
"""Set the minimum width of the widget."""
self._widget._mgui_set_min_width(value)
@property
def max_width(self) -> int:
"""Get the maximum width of the widget."""
return self._widget._mgui_get_max_width()
@max_width.setter
def max_width(self, value: int) -> None:
"""Set the maximum width of the widget."""
self._widget._mgui_set_max_width(value)
@property
def height(self) -> int:
"""Return the current height of the widget."""
return self._widget._mgui_get_height()
@height.setter
def height(self, value: int) -> None:
"""Set the minimum allowable height of the widget."""
self._widget._mgui_set_height(value)
@property
def min_height(self) -> int:
"""Get the minimum height of the widget."""
return self._widget._mgui_get_min_height()
@min_height.setter
def min_height(self, value: int) -> None:
"""Set the minimum height of the widget."""
self._widget._mgui_set_min_height(value)
@property
def max_height(self) -> int:
"""Get the maximum height of the widget."""
return self._widget._mgui_get_max_height()
@max_height.setter
def max_height(self, value: int) -> None:
"""Set the maximum height of the widget."""
self._widget._mgui_set_max_height(value)
@property
def tooltip(self) -> str | None:
"""Get the tooltip for this widget."""
return self._widget._mgui_get_tooltip() or None
@tooltip.setter
def tooltip(self, value: str | None) -> None:
"""Set the tooltip for this widget."""
return self._widget._mgui_set_tooltip(value)
def _labeled_widget(self) -> _LabeledWidget | None:
"""Return _LabeledWidget container, if applicable."""
return self._labeled_widget_ref() if self._labeled_widget_ref else None
@property
def visible(self) -> bool:
"""Return whether widget is visible."""
return self._widget._mgui_get_visible()
@visible.setter
def visible(self, value: bool):
"""Set widget visibility.
``widget.show()`` is an alias for ``widget.visible = True``
``widget.hide()`` is an alias for ``widget.visible = False``
"""
if value is None:
return
self._widget._mgui_set_visible(value)
self._explicitly_hidden = not value
labeled_widget = self._labeled_widget()
if labeled_widget is not None:
labeled_widget.visible = value
def show(self, run=False):
"""Show widget.
alias for ``widget.visible = True``
Parameters
----------
run : bool, optional
Whether to start the application event loop, by default False
"""
self.visible = True
if run:
self.__magicgui_app__.run()
return self # useful for generating repr in sphinx
@contextmanager
def shown(self):
"""Context manager to show the widget."""
try:
self.show()
yield self.__magicgui_app__.__enter__()
finally:
self.__magicgui_app__.__exit__()
def hide(self):
"""Hide widget.
alias for ``widget.visible = False``
"""
self.visible = False
def close(self) -> None:
"""Close widget."""
self._widget._mgui_close_widget()
def render(self) -> np.ndarray:
"""Return an RGBA (MxNx4) numpy array bitmap of the rendered widget."""
return self._widget._mgui_render()
def __repr__(self) -> str:
"""Return representation of widget of instsance."""
return f"{self.widget_type}(annotation={self.annotation!r}, name={self.name!r})"
def _repr_png_(self):
"""Return PNG representation of the widget for QtConsole."""
from io import BytesIO
try:
from imageio import imsave
except ImportError:
print(
"(For a nicer magicgui widget representation in "
"Jupyter, please `pip install imageio`)"
)
return None
with BytesIO() as file_obj:
imsave(file_obj, self.render(), format="png")
file_obj.seek(0)
return file_obj.read()
def _emit_parent(self, *_):
self.parent_changed.emit(self.parent)
|
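# A small standard-library sketch of the string-to-enum conversion done by the
# `param_kind` setter above; it is an illustration of the technique, not
# magicgui code.
import inspect

def to_param_kind(kind) -> inspect._ParameterKind:
    if isinstance(kind, str):
        kind = inspect._ParameterKind[kind.upper()]
    if not isinstance(kind, inspect._ParameterKind):
        raise TypeError("kind must be a string or an inspect._ParameterKind")
    return kind

# to_param_kind("keyword_only") is inspect.Parameter.KEYWORD_ONLY  -> True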
"""Light for Shelly."""
from typing import Optional, Tuple
from aioshelly import Block
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.util.color import (
color_hs_to_RGB,
color_RGB_to_hs,
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import ShellyDeviceWrapper
from .const import (
COAP,
DATA_CONFIG_ENTRY,
DOMAIN,
KELVIN_MAX_VALUE,
KELVIN_MIN_VALUE_COLOR,
KELVIN_MIN_VALUE_WHITE,
)
from .entity import ShellyBlockEntity
from .utils import async_remove_shelly_entity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up lights for device."""
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]
blocks = []
for block in wrapper.device.blocks:
if block.type == "light":
blocks.append(block)
elif block.type == "relay":
appliance_type = wrapper.device.settings["relays"][int(block.channel)].get(
"appliance_type"
)
if appliance_type and appliance_type.lower() == "light":
blocks.append(block)
unique_id = (
f'{wrapper.device.shelly['mac']}-{block.type}_{block.channel}'
)
await async_remove_shelly_entity(hass, "switch", unique_id)
if not blocks:
return
async_add_entities(ShellyLight(wrapper, block) for block in blocks)
class ShellyLight(ShellyBlockEntity, LightEntity):
"""Switch that controls a relay block on Shelly devices."""
def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
"""Initialize light."""
super().__init__(wrapper, block)
self.control_result = None
self.mode_result = None
self._supported_features = 0
self._min_kelvin = KELVIN_MIN_VALUE_WHITE
self._max_kelvin = KELVIN_MAX_VALUE
if hasattr(block, "brightness") or hasattr(block, "gain"):
self._supported_features |= SUPPORT_BRIGHTNESS
if hasattr(block, "colorTemp"):
self._supported_features |= SUPPORT_COLOR_TEMP
if hasattr(block, "white"):
self._supported_features |= SUPPORT_WHITE_VALUE
if hasattr(block, "red") and hasattr(block, "green") and hasattr(block, "blue"):
self._supported_features |= SUPPORT_COLOR
self._min_kelvin = KELVIN_MIN_VALUE_COLOR
@property
def supported_features(self) -> int:
"""Supported features."""
return self._supported_features
@property
def is_on(self) -> bool:
"""If light is on."""
if self.control_result:
return self.control_result["ison"]
return self.block.output
@property
def mode(self) -> Optional[str]:
"""Return the color mode of the light."""
if self.mode_result:
return self.mode_result["mode"]
if hasattr(self.block, "mode"):
return self.block.mode
if (
hasattr(self.block, "red")
and hasattr(self.block, "green")
and hasattr(self.block, "blue")
):
return "color"
return "white"
@property
def brightness(self) -> int:
"""Brightness of light."""
if self.mode == "color":
if self.control_result:
brightness = self.control_result["gain"]
else:
brightness = self.block.gain
else:
if self.control_result:
brightness = self.control_result["brightness"]
else:
brightness = self.block.brightness
return int(brightness / 100 * 255)
@property
def white_value(self) -> int:
"""White value of light."""
if self.control_result:
white = self.control_result["white"]
else:
white = self.block.white
return int(white)
@property
def hs_color(self) -> Tuple[float, float]:
"""Return the hue and saturation color value of light."""
if self.mode == "white":
return color_RGB_to_hs(255, 255, 255)
if self.control_result:
red = self.control_result["red"]
green = self.control_result["green"]
blue = self.control_result["blue"]
else:
red = self.block.red
green = self.block.green
blue = self.block.blue
return color_RGB_to_hs(red, green, blue)
@property
def color_temp(self) -> Optional[int]:
"""Return the CT color value in mireds."""
if self.mode == "color":
return None
if self.control_result:
color_temp = self.control_result["temp"]
else:
color_temp = self.block.colorTemp
color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))
return int(color_temperature_kelvin_to_mired(color_temp))
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
return int(color_temperature_kelvin_to_mired(self._max_kelvin))
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
return int(color_temperature_kelvin_to_mired(self._min_kelvin))
async def async_turn_on(self, **kwargs) -> None:
"""Turn on light."""
if self.block.type == "relay":
self.control_result = await self.block.set_state(turn="on")
self.async_write_ha_state()
return
set_mode = None
params = {"turn": "on"}
if ATTR_BRIGHTNESS in kwargs:
tmp_brightness = int(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
if hasattr(self.block, "gain"):
params["gain"] = tmp_brightness
if hasattr(self.block, "brightness"):
params["brightness"] = tmp_brightness
if ATTR_COLOR_TEMP in kwargs:
color_temp = color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))
# Color temperature change - used only in white mode, switch device mode to white
set_mode = "white"
params["red"] = params["green"] = params["blue"] = 255
params["temp"] = int(color_temp)
if ATTR_HS_COLOR in kwargs:
red, green, blue = color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
# Color channels change - used only in color mode, switch device mode to color
set_mode = "color"
params["red"] = red
params["green"] = green
params["blue"] = blue
if ATTR_WHITE_VALUE in kwargs:
# White channel change - used only in color mode, switch device mode to color
set_mode = "color"
params["white"] = int(kwargs[ATTR_WHITE_VALUE])
if set_mode and self.mode != set_mode:
self.mode_result = await self.wrapper.device.switch_light_mode(set_mode)
self.control_result = await self.block.set_state(**params)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off light."""
self.control_result = await self.block.set_state(turn="off")
self.async_write_ha_state()
@callback
def _update_callback(self):
"""When device updates, clear control & mode result that overrides state."""
self.control_result = None
self.mode_result = None
super()._update_callback()
|
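# Standalone helpers (assumed names, not part of the integration above) showing
# the brightness scaling it performs: Shelly blocks report 0-100 percent, while
# Home Assistant's ATTR_BRIGHTNESS uses 0-255.
def percent_to_ha_brightness(percent: int) -> int:
    """Map a Shelly gain/brightness percentage (0-100) to HA brightness (0-255)."""
    return int(percent / 100 * 255)

def ha_brightness_to_percent(brightness: int) -> int:
    """Map HA brightness (0-255) back to a device percentage (0-100)."""
    return int(brightness / 255 * 100)

# percent_to_ha_brightness(100) -> 255, ha_brightness_to_percent(128) -> 50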
"""Light for Shelly."""
from typing import Optional, Tuple
from aioshelly import Block
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.util.color import (
color_hs_to_RGB,
color_RGB_to_hs,
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import ShellyDeviceWrapper
from .const import (
COAP,
DATA_CONFIG_ENTRY,
DOMAIN,
KELVIN_MAX_VALUE,
KELVIN_MIN_VALUE_COLOR,
KELVIN_MIN_VALUE_WHITE,
)
from .entity import ShellyBlockEntity
from .utils import async_remove_shelly_entity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up lights for device."""
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]
blocks = []
for block in wrapper.device.blocks:
if block.type == "light":
blocks.append(block)
elif block.type == "relay":
appliance_type = wrapper.device.settings["relays"][int(block.channel)].get(
"appliance_type"
)
if appliance_type and appliance_type.lower() == "light":
blocks.append(block)
unique_id = (
f'{wrapper.device.shelly["mac"]}-{block.type}_{block.channel}'
)
await async_remove_shelly_entity(hass, "switch", unique_id)
if not blocks:
return
async_add_entities(ShellyLight(wrapper, block) for block in blocks)
class ShellyLight(ShellyBlockEntity, LightEntity):
"""Switch that controls a relay block on Shelly devices."""
def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
"""Initialize light."""
super().__init__(wrapper, block)
self.control_result = None
self.mode_result = None
self._supported_features = 0
self._min_kelvin = KELVIN_MIN_VALUE_WHITE
self._max_kelvin = KELVIN_MAX_VALUE
if hasattr(block, "brightness") or hasattr(block, "gain"):
self._supported_features |= SUPPORT_BRIGHTNESS
if hasattr(block, "colorTemp"):
self._supported_features |= SUPPORT_COLOR_TEMP
if hasattr(block, "white"):
self._supported_features |= SUPPORT_WHITE_VALUE
if hasattr(block, "red") and hasattr(block, "green") and hasattr(block, "blue"):
self._supported_features |= SUPPORT_COLOR
self._min_kelvin = KELVIN_MIN_VALUE_COLOR
@property
def supported_features(self) -> int:
"""Supported features."""
return self._supported_features
@property
def is_on(self) -> bool:
"""If light is on."""
if self.control_result:
return self.control_result["ison"]
return self.block.output
@property
def mode(self) -> Optional[str]:
"""Return the color mode of the light."""
if self.mode_result:
return self.mode_result["mode"]
if hasattr(self.block, "mode"):
return self.block.mode
if (
hasattr(self.block, "red")
and hasattr(self.block, "green")
and hasattr(self.block, "blue")
):
return "color"
return "white"
@property
def brightness(self) -> int:
"""Brightness of light."""
if self.mode == "color":
if self.control_result:
brightness = self.control_result["gain"]
else:
brightness = self.block.gain
else:
if self.control_result:
brightness = self.control_result["brightness"]
else:
brightness = self.block.brightness
return int(brightness / 100 * 255)
@property
def white_value(self) -> int:
"""White value of light."""
if self.control_result:
white = self.control_result["white"]
else:
white = self.block.white
return int(white)
@property
def hs_color(self) -> Tuple[float, float]:
"""Return the hue and saturation color value of light."""
if self.mode == "white":
return color_RGB_to_hs(255, 255, 255)
if self.control_result:
red = self.control_result["red"]
green = self.control_result["green"]
blue = self.control_result["blue"]
else:
red = self.block.red
green = self.block.green
blue = self.block.blue
return color_RGB_to_hs(red, green, blue)
@property
def color_temp(self) -> Optional[int]:
"""Return the CT color value in mireds."""
if self.mode == "color":
return None
if self.control_result:
color_temp = self.control_result["temp"]
else:
color_temp = self.block.colorTemp
color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))
return int(color_temperature_kelvin_to_mired(color_temp))
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
return int(color_temperature_kelvin_to_mired(self._max_kelvin))
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
return int(color_temperature_kelvin_to_mired(self._min_kelvin))
async def async_turn_on(self, **kwargs) -> None:
"""Turn on light."""
if self.block.type == "relay":
self.control_result = await self.block.set_state(turn="on")
self.async_write_ha_state()
return
set_mode = None
params = {"turn": "on"}
if ATTR_BRIGHTNESS in kwargs:
tmp_brightness = int(kwargs[ATTR_BRIGHTNESS] / 255 * 100)
if hasattr(self.block, "gain"):
params["gain"] = tmp_brightness
if hasattr(self.block, "brightness"):
params["brightness"] = tmp_brightness
if ATTR_COLOR_TEMP in kwargs:
color_temp = color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
color_temp = min(self._max_kelvin, max(self._min_kelvin, color_temp))
# Color temperature change - used only in white mode, switch device mode to white
set_mode = "white"
params["red"] = params["green"] = params["blue"] = 255
params["temp"] = int(color_temp)
if ATTR_HS_COLOR in kwargs:
red, green, blue = color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
# Color channels change - used only in color mode, switch device mode to color
set_mode = "color"
params["red"] = red
params["green"] = green
params["blue"] = blue
if ATTR_WHITE_VALUE in kwargs:
# White channel change - used only in color mode, switch device mode to color
set_mode = "color"
params["white"] = int(kwargs[ATTR_WHITE_VALUE])
if set_mode and self.mode != set_mode:
self.mode_result = await self.wrapper.device.switch_light_mode(set_mode)
self.control_result = await self.block.set_state(**params)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off light."""
self.control_result = await self.block.set_state(turn="off")
self.async_write_ha_state()
@callback
def _update_callback(self):
"""When device updates, clear control & mode result that overrides state."""
self.control_result = None
self.mode_result = None
super()._update_callback()
|
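# Self-contained sketch of the colour-temperature handling above: the value is
# clamped to the supported Kelvin range and converted to mireds
# (mired = 1,000,000 / Kelvin). The constants are illustrative, not the
# integration's KELVIN_* values.
MIN_KELVIN = 2700
MAX_KELVIN = 6500

def kelvin_to_mired_clamped(kelvin: int) -> int:
    kelvin = min(MAX_KELVIN, max(MIN_KELVIN, kelvin))
    return int(1_000_000 / kelvin)

# kelvin_to_mired_clamped(3000) -> 333; 10000 K is clamped to 6500 K -> 153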
def arithmetic_arranger(problems, answer=False):
top = []
bottom = []
lines = []
answers = []
if len(problems) > 5:
return "Error: Too many problems."
for prob in problems:
a, sign, b = prob.split()
if sign not in ['+', '-']:
return "Error: Operator must be '+' or '-'."
if not (a.isdigit() and b.isdigit()):
return "Error: Numbers must only contain digits."
if len(a) > 4 or len(b) > 4:
return "Error: Numbers cannot be more than four digits."
length = max(len(a), len(b)) + 2
top.append(a.rjust(length))
bottom.append(f"{sign} {b.rjust(length - 2)}")
lines.append('-' * length)
if sign == '+':
answers.append(str(int(a) + int(b)).rjust(length))
else:
answers.append(str(int(a) - int(b)).rjust(length))
if answer:
return f"{" ".join(top)}\n{" ".join(bottom)}\n{" ".join(lines)}\n{" ".join(answers)}"
else:
return f"{" ".join(top)}\n{" ".join(bottom)}\n{" ".join(lines)}"
|
def arithmetic_arranger(problems, answer=False):
top = []
bottom = []
lines = []
answers = []
if len(problems) > 5:
return "Error: Too many problems."
for prob in problems:
a, sign, b = prob.split()
if sign not in ['+', '-']:
return "Error: Operator must be '+' or '-'."
if not (a.isdigit() and b.isdigit()):
return "Error: Numbers must only contain digits."
if len(a) > 4 or len(b) > 4:
return "Error: Numbers cannot be more than four digits."
length = max(len(a), len(b)) + 2
top.append(a.rjust(length))
bottom.append(f"{sign} {b.rjust(length - 2)}")
lines.append('-' * length)
if sign == '+':
answers.append(str(int(a) + int(b)).rjust(length))
else:
answers.append(str(int(a) - int(b)).rjust(length))
if answer:
return f"{' '.join(top)}\n{' '.join(bottom)}\n{' '.join(lines)}\n{' '.join(answers)}"
else:
return f"{' '.join(top)}\n{' '.join(bottom)}\n{' '.join(lines)}"
|
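# Illustrative usage of arithmetic_arranger defined above. With answer=True the
# returned string has four lines (first operands, operators with second
# operands, dashes, results); invalid input returns an error string instead.
if __name__ == "__main__":
    print(arithmetic_arranger(["32 + 698", "3801 - 2", "45 + 43"], answer=True))
    print(arithmetic_arranger(["98 * 3"]))  # -> "Error: Operator must be '+' or '-'."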
def main(request, response):
def fmt(x):
return f'"{x.decode('utf-8')}"' if x is not None else "undefined"
purpose = request.headers.get("Purpose", b"").decode("utf-8")
sec_purpose = request.headers.get("Sec-Purpose", b"").decode("utf-8")
headers = [(b"Content-Type", b"text/html"), (b'WWW-Authenticate', 'Basic')]
status = 200 if request.auth.username is not None or sec_purpose.startswith(
"prefetch") else 401
content = f'''
<!DOCTYPE html>
<script src="/common/dispatcher/dispatcher.js"></script>
<script src="utils.sub.js"></script>
<script>
window.requestHeaders = {{
purpose: "{purpose}",
sec_purpose: "{sec_purpose}"
}};
window.requestCredentials = {{
username: {fmt(request.auth.username)},
password: {fmt(request.auth.password)}
}};
const uuid = new URLSearchParams(location.search).get('uuid');
window.executor = new Executor(uuid);
</script>
'''
return status, headers, content
|
def main(request, response):
def fmt(x):
return f'"{x.decode("utf-8")}"' if x is not None else "undefined"
purpose = request.headers.get("Purpose", b"").decode("utf-8")
sec_purpose = request.headers.get("Sec-Purpose", b"").decode("utf-8")
headers = [(b"Content-Type", b"text/html"), (b'WWW-Authenticate', 'Basic')]
status = 200 if request.auth.username is not None or sec_purpose.startswith(
"prefetch") else 401
content = f'''
<!DOCTYPE html>
<script src="/common/dispatcher/dispatcher.js"></script>
<script src="utils.sub.js"></script>
<script>
window.requestHeaders = {{
purpose: "{purpose}",
sec_purpose: "{sec_purpose}"
}};
window.requestCredentials = {{
username: {fmt(request.auth.username)},
password: {fmt(request.auth.password)}
}};
const uuid = new URLSearchParams(location.search).get('uuid');
window.executor = new Executor(uuid);
</script>
'''
return status, headers, content
|
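# Hypothetical helper (not part of the WPT handler above) spelling out the
# gating rule it implements: prefetch requests, identified by a Sec-Purpose
# header starting with "prefetch", are served without credentials, while other
# requests must carry Basic-auth credentials or receive a 401 challenge.
def decide_status(username, sec_purpose: str) -> int:
    if username is not None or sec_purpose.startswith("prefetch"):
        return 200
    return 401

# decide_status(None, "prefetch") -> 200
# decide_status(None, "") -> 401
# decide_status(b"user", "") -> 200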
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representations of the system's Snaps, and abstractions around managing them.
The `snap` module provides convenience methods for listing, installing, refreshing, and removing
Snap packages, in addition to setting and getting configuration options for them.
In the `snap` module, `SnapCache` creates a dict-like mapping of `Snap` objects when
instantiated. Installed snaps are fully populated, and available snaps are lazily-loaded upon
request. This module relies on an installed and running `snapd` daemon to perform operations over
the `snapd` HTTP API.
`SnapCache` objects can be used to install or modify Snap packages by name in a manner similar to
using the `snap` command from the commandline.
An example of adding Juju to the system with `SnapCache` and setting a config value:
```python
try:
cache = snap.SnapCache()
juju = cache["juju"]
if not juju.present:
juju.ensure(snap.SnapState.Latest, channel="beta")
juju.set("key", "value")
except snap.SnapError as e:
logger.error(f"An exception occurred when installing charmcraft. Reason: {e.message}")
```
In addition, the `snap` module provides "bare" methods which can act on Snap packages as
simple function calls. :meth:`add`, :meth:`remove`, and :meth:`ensure` are provided, as
well as :meth:`install_local` for installing directly from a local `.snap` file. These return
`Snap` objects.
As an example of installing several Snaps and checking details:
```python
try:
nextcloud, charmcraft = snap.add(["nextcloud", "charmcraft"])
if nextcloud.get("mode") != "production":
nextcloud.set("mode", "production")
except snap.SnapError as e:
logger.error(f"An exception occurred when installing snaps. Reason: {e.message}")
```
"""
import http.client
import json
import logging
import os
import socket
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
from collections.abc import Mapping
from enum import Enum
from subprocess import CalledProcessError
from typing import Dict, Iterable, List, Optional, Union
logger = logging.getLogger(__name__)
# The unique Charmhub library identifier, never change it
LIBID = "05394e5893f94f2d90feb7cbe6b633cd"
# Increment this major API version when introducing breaking changes
LIBAPI = 0
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
def _cache_init(func):
def inner(*args, **kwargs):
if _Cache.cache is None:
_Cache.cache = SnapCache()
return func(*args, **kwargs)
return inner
class MetaCache(type):
"""MetaCache class used for initialising the snap cache."""
@property
def cache(cls) -> "SnapCache":
"""Property for returning the snap cache."""
return cls._cache
@cache.setter
def cache(cls, cache: "SnapCache") -> None:
"""Setter for the snap cache."""
cls._cache = cache
def __getitem__(cls, name) -> "Snap":
"""Snap cache getter."""
return cls._cache[name]
class _Cache(object, metaclass=MetaCache):
_cache = None
class Error(Exception):
"""Base class of most errors raised by this library."""
def __repr__(self):
"""String representation of the Error class."""
return f"<{type(self).__module__}.{type(self).__name__} {self.args}>"
@property
def name(self):
"""Return a string representation of the model plus class."""
return f"<{type(self).__module__}.{type(self).__name__}>"
@property
def message(self):
"""Return the message passed as an argument."""
return self.args[0]
class SnapAPIError(Error):
"""Raised when an HTTP API error occurs talking to the Snapd server."""
def __init__(self, body: Dict, code: int, status: str, message: str):
"""This shouldn't be instantiated directly."""
super().__init__(message) # Makes str(e) return message
self.body = body
self.code = code
self.status = status
self._message = message
def __repr__(self):
"""String representation of the SnapAPIError class."""
return "APIError({!r}, {!r}, {!r}, {!r})".format(
self.body, self.code, self.status, self._message
)
class SnapState(Enum):
"""The state of a snap on the system or in the cache."""
Present = "present"
Absent = "absent"
Latest = "latest"
Available = "available"
class SnapError(Error):
"""Raised when there's an error installing or removing a snap."""
class SnapNotFoundError(Error):
"""Raised when a requested snap is not known to the system."""
class Snap(object):
"""Represents a snap package and its properties.
`Snap` exposes the following properties about a snap:
- name: the name of the snap
- state: a `SnapState` representation of its install status
- channel: "stable", "candidate", "beta", and "edge" are common
- revision: a string representing the snap's revision
- confinement: "classic" or "strict"
"""
def __init__(
self, name, state: SnapState, channel: str, revision: str, confinement: str
) -> None:
self._name = name
self._state = state
self._channel = channel
self._revision = revision
self._confinement = confinement
def __eq__(self, other) -> bool:
"""Equality for comparison."""
return (
isinstance(other, self.__class__)
and (
self._name,
self._revision,
)
== (other._name, other._revision)
)
def __hash__(self):
"""A basic hash so this class can be used in Mappings and dicts."""
return hash((self._name, self._revision))
def __repr__(self):
"""A representation of the snap."""
return f"<{self.__module__}.{self.__class__.__name__}: {self.__dict__}>"
def __str__(self):
"""A human-readable representation of the snap."""
return "<{}: {}-{}.{} -- {}>".format(
self.__class__.__name__,
self._name,
self._revision,
self._channel,
str(self._state),
)
def _snap(self, command: str, optargs: Optional[List[str]] = None) -> str:
"""Perform a snap operation.
Args:
command: the snap command to execute
optargs: an (optional) list of additional arguments to pass,
commonly confinement or channel
Raises:
SnapError if there is a problem encountered
"""
optargs = optargs if optargs is not None else []
_cmd = ["snap", command, self._name, *optargs]
try:
return subprocess.check_output(_cmd, universal_newlines=True)
except CalledProcessError as e:
raise SnapError("Could not %s snap [%s]: %s", _cmd, self._name, e.output)
def get(self, key) -> str:
"""Gets a snap configuration value.
Args:
key: the key to retrieve
"""
return self._snap("get", [key])
def set(self, key, value) -> str:
"""Sets a snap configuration value.
Args:
key: the key to set
value: the value to set it to
"""
return self._snap("set", [key, value])
def unset(self, key) -> str:
"""Unsets a snap configuration value.
Args:
key: the key to unset
"""
return self._snap("unset", [key])
def _install(self, channel: Optional[str] = "") -> None:
"""Add a snap to the system.
Args:
channel: the channel to install from
"""
confinement = "--classic" if self._confinement == "classic" else ""
channel = f'--channel="{channel}"' if channel else ""
self._snap("install", [confinement, channel])
def _refresh(self, channel: Optional[str] = "") -> None:
"""Refresh a snap.
Args:
channel: the channel to install from
"""
channel = f"--{channel}" if channel else self._channel
self._snap("refresh", [channel])
def _remove(self) -> None:
"""Removes a snap from the system."""
return self._snap("remove")
@property
def name(self) -> str:
"""Returns the name of the snap."""
return self._name
def ensure(
self,
state: SnapState,
classic: Optional[bool] = False,
channel: Optional[str] = "",
):
"""Ensures that a snap is in a given state.
Args:
state: a `SnapState` to reconcile to.
classic: an (Optional) boolean indicating whether classic confinement should be used
channel: the channel to install from
Raises:
SnapError if an error is encountered
"""
self._confinement = "classic" if classic or self._confinement == "classic" else ""
if self._state is not state:
if state not in (SnapState.Present, SnapState.Latest):
self._remove()
else:
self._install(channel)
self._state = state
@property
def present(self) -> bool:
"""Returns whether or not a snap is present."""
return self._state in (SnapState.Present, SnapState.Latest)
@property
def latest(self) -> bool:
"""Returns whether the snap is the most recent version."""
return self._state is SnapState.Latest
@property
def state(self) -> SnapState:
"""Returns the current snap state."""
return self._state
@state.setter
def state(self, state: SnapState) -> None:
"""Sets the snap state to a given value.
Args:
state: a `SnapState` to reconcile the snap to.
Raises:
SnapError if an error is encountered
"""
if self._state is not state:
self.ensure(state)
self._state = state
@property
def revision(self) -> str:
"""Returns the revision for a snap."""
return self._revision
@property
def channel(self) -> str:
"""Returns the channel for a snap."""
return self._channel
@property
def confinement(self) -> str:
"""Returns the confinement for a snap."""
return self._confinement
class _UnixSocketConnection(http.client.HTTPConnection):
"""Implementation of HTTPConnection that connects to a named Unix socket."""
def __init__(self, host, timeout=None, socket_path=None):
if timeout is None:
super().__init__(host)
else:
super().__init__(host, timeout=timeout)
self.socket_path = socket_path
def connect(self):
"""Override connect to use Unix socket (instead of TCP socket)."""
if not hasattr(socket, "AF_UNIX"):
raise NotImplementedError(f"Unix sockets not supported on {sys.platform}")
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.socket_path)
if self.timeout is not None:
self.sock.settimeout(self.timeout)
class _UnixSocketHandler(urllib.request.AbstractHTTPHandler):
"""Implementation of HTTPHandler that uses a named Unix socket."""
def __init__(self, socket_path: str):
super().__init__()
self.socket_path = socket_path
def http_open(self, req) -> http.client.HTTPResponse:
"""Override http_open to use a Unix socket connection (instead of TCP)."""
return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path)
class SnapClient:
"""Snapd API client to talk to HTTP over UNIX sockets.
In order to avoid shelling out and/or involving sudo in calling the snapd API,
use a wrapper based on the Pebble Client, trimmed down to only the utility methods
needed for talking to snapd.
"""
def __init__(
self,
socket_path: str = "/run/snapd.socket",
opener: Optional[urllib.request.OpenerDirector] = None,
base_url: str = "http://localhost/v2/",
timeout: float = 5.0,
):
"""Initialize a client instance.
Args:
socket_path: a path to the socket on the filesystem. Defaults to /run/snapd.socket
opener: specifies an opener for unix socket, if unspecified a default is used
base_url: base url for making requests to the snap client. Defaults to
http://localhost/v2/
timeout: timeout in seconds to use when making requests to the API. Default is 5.0s.
"""
if opener is None:
opener = self._get_default_opener(socket_path)
self.opener = opener
self.base_url = base_url
self.timeout = timeout
@classmethod
def _get_default_opener(cls, socket_path):
"""Build the default opener to use for requests (HTTP over Unix socket)."""
opener = urllib.request.OpenerDirector()
opener.add_handler(_UnixSocketHandler(socket_path))
opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
opener.add_handler(urllib.request.HTTPRedirectHandler())
opener.add_handler(urllib.request.HTTPErrorProcessor())
return opener
def _request(
self,
method: str,
path: str,
query: Dict = None,
body: Dict = None,
) -> Dict:
"""Make a JSON request to the Snapd server with the given HTTP method and path.
If query dict is provided, it is encoded and appended as a query string
to the URL. If body dict is provided, it is serialized as JSON and used
as the HTTP body (with Content-Type: "application/json"). The resulting
body is decoded from JSON.
"""
headers = {"Accept": "application/json"}
data = None
if body is not None:
data = json.dumps(body).encode("utf-8")
headers["Content-Type"] = "application/json"
response = self._request_raw(method, path, query, headers, data)
return json.loads(response.read().decode())["result"]
def _request_raw(
self,
method: str,
path: str,
query: Dict = None,
headers: Dict = None,
data: bytes = None,
) -> http.client.HTTPResponse:
"""Make a request to the Snapd server; return the raw HTTPResponse object."""
url = self.base_url + path
if query:
url = url + "?" + urllib.parse.urlencode(query)
if headers is None:
headers = {}
request = urllib.request.Request(url, method=method, data=data, headers=headers)
try:
response = self.opener.open(request, timeout=self.timeout)
except urllib.error.HTTPError as e:
code = e.code
status = e.reason
message = ""
try:
body = json.loads(e.read().decode())["result"]
except (IOError, ValueError, KeyError) as e2:
# Will only happen on read error or if snapd sends invalid JSON.
body = {}
message = f"{type(e2).__name__} - {e2}"
raise SnapAPIError(body, code, status, message)
except urllib.error.URLError as e:
raise SnapAPIError({}, 500, "Not found", e.reason)
return response
def get_installed_snaps(self) -> Dict:
"""Get information about currently installed snaps."""
return self._request("GET", "snaps")
def get_snap_information(self, name: str) -> Dict:
"""Query the snap server for information about single snap."""
return self._request("GET", "find", {"name": name})[0]
class SnapCache(Mapping):
"""An abstraction to represent installed/available packages.
When instantiated, `SnapCache` iterates through the list of installed
snaps using the `snapd` HTTP API, and a list of available snaps by reading
the filesystem to populate the cache. Information about available snaps is lazily-loaded
from the `snapd` API when requested.
"""
def __init__(self):
if not self.snapd_installed:
raise SnapError("snapd is not installed or not in /usr/bin") from None
self._snap_client = SnapClient()
self._snap_map = {}
if self.snapd_installed:
self._load_available_snaps()
self._load_installed_snaps()
def __contains__(self, key: str) -> bool:
"""Magic method to ease checking if a given snap is in the cache."""
return key in self._snap_map
def __len__(self) -> int:
"""Returns number of items in the snap cache."""
return len(self._snap_map)
def __iter__(self) -> Iterable["Snap"]:
"""Magic method to provide an iterator for the snap cache."""
return iter(self._snap_map.values())
def __getitem__(self, snap_name: str) -> Snap:
"""Return either the installed version or latest version for a given snap."""
snap = None
try:
snap = self._snap_map[snap_name]
except KeyError:
# The snap catalog may not be populated yet. Try to fetch info
# blindly
logger.warning(
"Snap '{}' not found in the snap cache. "
"The catalog may not be populated by snapd yet".format(snap_name)
)
if snap is None:
try:
self._snap_map[snap_name] = self._load_info(snap_name)
except SnapAPIError:
raise SnapNotFoundError(f"Snap '{snap_name}' not found!")
return self._snap_map[snap_name]
@property
def snapd_installed(self) -> bool:
"""Check whether snapd has been installled on the system."""
return os.path.isfile("/usr/bin/snap")
def _load_available_snaps(self) -> None:
"""Load the list of available snaps from disk.
Leave them empty and lazily load later if asked for.
"""
if not os.path.isfile("/var/cache/snapd/names"):
logger.warning(
"The snap cache has not been populated or is not in the default location"
)
return
with open("/var/cache/snapd/names", "r") as f:
for line in f:
if line.strip():
self._snap_map[line.strip()] = None
def _load_installed_snaps(self) -> None:
"""Load the installed snaps into the dict."""
installed = self._snap_client.get_installed_snaps()
for i in installed:
snap = Snap(
i["name"],
SnapState.Latest,
i["channel"],
i["revision"],
i["confinement"],
)
self._snap_map[snap.name] = snap
def _load_info(self, name) -> Snap:
"""Load info for snaps which are not installed if requested.
Args:
name: a string representing the name of the snap
"""
info = self._snap_client.get_snap_information(name)
return Snap(
info["name"],
SnapState.Available,
info["channel"],
info["revision"],
info["confinement"],
)
@_cache_init
def add(
snap_names: Union[str, List[str]],
state: Union[str, SnapState] = SnapState.Latest,
channel: Optional[str] = "latest",
classic: Optional[bool] = False,
) -> Union[Snap, List[Snap]]:
"""Add a snap to the system.
Args:
snap_names: the name or names of the snaps to install
state: a string or `SnapState` representation of the desired state, one of
[`Present` or `Latest`]
channel: an (Optional) channel as a string. Defaults to 'latest'
classic: an (Optional) boolean specifying whether it should be added with classic
confinement. Default `False`
Raises:
SnapError if some snaps failed to install or were not found.
"""
snap_names = [snap_names] if type(snap_names) is str else snap_names
if not snap_names:
raise TypeError("Expected at least one snap to add, received zero!")
if type(state) is str:
state = SnapState(state)
return _wrap_snap_operations(snap_names, state, channel, classic)
@_cache_init
def remove(snap_names: Union[str, List[str]]) -> Union[Snap, List[Snap]]:
"""Removes a snap from the system.
Args:
snap_names: the name or names of the snaps to install
Raises:
SnapError if some snaps failed to install.
"""
snap_names = [snap_names] if type(snap_names) is str else snap_names
if not snap_names:
raise TypeError("Expected at least one snap to add, received zero!")
return _wrap_snap_operations(snap_names, SnapState.Absent, "", False)
@_cache_init
def ensure(
snap_names: Union[str, List[str]],
state: str,
channel: Optional[str] = "latest",
classic: Optional[bool] = False,
) -> Union[Snap, List[Snap]]:
"""Ensures a snap is in a given state to the system.
Args:
name: the name(s) of the snaps to operate on
state: a string representation of the desired state, from `SnapState`
channel: an (Optional) channel as a string. Defaults to 'latest'
classic: an (Optional) boolean specifying whether it should be added with classic
confinement. Default `False`
Raises:
SnapError if the snap is not in the cache.
"""
if state in ("present", "latest"):
return add(snap_names, SnapState(state), channel, classic)
else:
return remove(snap_names)
def _wrap_snap_operations(
snap_names: List[str], state: SnapState, channel: str, classic: bool
) -> Union[Snap, List[Snap]]:
"""Wrap common operations for bare commands."""
snaps = {"success": [], "failed": []}
op = "remove" if state is SnapState.Absent else "install or refresh"
for s in snap_names:
try:
snap = _Cache[s]
if state is SnapState.Absent:
snap.ensure(state=SnapState.Absent)
else:
snap.ensure(state=state, classic=classic, channel=channel)
snaps["success"].append(snap)
except SnapError as e:
logger.warning(f"Failed to {op} snap {s}: {e.message}!")
snaps["failed"].append(s)
except SnapNotFoundError:
logger.warning(f"Snap '{s}' not found in cache!")
snaps["failed"].append(s)
if len(snaps["failed"]):
raise SnapError(
f"Failed to install or refresh snap(s): {", ".join([s for s in snaps["failed"]])}"
)
return snaps["success"] if len(snaps["success"]) > 1 else snaps["success"][0]
def install_local(
filename: str, classic: Optional[bool] = False, dangerous: Optional[bool] = False
) -> Snap:
"""Perform a snap operation.
Args:
filename: the path to a local .snap file to install
classic: whether to use classic confinement
dangerous: whether --dangerous should be passed to install snaps without a signature
Raises:
SnapError if there is a problem encountered
"""
_cmd = [
"snap",
"install",
filename,
"--classic" if classic else "",
"--dangerous" if dangerous else "",
]
try:
result = subprocess.check_output(_cmd, universal_newlines=True).splitlines()[0]
snap_name, _ = result.split(" ", 1)
c = SnapCache()
return c[snap_name]
except CalledProcessError as e:
raise SnapError("Could not install snap [%s]: %s", _cmd, filename, e.output)
|
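# Illustrative use of the helpers defined above; the snap name and channel are
# placeholders. Look a snap up in the cache, reconcile its state, and fall back
# to the bare functions for one-off operations.
import logging

logger = logging.getLogger(__name__)

try:
    cache = SnapCache()
    hello = cache["hello-world"]  # lazily loaded from snapd if not installed
    if not hello.present:
        hello.ensure(SnapState.Latest, channel="stable")
    logger.info("hello-world is at revision %s", hello.revision)
    # Bare-function equivalent for several snaps at once:
    # add(["hello-world", "core20"], channel="stable")
except (SnapError, SnapNotFoundError) as e:
    logger.error("Snap operation failed: %s", e.message)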
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representations of the system's Snaps, and abstractions around managing them.
The `snap` module provides convenience methods for listing, installing, refreshing, and removing
Snap packages, in addition to setting and getting configuration options for them.
In the `snap` module, `SnapCache` creates a dict-like mapping of `Snap` objects when
instantiated. Installed snaps are fully populated, and available snaps are lazily-loaded upon
request. This module relies on an installed and running `snapd` daemon to perform operations over
the `snapd` HTTP API.
`SnapCache` objects can be used to install or modify Snap packages by name in a manner similar to
using the `snap` command from the commandline.
An example of adding Juju to the system with `SnapCache` and setting a config value:
```python
try:
cache = snap.SnapCache()
juju = cache["juju"]
if not juju.present:
juju.ensure(snap.SnapState.Latest, channel="beta")
juju.set("key", "value")
except snap.SnapError as e:
logger.error(f"An exception occurred when installing charmcraft. Reason: {e.message}")
```
In addition, the `snap` module provides "bare" methods which can act on Snap packages as
simple function calls. :meth:`add`, :meth:`remove`, and :meth:`ensure` are provided, as
well as :meth:`install_local` for installing directly from a local `.snap` file. These return
`Snap` objects.
As an example of installing several Snaps and checking details:
```python
try:
nextcloud, charmcraft = snap.add(["nextcloud", "charmcraft"])
if nextcloud.get("mode") != "production":
nextcloud.set("mode", "production")
except snap.SnapError as e:
logger.error(f"An exception occurred when installing snaps. Reason: {e.message}")
```
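A minimal sketch (not taken from upstream documentation) of reconciling and then removing a
snap with the bare :meth:`ensure` and :meth:`remove` helpers defined below; the snap name and
channel are illustrative:
```python
try:
    snap.ensure("nextcloud", "latest", channel="stable")
    snap.remove("nextcloud")
except snap.SnapError as e:
    logger.error(f"An exception occurred when managing snaps. Reason: {e.message}")
```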
"""
import http.client
import json
import logging
import os
import socket
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
from collections.abc import Mapping
from enum import Enum
from subprocess import CalledProcessError
from typing import Dict, Iterable, List, Optional, Union
logger = logging.getLogger(__name__)
# The unique Charmhub library identifier, never change it
LIBID = "05394e5893f94f2d90feb7cbe6b633cd"
# Increment this major API version when introducing breaking changes
LIBAPI = 0
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
def _cache_init(func):
def inner(*args, **kwargs):
if _Cache.cache is None:
_Cache.cache = SnapCache()
return func(*args, **kwargs)
return inner
class MetaCache(type):
"""MetaCache class used for initialising the snap cache."""
@property
def cache(cls) -> "SnapCache":
"""Property for returning the snap cache."""
return cls._cache
@cache.setter
def cache(cls, cache: "SnapCache") -> None:
"""Setter for the snap cache."""
cls._cache = cache
def __getitem__(cls, name) -> "Snap":
"""Snap cache getter."""
return cls._cache[name]
class _Cache(object, metaclass=MetaCache):
_cache = None
class Error(Exception):
"""Base class of most errors raised by this library."""
def __repr__(self):
"""String representation of the Error class."""
return f"<{type(self).__module__}.{type(self).__name__} {self.args}>"
@property
def name(self):
"""Return a string representation of the model plus class."""
return f"<{type(self).__module__}.{type(self).__name__}>"
@property
def message(self):
"""Return the message passed as an argument."""
return self.args[0]
class SnapAPIError(Error):
"""Raised when an HTTP API error occurs talking to the Snapd server."""
def __init__(self, body: Dict, code: int, status: str, message: str):
"""This shouldn't be instantiated directly."""
super().__init__(message) # Makes str(e) return message
self.body = body
self.code = code
self.status = status
self._message = message
def __repr__(self):
"""String representation of the SnapAPIError class."""
return "APIError({!r}, {!r}, {!r}, {!r})".format(
self.body, self.code, self.status, self._message
)
class SnapState(Enum):
"""The state of a snap on the system or in the cache."""
Present = "present"
Absent = "absent"
Latest = "latest"
Available = "available"
class SnapError(Error):
"""Raised when there's an error installing or removing a snap."""
class SnapNotFoundError(Error):
"""Raised when a requested snap is not known to the system."""
class Snap(object):
"""Represents a snap package and its properties.
`Snap` exposes the following properties about a snap:
- name: the name of the snap
- state: a `SnapState` representation of its install status
- channel: "stable", "candidate", "beta", and "edge" are common
- revision: a string representing the snap's revision
- confinement: "classic" or "strict"
"""
def __init__(
self, name, state: SnapState, channel: str, revision: str, confinement: str
) -> None:
self._name = name
self._state = state
self._channel = channel
self._revision = revision
self._confinement = confinement
def __eq__(self, other) -> bool:
"""Equality for comparison."""
return (
isinstance(other, self.__class__)
and (
self._name,
self._revision,
)
== (other._name, other._revision)
)
def __hash__(self):
"""A basic hash so this class can be used in Mappings and dicts."""
return hash((self._name, self._revision))
def __repr__(self):
"""A representation of the snap."""
return f"<{self.__module__}.{self.__class__.__name__}: {self.__dict__}>"
def __str__(self):
"""A human-readable representation of the snap."""
return "<{}: {}-{}.{} -- {}>".format(
self.__class__.__name__,
self._name,
self._revision,
self._channel,
str(self._state),
)
def _snap(self, command: str, optargs: Optional[List[str]] = None) -> str:
"""Perform a snap operation.
Args:
command: the snap command to execute
optargs: an (optional) list of additional arguments to pass,
commonly confinement or channel
Raises:
SnapError if there is a problem encountered
"""
optargs = optargs if optargs is not None else []
_cmd = ["snap", command, self._name, *optargs]
try:
return subprocess.check_output(_cmd, universal_newlines=True)
except CalledProcessError as e:
raise SnapError("Could not %s snap [%s]: %s", _cmd, self._name, e.output)
def get(self, key) -> str:
"""Gets a snap configuration value.
Args:
key: the key to retrieve
"""
return self._snap("get", [key])
def set(self, key, value) -> str:
"""Sets a snap configuration value.
Args:
key: the key to set
value: the value to set it to
"""
return self._snap("set", [key, value])
def unset(self, key) -> str:
"""Unsets a snap configuration value.
Args:
key: the key to unset
"""
return self._snap("unset", [key])
def _install(self, channel: Optional[str] = "") -> None:
"""Add a snap to the system.
Args:
channel: the channel to install from
"""
confinement = "--classic" if self._confinement == "classic" else ""
channel = f'--channel="{channel}"' if channel else ""
self._snap("install", [confinement, channel])
def _refresh(self, channel: Optional[str] = "") -> None:
"""Refresh a snap.
Args:
channel: the channel to install from
"""
channel = f"--{channel}" if channel else self._channel
self._snap("refresh", [channel])
def _remove(self) -> None:
"""Removes a snap from the system."""
return self._snap("remove")
@property
def name(self) -> str:
"""Returns the name of the snap."""
return self._name
def ensure(
self,
state: SnapState,
classic: Optional[bool] = False,
channel: Optional[str] = "",
):
"""Ensures that a snap is in a given state.
Args:
state: a `SnapState` to reconcile to.
classic: an (Optional) boolean indicating whether classic confinement should be used
channel: the channel to install from
Raises:
SnapError if an error is encountered
"""
self._confinement = "classic" if classic or self._confinement == "classic" else ""
if self._state is not state:
if state not in (SnapState.Present, SnapState.Latest):
self._remove()
else:
self._install(channel)
self._state = state
@property
def present(self) -> bool:
"""Returns whether or not a snap is present."""
return self._state in (SnapState.Present, SnapState.Latest)
@property
def latest(self) -> bool:
"""Returns whether the snap is the most recent version."""
return self._state is SnapState.Latest
@property
def state(self) -> SnapState:
"""Returns the current snap state."""
return self._state
@state.setter
def state(self, state: SnapState) -> None:
"""Sets the snap state to a given value.
Args:
state: a `SnapState` to reconcile the snap to.
Raises:
SnapError if an error is encountered
"""
if self._state is not state:
self.ensure(state)
self._state = state
@property
def revision(self) -> str:
"""Returns the revision for a snap."""
return self._revision
@property
def channel(self) -> str:
"""Returns the channel for a snap."""
return self._channel
@property
def confinement(self) -> str:
"""Returns the confinement for a snap."""
return self._confinement
class _UnixSocketConnection(http.client.HTTPConnection):
"""Implementation of HTTPConnection that connects to a named Unix socket."""
def __init__(self, host, timeout=None, socket_path=None):
if timeout is None:
super().__init__(host)
else:
super().__init__(host, timeout=timeout)
self.socket_path = socket_path
def connect(self):
"""Override connect to use Unix socket (instead of TCP socket)."""
if not hasattr(socket, "AF_UNIX"):
raise NotImplementedError(f"Unix sockets not supported on {sys.platform}")
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.socket_path)
if self.timeout is not None:
self.sock.settimeout(self.timeout)
class _UnixSocketHandler(urllib.request.AbstractHTTPHandler):
"""Implementation of HTTPHandler that uses a named Unix socket."""
def __init__(self, socket_path: str):
super().__init__()
self.socket_path = socket_path
def http_open(self, req) -> http.client.HTTPResponse:
"""Override http_open to use a Unix socket connection (instead of TCP)."""
return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path)
class SnapClient:
"""Snapd API client to talk to HTTP over UNIX sockets.
In order to avoid shelling out and/or involving sudo in calling the snapd API,
use a wrapper based on the Pebble Client, trimmed down to only the utility methods
needed for talking to snapd.
"""
def __init__(
self,
socket_path: str = "/run/snapd.socket",
opener: Optional[urllib.request.OpenerDirector] = None,
base_url: str = "http://localhost/v2/",
timeout: float = 5.0,
):
"""Initialize a client instance.
Args:
            socket_path: a path to the socket on the filesystem. Defaults to /run/snapd.socket
opener: specifies an opener for unix socket, if unspecified a default is used
base_url: base url for making requests to the snap client. Defaults to
http://localhost/v2/
timeout: timeout in seconds to use when making requests to the API. Default is 5.0s.
"""
if opener is None:
opener = self._get_default_opener(socket_path)
self.opener = opener
self.base_url = base_url
self.timeout = timeout
@classmethod
def _get_default_opener(cls, socket_path):
"""Build the default opener to use for requests (HTTP over Unix socket)."""
opener = urllib.request.OpenerDirector()
opener.add_handler(_UnixSocketHandler(socket_path))
opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
opener.add_handler(urllib.request.HTTPRedirectHandler())
opener.add_handler(urllib.request.HTTPErrorProcessor())
return opener
def _request(
self,
method: str,
path: str,
query: Dict = None,
body: Dict = None,
) -> Dict:
"""Make a JSON request to the Snapd server with the given HTTP method and path.
If query dict is provided, it is encoded and appended as a query string
        to the URL. If body dict is provided, it is serialized as JSON and used
as the HTTP body (with Content-Type: "application/json"). The resulting
body is decoded from JSON.
"""
headers = {"Accept": "application/json"}
data = None
if body is not None:
data = json.dumps(body).encode("utf-8")
headers["Content-Type"] = "application/json"
response = self._request_raw(method, path, query, headers, data)
return json.loads(response.read().decode())["result"]
def _request_raw(
self,
method: str,
path: str,
query: Dict = None,
headers: Dict = None,
data: bytes = None,
) -> http.client.HTTPResponse:
"""Make a request to the Snapd server; return the raw HTTPResponse object."""
url = self.base_url + path
if query:
url = url + "?" + urllib.parse.urlencode(query)
if headers is None:
headers = {}
request = urllib.request.Request(url, method=method, data=data, headers=headers)
try:
response = self.opener.open(request, timeout=self.timeout)
except urllib.error.HTTPError as e:
code = e.code
status = e.reason
message = ""
try:
body = json.loads(e.read().decode())["result"]
except (IOError, ValueError, KeyError) as e2:
# Will only happen on read error or if Pebble sends invalid JSON.
body = {}
message = f"{type(e2).__name__} - {e2}"
raise SnapAPIError(body, code, status, message)
except urllib.error.URLError as e:
raise SnapAPIError({}, 500, "Not found", e.reason)
return response
def get_installed_snaps(self) -> Dict:
"""Get information about currently installed snaps."""
return self._request("GET", "snaps")
def get_snap_information(self, name: str) -> Dict:
"""Query the snap server for information about single snap."""
return self._request("GET", "find", {"name": name})[0]
class SnapCache(Mapping):
"""An abstraction to represent installed/available packages.
When instantiated, `SnapCache` iterates through the list of installed
snaps using the `snapd` HTTP API, and a list of available snaps by reading
the filesystem to populate the cache. Information about available snaps is lazily-loaded
from the `snapd` API when requested.
"""
def __init__(self):
if not self.snapd_installed:
raise SnapError("snapd is not installed or not in /usr/bin") from None
self._snap_client = SnapClient()
self._snap_map = {}
if self.snapd_installed:
self._load_available_snaps()
self._load_installed_snaps()
def __contains__(self, key: str) -> bool:
"""Magic method to ease checking if a given snap is in the cache."""
return key in self._snap_map
def __len__(self) -> int:
"""Returns number of items in the snap cache."""
return len(self._snap_map)
def __iter__(self) -> Iterable["Snap"]:
"""Magic method to provide an iterator for the snap cache."""
return iter(self._snap_map.values())
def __getitem__(self, snap_name: str) -> Snap:
"""Return either the installed version or latest version for a given snap."""
snap = None
try:
snap = self._snap_map[snap_name]
except KeyError:
# The snap catalog may not be populated yet. Try to fetch info
# blindly
logger.warning(
"Snap '{}' not found in the snap cache. "
"The catalog may not be populated by snapd yet".format(snap_name)
)
if snap is None:
try:
self._snap_map[snap_name] = self._load_info(snap_name)
except SnapAPIError:
raise SnapNotFoundError(f"Snap '{snap_name}' not found!")
return self._snap_map[snap_name]
@property
def snapd_installed(self) -> bool:
"""Check whether snapd has been installled on the system."""
return os.path.isfile("/usr/bin/snap")
def _load_available_snaps(self) -> None:
"""Load the list of available snaps from disk.
Leave them empty and lazily load later if asked for.
"""
if not os.path.isfile("/var/cache/snapd/names"):
logger.warning(
"The snap cache has not been populated or is not in the default location"
)
return
with open("/var/cache/snapd/names", "r") as f:
for line in f:
if line.strip():
self._snap_map[line.strip()] = None
def _load_installed_snaps(self) -> None:
"""Load the installed snaps into the dict."""
installed = self._snap_client.get_installed_snaps()
for i in installed:
snap = Snap(
i["name"],
SnapState.Latest,
i["channel"],
i["revision"],
i["confinement"],
)
self._snap_map[snap.name] = snap
def _load_info(self, name) -> Snap:
"""Load info for snaps which are not installed if requested.
Args:
name: a string representing the name of the snap
"""
info = self._snap_client.get_snap_information(name)
return Snap(
info["name"],
SnapState.Available,
info["channel"],
info["revision"],
info["confinement"],
)
@_cache_init
def add(
snap_names: Union[str, List[str]],
state: Union[str, SnapState] = SnapState.Latest,
channel: Optional[str] = "latest",
classic: Optional[bool] = False,
) -> Union[Snap, List[Snap]]:
"""Add a snap to the system.
Args:
snap_names: the name or names of the snaps to install
state: a string or `SnapState` representation of the desired state, one of
[`Present` or `Latest`]
channel: an (Optional) channel as a string. Defaults to 'latest'
classic: an (Optional) boolean specifying whether it should be added with classic
confinement. Default `False`
Raises:
SnapError if some snaps failed to install or were not found.
"""
snap_names = [snap_names] if type(snap_names) is str else snap_names
if not snap_names:
raise TypeError("Expected at least one snap to add, received zero!")
if type(state) is str:
state = SnapState(state)
return _wrap_snap_operations(snap_names, state, channel, classic)
@_cache_init
def remove(snap_names: Union[str, List[str]]) -> Union[Snap, List[Snap]]:
"""Removes a snap from the system.
Args:
        snap_names: the name or names of the snaps to remove
Raises:
        SnapError if some snaps failed to be removed.
"""
snap_names = [snap_names] if type(snap_names) is str else snap_names
if not snap_names:
raise TypeError("Expected at least one snap to add, received zero!")
return _wrap_snap_operations(snap_names, SnapState.Absent, "", False)
@_cache_init
def ensure(
snap_names: Union[str, List[str]],
state: str,
channel: Optional[str] = "latest",
classic: Optional[bool] = False,
) -> Union[Snap, List[Snap]]:
"""Ensures a snap is in a given state to the system.
Args:
name: the name(s) of the snaps to operate on
state: a string representation of the desired state, from `SnapState`
channel: an (Optional) channel as a string. Defaults to 'latest'
classic: an (Optional) boolean specifying whether it should be added with classic
confinement. Default `False`
Raises:
SnapError if the snap is not in the cache.
"""
if state in ("present", "latest"):
return add(snap_names, SnapState(state), channel, classic)
else:
return remove(snap_names)
def _wrap_snap_operations(
snap_names: List[str], state: SnapState, channel: str, classic: bool
) -> Union[Snap, List[Snap]]:
"""Wrap common operations for bare commands."""
snaps = {"success": [], "failed": []}
op = "remove" if state is SnapState.Absent else "install or refresh"
for s in snap_names:
try:
snap = _Cache[s]
if state is SnapState.Absent:
snap.ensure(state=SnapState.Absent)
else:
snap.ensure(state=state, classic=classic, channel=channel)
snaps["success"].append(snap)
except SnapError as e:
logger.warning(f"Failed to {op} snap {s}: {e.message}!")
snaps["failed"].append(s)
except SnapNotFoundError:
logger.warning(f"Snap '{s}' not found in cache!")
snaps["failed"].append(s)
if len(snaps["failed"]):
raise SnapError(
f"Failed to install or refresh snap(s): {', '.join([s for s in snaps['failed']])}"
)
return snaps["success"] if len(snaps["success"]) > 1 else snaps["success"][0]
def install_local(
    filename: str, classic: Optional[bool] = False, dangerous: Optional[bool] = False
) -> Snap:
    """Install a snap from a local .snap file.
Args:
filename: the path to a local .snap file to install
classic: whether to use classic confinement
dangerous: whether --dangerous should be passed to install snaps without a signature
Raises:
SnapError if there is a problem encountered
"""
_cmd = [
"snap",
"install",
filename,
"--classic" if classic else "",
"--dangerous" if dangerous else "",
]
try:
result = subprocess.check_output(_cmd, universal_newlines=True).splitlines()[0]
snap_name, _ = result.split(" ", 1)
c = SnapCache()
return c[snap_name]
except CalledProcessError as e:
raise SnapError("Could not install snap [%s]: %s", _cmd, filename, e.output)
|
"""
VIAME Fish format deserializer
"""
import csv
import datetime
import io
import json
import re
from typing import Any, Dict, Generator, List, Tuple, Union
from dive_utils.models import Feature, Track, interpolate
def format_timestamp(fps: int, frame: int) -> str:
return str(datetime.datetime.utcfromtimestamp(frame / fps).strftime(r'%H:%M:%S.%f'))
def writeHeader(writer: 'csv._writer', metadata: Dict): # type: ignore
writer.writerow(
[
"# 1: Detection or Track-id",
"2: Video or Image Identifier",
"3: Unique Frame Identifier",
"4-7: Img-bbox(TL_x",
"TL_y",
"BR_x",
"BR_y)",
"8: Detection or Length Confidence",
"9: Target Length (0 or -1 if invalid)",
"10-11+: Repeated Species",
"Confidence Pairs or Attributes",
]
)
metadata_dict = {}
metadata_dict.update(metadata)
metadata_dict['exported_by'] = 'dive:python'
metadata_dict['exported_time'] = datetime.datetime.now().ctime()
metadata_list = []
for (key, value) in metadata_dict.items():
metadata_list.append(f"{key}: {json.dumps(value)}")
writer.writerow(['# metadata', *metadata_list])
def valueToString(value):
if value is True:
return "true"
elif value is False:
return "false"
return str(value)
def row_info(row: List[str]) -> Tuple[int, str, int, List[int], float]:
trackId = int(row[0])
filename = str(row[1])
frame = int(row[2])
bounds = [round(float(x)) for x in row[3:7]]
fish_length = float(row[8])
return trackId, filename, frame, bounds, fish_length
def _deduceType(value: str) -> Union[bool, float, str]:
if value == "true":
return True
if value == "false":
return False
try:
number = float(value)
return number
except ValueError:
return value
def create_geoJSONFeature(features: Dict[str, Any], type: str, coords: List[Any], key=''):
feature = {}
if "geometry" not in features:
features["geometry"] = {"type": "FeatureCollection", "features": []}
else: # check for existing type/key pairs
if features["geometry"]["features"]:
for subfeature in features["geometry"]["features"]:
if (
subfeature["geometry"]["type"] == type
and subfeature["properties"]["key"] == key
):
feature = subfeature
break
if "geometry" not in feature:
feature = {
"type": "Feature",
"properties": {"key": key},
"geometry": {"type": type},
}
if type == 'Polygon':
feature["geometry"]['coordinates'] = [coords]
elif type in ["LineString", "Point"]:
feature['geometry']['coordinates'] = coords
features['geometry']['features'].append(feature)
def _parse_row(row: List[str]) -> Tuple[Dict, Dict, Dict, List]:
"""
Parse a single CSV line into its composite track and detection parts
"""
features: Dict[str, Any] = {}
attributes: Dict[str, Any] = {}
track_attributes: Dict[str, Any] = {}
confidence_pairs: List[Tuple[str, float]] = [
(row[i], float(row[i + 1]))
for i in range(9, len(row), 2)
if i + 1 < len(row) and row[i] and row[i + 1] and not row[i].startswith("(")
]
sorted_confidence_pairs = sorted(confidence_pairs, key=lambda item: item[1], reverse=True)
head_tail = []
start = 9 + len(sorted_confidence_pairs) * 2
for j in range(start, len(row)):
# (kp) head x y
head_regex = re.match(r"^\(kp\) head ([0-9]+\.*[0-9]*) ([0-9]+\.*[0-9]*)", row[j])
if head_regex:
point = [float(head_regex[1]), float(head_regex[2])]
head_tail.append(point)
create_geoJSONFeature(features, 'Point', point, 'head')
# (kp) tail x y
tail_regex = re.match(r"^\(kp\) tail ([0-9]+\.*[0-9]*) ([0-9]+\.*[0-9]*)", row[j])
if tail_regex:
point = [float(tail_regex[1]), float(tail_regex[2])]
head_tail.append(point)
create_geoJSONFeature(features, 'Point', point, 'tail')
# (atr) text
atr_regex = re.match(r"^\(atr\) (.*?)\s(.+)", row[j])
if atr_regex:
attributes[atr_regex[1]] = _deduceType(atr_regex[2])
# (trk-atr) text
trk_regex = re.match(r"^\(trk-atr\) (.*?)\s(.+)", row[j])
if trk_regex:
track_attributes[trk_regex[1]] = _deduceType(trk_regex[2])
# (poly) x1 y1 x2 y2 ...
poly_regex = re.match(r"^(\(poly\)) ((?:[0-9]+\.*[0-9]*\s*)+)", row[j])
if poly_regex:
temp = [float(x) for x in poly_regex[2].split()]
coords = list(zip(temp[::2], temp[1::2]))
create_geoJSONFeature(features, 'Polygon', coords)
if len(head_tail) == 2:
create_geoJSONFeature(features, 'LineString', head_tail, 'HeadTails')
# ensure confidence pairs list is not empty
if len(sorted_confidence_pairs) == 0:
# extract Detection or Length Confidence field
try:
confidence = float(row[7])
except ValueError: # in case field is empty
confidence = 1.0
# add a dummy pair with a default type
sorted_confidence_pairs.append(('unknown', confidence))
return features, attributes, track_attributes, sorted_confidence_pairs
def _parse_row_for_tracks(row: List[str]) -> Tuple[Feature, Dict, Dict, List]:
head_tail_feature, attributes, track_attributes, confidence_pairs = _parse_row(row)
trackId, filename, frame, bounds, fishLength = row_info(row)
feature = Feature(
frame=frame,
bounds=bounds,
attributes=attributes or None,
fishLength=fishLength if fishLength > 0 else None,
**head_tail_feature,
)
# Pass the rest of the unchanged info through as well
return feature, attributes, track_attributes, confidence_pairs
def create_attributes(
metadata_attributes: Dict[str, Dict[str, Any]],
test_vals: Dict[str, Dict[str, int]],
atr_type: str,
key: str,
val,
):
valstring = f'{val}'
attribute_key = f'{atr_type}_{key}'
if attribute_key not in metadata_attributes:
metadata_attributes[attribute_key] = {
'belongs': atr_type,
'datatype': 'text',
'name': key,
'key': attribute_key,
}
test_vals[attribute_key] = {}
test_vals[attribute_key][valstring] = 1
elif attribute_key in metadata_attributes and attribute_key in test_vals:
if valstring in test_vals[attribute_key]:
test_vals[attribute_key][valstring] += 1
else:
test_vals[attribute_key][valstring] = 1
def calculate_attribute_types(
metadata_attributes: Dict[str, Dict[str, Any]], test_vals: Dict[str, Dict[str, int]]
):
    # every observed value must occur at least this many times for a text attribute
    # to be given a predefined list of values
predefined_min_count = 3
for attributeKey in metadata_attributes.keys():
if attributeKey in test_vals:
attribute_type = 'number'
low_count = predefined_min_count
values = []
for (key, val) in test_vals[attributeKey].items():
if val <= low_count:
low_count = val
values.append(key)
if attribute_type == 'number':
try:
float(key)
except ValueError:
attribute_type = 'boolean'
if attribute_type == 'boolean' and key != 'True' and key != 'False':
attribute_type = 'text'
# If all text values are used 3 or more times they are defined values
if low_count >= predefined_min_count and 'text' in attribute_type:
metadata_attributes[attributeKey]['values'] = values
metadata_attributes[attributeKey]['datatype'] = attribute_type
def load_csv_as_tracks_and_attributes(rows: List[str]) -> Tuple[dict, dict]:
"""
Convert VIAME CSV to json tracks.
Expect detections to be in increasing order (either globally or by track).
"""
reader = csv.reader(row for row in rows if (not row.startswith("#") and row))
tracks: Dict[int, Track] = {}
metadata_attributes: Dict[str, Dict[str, Any]] = {}
test_vals: Dict[str, Dict[str, int]] = {}
for row in reader:
(
feature,
attributes,
track_attributes,
confidence_pairs,
) = _parse_row_for_tracks(row)
trackId, _, frame, _, _ = row_info(row)
if trackId not in tracks:
tracks[trackId] = Track(begin=frame, end=frame, trackId=trackId)
track = tracks[trackId]
track.begin = min(frame, track.begin)
track.end = max(track.end, frame)
track.features.append(feature)
track.confidencePairs = confidence_pairs
for (key, val) in track_attributes.items():
track.attributes[key] = val
create_attributes(metadata_attributes, test_vals, 'track', key, val)
for (key, val) in attributes.items():
create_attributes(metadata_attributes, test_vals, 'detection', key, val)
# Now we process all the metadata_attributes for the types
calculate_attribute_types(metadata_attributes, test_vals)
track_json = {trackId: track.dict(exclude_none=True) for trackId, track in tracks.items()}
return track_json, metadata_attributes
def export_tracks_as_csv(
track_iterator,
excludeBelowThreshold=False,
thresholds=None,
filenames=None,
fps=None,
header=True,
typeFilter=None,
) -> Generator[str, None, None]:
"""
Export track json to a CSV format.
:param excludeBelowThreshold: omit tracks below a certain confidence. Requires thresholds.
:param thresholds: key/value pairs with threshold values
:param filenames: list of string file names. filenames[n] should be the image at frame n
:param fps: if FPS is set, column 2 will be video timestamp derived from (frame / fps)
:param header: include or omit header
:param typeFilter: set of track types to only export if not empty
"""
if thresholds is None:
thresholds = {}
if typeFilter is None:
typeFilter = set()
csvFile = io.StringIO()
writer = csv.writer(csvFile)
if header:
metadata = {}
if fps is not None:
metadata["fps"] = fps
writeHeader(writer, metadata)
for t in track_iterator:
track = Track(**t)
if (not excludeBelowThreshold) or track.exceeds_thresholds(thresholds):
# filter by types if applicable
if typeFilter:
confidence_pairs = [item for item in track.confidencePairs if item[0] in typeFilter]
# skip line if no confidence pairs
if not confidence_pairs:
continue
else:
confidence_pairs = track.confidencePairs
sorted_confidence_pairs = sorted(
confidence_pairs, key=lambda item: item[1], reverse=True
)
for index, keyframe in enumerate(track.features):
features = [keyframe]
# If this is not the last keyframe, and interpolation is
# enabled for this keyframe, interpolate
if keyframe.interpolate and index < len(track.features) - 1:
nextKeyframe = track.features[index + 1]
# interpolate all features in [a,b)
features = interpolate(keyframe, nextKeyframe)
for feature in features:
columns = [
track.trackId,
"",
feature.frame,
*feature.bounds,
sorted_confidence_pairs[0][1],
feature.fishLength or -1,
]
# If FPS is set, column 2 will be video timestamp
if fps is not None and fps > 0:
columns[1] = format_timestamp(fps, feature.frame)
# else if filenames is set, column 2 will be image file name
elif filenames and feature.frame < len(filenames):
columns[1] = filenames[feature.frame]
for pair in sorted_confidence_pairs:
columns.extend(list(pair))
if feature.attributes:
for key, val in feature.attributes.items():
columns.append(f"(atr) {key} {valueToString(val)}")
if track.attributes:
for key, val in track.attributes.items():
columns.append(f"(trk-atr) {key} {valueToString(val)}")
if feature.geometry and "FeatureCollection" == feature.geometry.type:
for geoJSONFeature in feature.geometry.features:
if 'Polygon' == geoJSONFeature.geometry.type:
# Coordinates need to be flattened out from their list of tuples
coordinates = [
item
for sublist in geoJSONFeature.geometry.coordinates[
0
] # type: ignore
for item in sublist # type: ignore
]
columns.append(
f"(poly) {" ".join(map(lambda x: str(round(x)), coordinates))}"
)
if 'Point' == geoJSONFeature.geometry.type:
coordinates = geoJSONFeature.geometry.coordinates # type: ignore
columns.append(
f"(kp) {geoJSONFeature.properties["key"]} "
f"{round(coordinates[0])} {round(coordinates[1])}"
)
# TODO: support for multiple GeoJSON Objects of the same type
# once the CSV supports it
writer.writerow(columns)
yield csvFile.getvalue()
csvFile.seek(0)
csvFile.truncate(0)
yield csvFile.getvalue()
|
"""
VIAME Fish format deserializer
"""
import csv
import datetime
import io
import json
import re
from typing import Any, Dict, Generator, List, Tuple, Union
from dive_utils.models import Feature, Track, interpolate
def format_timestamp(fps: int, frame: int) -> str:
return str(datetime.datetime.utcfromtimestamp(frame / fps).strftime(r'%H:%M:%S.%f'))
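# e.g. (illustrative): format_timestamp(10, 25) -> "00:00:02.500000"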
def writeHeader(writer: 'csv._writer', metadata: Dict): # type: ignore
writer.writerow(
[
"# 1: Detection or Track-id",
"2: Video or Image Identifier",
"3: Unique Frame Identifier",
"4-7: Img-bbox(TL_x",
"TL_y",
"BR_x",
"BR_y)",
"8: Detection or Length Confidence",
"9: Target Length (0 or -1 if invalid)",
"10-11+: Repeated Species",
"Confidence Pairs or Attributes",
]
)
metadata_dict = {}
metadata_dict.update(metadata)
metadata_dict['exported_by'] = 'dive:python'
metadata_dict['exported_time'] = datetime.datetime.now().ctime()
metadata_list = []
for (key, value) in metadata_dict.items():
metadata_list.append(f"{key}: {json.dumps(value)}")
writer.writerow(['# metadata', *metadata_list])
def valueToString(value):
if value is True:
return "true"
elif value is False:
return "false"
return str(value)
def row_info(row: List[str]) -> Tuple[int, str, int, List[int], float]:
trackId = int(row[0])
filename = str(row[1])
frame = int(row[2])
bounds = [round(float(x)) for x in row[3:7]]
fish_length = float(row[8])
return trackId, filename, frame, bounds, fish_length
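# Illustrative (hypothetical) VIAME CSV row matching the columns parsed above:
#   "0,image001.png,0,10,20,50,60,0.9,-1,fish,0.9"
# giving trackId=0, filename="image001.png", frame=0, bounds=[10, 20, 50, 60] and
# fish_length=-1.0 (column 8; column 7 holds the detection confidence).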
def _deduceType(value: str) -> Union[bool, float, str]:
if value == "true":
return True
if value == "false":
return False
try:
number = float(value)
return number
except ValueError:
return value
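# e.g. (illustrative): _deduceType("true") is True, _deduceType("3.5") == 3.5,
# and anything non-numeric such as _deduceType("herring") stays a plain string.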
def create_geoJSONFeature(features: Dict[str, Any], type: str, coords: List[Any], key=''):
feature = {}
if "geometry" not in features:
features["geometry"] = {"type": "FeatureCollection", "features": []}
else: # check for existing type/key pairs
if features["geometry"]["features"]:
for subfeature in features["geometry"]["features"]:
if (
subfeature["geometry"]["type"] == type
and subfeature["properties"]["key"] == key
):
feature = subfeature
break
if "geometry" not in feature:
feature = {
"type": "Feature",
"properties": {"key": key},
"geometry": {"type": type},
}
if type == 'Polygon':
feature["geometry"]['coordinates'] = [coords]
elif type in ["LineString", "Point"]:
feature['geometry']['coordinates'] = coords
features['geometry']['features'].append(feature)
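# For reference, a call like create_geoJSONFeature(features, 'Point', [12.0, 22.0], 'head')
# leaves features["geometry"] shaped roughly as (sketch):
#   {"type": "FeatureCollection",
#    "features": [{"type": "Feature", "properties": {"key": "head"},
#                  "geometry": {"type": "Point", "coordinates": [12.0, 22.0]}}]}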
def _parse_row(row: List[str]) -> Tuple[Dict, Dict, Dict, List]:
"""
Parse a single CSV line into its composite track and detection parts
"""
features: Dict[str, Any] = {}
attributes: Dict[str, Any] = {}
track_attributes: Dict[str, Any] = {}
confidence_pairs: List[Tuple[str, float]] = [
(row[i], float(row[i + 1]))
for i in range(9, len(row), 2)
if i + 1 < len(row) and row[i] and row[i + 1] and not row[i].startswith("(")
]
sorted_confidence_pairs = sorted(confidence_pairs, key=lambda item: item[1], reverse=True)
head_tail = []
start = 9 + len(sorted_confidence_pairs) * 2
for j in range(start, len(row)):
# (kp) head x y
head_regex = re.match(r"^\(kp\) head ([0-9]+\.*[0-9]*) ([0-9]+\.*[0-9]*)", row[j])
if head_regex:
point = [float(head_regex[1]), float(head_regex[2])]
head_tail.append(point)
create_geoJSONFeature(features, 'Point', point, 'head')
# (kp) tail x y
tail_regex = re.match(r"^\(kp\) tail ([0-9]+\.*[0-9]*) ([0-9]+\.*[0-9]*)", row[j])
if tail_regex:
point = [float(tail_regex[1]), float(tail_regex[2])]
head_tail.append(point)
create_geoJSONFeature(features, 'Point', point, 'tail')
# (atr) text
atr_regex = re.match(r"^\(atr\) (.*?)\s(.+)", row[j])
if atr_regex:
attributes[atr_regex[1]] = _deduceType(atr_regex[2])
# (trk-atr) text
trk_regex = re.match(r"^\(trk-atr\) (.*?)\s(.+)", row[j])
if trk_regex:
track_attributes[trk_regex[1]] = _deduceType(trk_regex[2])
# (poly) x1 y1 x2 y2 ...
poly_regex = re.match(r"^(\(poly\)) ((?:[0-9]+\.*[0-9]*\s*)+)", row[j])
if poly_regex:
temp = [float(x) for x in poly_regex[2].split()]
coords = list(zip(temp[::2], temp[1::2]))
create_geoJSONFeature(features, 'Polygon', coords)
if len(head_tail) == 2:
create_geoJSONFeature(features, 'LineString', head_tail, 'HeadTails')
# ensure confidence pairs list is not empty
if len(sorted_confidence_pairs) == 0:
# extract Detection or Length Confidence field
try:
confidence = float(row[7])
except ValueError: # in case field is empty
confidence = 1.0
# add a dummy pair with a default type
sorted_confidence_pairs.append(('unknown', confidence))
return features, attributes, track_attributes, sorted_confidence_pairs
def _parse_row_for_tracks(row: List[str]) -> Tuple[Feature, Dict, Dict, List]:
head_tail_feature, attributes, track_attributes, confidence_pairs = _parse_row(row)
trackId, filename, frame, bounds, fishLength = row_info(row)
feature = Feature(
frame=frame,
bounds=bounds,
attributes=attributes or None,
fishLength=fishLength if fishLength > 0 else None,
**head_tail_feature,
)
# Pass the rest of the unchanged info through as well
return feature, attributes, track_attributes, confidence_pairs
def create_attributes(
metadata_attributes: Dict[str, Dict[str, Any]],
test_vals: Dict[str, Dict[str, int]],
atr_type: str,
key: str,
val,
):
valstring = f'{val}'
attribute_key = f'{atr_type}_{key}'
if attribute_key not in metadata_attributes:
metadata_attributes[attribute_key] = {
'belongs': atr_type,
'datatype': 'text',
'name': key,
'key': attribute_key,
}
test_vals[attribute_key] = {}
test_vals[attribute_key][valstring] = 1
elif attribute_key in metadata_attributes and attribute_key in test_vals:
if valstring in test_vals[attribute_key]:
test_vals[attribute_key][valstring] += 1
else:
test_vals[attribute_key][valstring] = 1
def calculate_attribute_types(
metadata_attributes: Dict[str, Dict[str, Any]], test_vals: Dict[str, Dict[str, int]]
):
    # every observed value must occur at least this many times for a text attribute
    # to be given a predefined list of values
predefined_min_count = 3
for attributeKey in metadata_attributes.keys():
if attributeKey in test_vals:
attribute_type = 'number'
low_count = predefined_min_count
values = []
for (key, val) in test_vals[attributeKey].items():
if val <= low_count:
low_count = val
values.append(key)
if attribute_type == 'number':
try:
float(key)
except ValueError:
attribute_type = 'boolean'
if attribute_type == 'boolean' and key != 'True' and key != 'False':
attribute_type = 'text'
# If all text values are used 3 or more times they are defined values
if low_count >= predefined_min_count and 'text' in attribute_type:
metadata_attributes[attributeKey]['values'] = values
metadata_attributes[attributeKey]['datatype'] = attribute_type
def load_csv_as_tracks_and_attributes(rows: List[str]) -> Tuple[dict, dict]:
"""
Convert VIAME CSV to json tracks.
Expect detections to be in increasing order (either globally or by track).
"""
reader = csv.reader(row for row in rows if (not row.startswith("#") and row))
tracks: Dict[int, Track] = {}
metadata_attributes: Dict[str, Dict[str, Any]] = {}
test_vals: Dict[str, Dict[str, int]] = {}
for row in reader:
(
feature,
attributes,
track_attributes,
confidence_pairs,
) = _parse_row_for_tracks(row)
trackId, _, frame, _, _ = row_info(row)
if trackId not in tracks:
tracks[trackId] = Track(begin=frame, end=frame, trackId=trackId)
track = tracks[trackId]
track.begin = min(frame, track.begin)
track.end = max(track.end, frame)
track.features.append(feature)
track.confidencePairs = confidence_pairs
for (key, val) in track_attributes.items():
track.attributes[key] = val
create_attributes(metadata_attributes, test_vals, 'track', key, val)
for (key, val) in attributes.items():
create_attributes(metadata_attributes, test_vals, 'detection', key, val)
# Now we process all the metadata_attributes for the types
calculate_attribute_types(metadata_attributes, test_vals)
track_json = {trackId: track.dict(exclude_none=True) for trackId, track in tracks.items()}
return track_json, metadata_attributes
def export_tracks_as_csv(
track_iterator,
excludeBelowThreshold=False,
thresholds=None,
filenames=None,
fps=None,
header=True,
typeFilter=None,
) -> Generator[str, None, None]:
"""
Export track json to a CSV format.
:param excludeBelowThreshold: omit tracks below a certain confidence. Requires thresholds.
:param thresholds: key/value pairs with threshold values
:param filenames: list of string file names. filenames[n] should be the image at frame n
:param fps: if FPS is set, column 2 will be video timestamp derived from (frame / fps)
:param header: include or omit header
:param typeFilter: set of track types to only export if not empty
"""
if thresholds is None:
thresholds = {}
if typeFilter is None:
typeFilter = set()
csvFile = io.StringIO()
writer = csv.writer(csvFile)
if header:
metadata = {}
if fps is not None:
metadata["fps"] = fps
writeHeader(writer, metadata)
for t in track_iterator:
track = Track(**t)
if (not excludeBelowThreshold) or track.exceeds_thresholds(thresholds):
# filter by types if applicable
if typeFilter:
confidence_pairs = [item for item in track.confidencePairs if item[0] in typeFilter]
# skip line if no confidence pairs
if not confidence_pairs:
continue
else:
confidence_pairs = track.confidencePairs
sorted_confidence_pairs = sorted(
confidence_pairs, key=lambda item: item[1], reverse=True
)
for index, keyframe in enumerate(track.features):
features = [keyframe]
# If this is not the last keyframe, and interpolation is
# enabled for this keyframe, interpolate
if keyframe.interpolate and index < len(track.features) - 1:
nextKeyframe = track.features[index + 1]
# interpolate all features in [a,b)
features = interpolate(keyframe, nextKeyframe)
for feature in features:
columns = [
track.trackId,
"",
feature.frame,
*feature.bounds,
sorted_confidence_pairs[0][1],
feature.fishLength or -1,
]
# If FPS is set, column 2 will be video timestamp
if fps is not None and fps > 0:
columns[1] = format_timestamp(fps, feature.frame)
# else if filenames is set, column 2 will be image file name
elif filenames and feature.frame < len(filenames):
columns[1] = filenames[feature.frame]
for pair in sorted_confidence_pairs:
columns.extend(list(pair))
if feature.attributes:
for key, val in feature.attributes.items():
columns.append(f"(atr) {key} {valueToString(val)}")
if track.attributes:
for key, val in track.attributes.items():
columns.append(f"(trk-atr) {key} {valueToString(val)}")
if feature.geometry and "FeatureCollection" == feature.geometry.type:
for geoJSONFeature in feature.geometry.features:
if 'Polygon' == geoJSONFeature.geometry.type:
# Coordinates need to be flattened out from their list of tuples
coordinates = [
item
for sublist in geoJSONFeature.geometry.coordinates[
0
] # type: ignore
for item in sublist # type: ignore
]
columns.append(
f"(poly) {' '.join(map(lambda x: str(round(x)), coordinates))}"
)
if 'Point' == geoJSONFeature.geometry.type:
coordinates = geoJSONFeature.geometry.coordinates # type: ignore
columns.append(
f"(kp) {geoJSONFeature.properties['key']} "
f"{round(coordinates[0])} {round(coordinates[1])}"
)
# TODO: support for multiple GeoJSON Objects of the same type
# once the CSV supports it
writer.writerow(columns)
yield csvFile.getvalue()
csvFile.seek(0)
csvFile.truncate(0)
yield csvFile.getvalue()
|
import base64
import json
filter_keys = ['id', 'name', 'abv', 'ibu', 'target_fg', 'target_og', 'ebc', 'srm', 'ph']
def lambda_handler(event, context):
output = []
print (f"Evento: {event}")
print (f"Leitura dos registros: {len(event["records"])}")
for record in event['records']:
print(f"ID: {record["recordId"]}")
payload = base64.b64decode(record['data'])
        # Retrieve the specific fields needed by the model
print (f"Payload: {payload}")
data_str = payload.decode()
print (f"Data string: {data_str}")
data_dict = json.loads(data_str)
print (f"Data dict: {data_dict}")
final_data = { k: data_dict[k] for k in filter_keys }
print (f"Chaves finais: {final_data.keys()}")
encoded_data = f"{final_data}\n".encode()
print (f"Encoded final data: {encoded_data}")
output_record = {
'recordId': record['recordId'],
'result': 'Ok',
'data': base64.b64encode(encoded_data)
}
output.append(output_record)
print(f"Processado {len(event["records"])} registros com sucesso.")
print(f"Output: {output}")
return {'records': output}
# Ref: https://github.com/amazon-archives/serverless-app-examples/blob/master/python/kinesis-firehose-process-record-python/lambda_function.py
|
import base64
import json
filter_keys = ['id', 'name', 'abv', 'ibu', 'target_fg', 'target_og', 'ebc', 'srm', 'ph']
def lambda_handler(event, context):
output = []
print (f"Evento: {event}")
print (f"Leitura dos registros: {len(event['records'])}")
for record in event['records']:
print(f"ID: {record['recordId']}")
payload = base64.b64decode(record['data'])
        # Retrieve the specific fields needed by the model
print (f"Payload: {payload}")
data_str = payload.decode()
print (f"Data string: {data_str}")
data_dict = json.loads(data_str)
print (f"Data dict: {data_dict}")
final_data = { k: data_dict[k] for k in filter_keys }
print (f"Chaves finais: {final_data.keys()}")
encoded_data = f"{final_data}\n".encode()
print (f"Encoded final data: {encoded_data}")
output_record = {
'recordId': record['recordId'],
'result': 'Ok',
'data': base64.b64encode(encoded_data)
}
output.append(output_record)
print(f"Processado {len(event['records'])} registros com sucesso.")
print(f"Output: {output}")
return {'records': output}
# Ref: https://github.com/amazon-archives/serverless-app-examples/blob/master/python/kinesis-firehose-process-record-python/lambda_function.py
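# Illustrative local invocation (hypothetical payload; mirrors the Firehose
# transformation event shape consumed by lambda_handler above):
#
#     sample = {k: 0 for k in filter_keys}
#     record = {"recordId": "rec-1",
#               "data": base64.b64encode(json.dumps(sample).encode())}
#     lambda_handler({"records": [record]}, None)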
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import argparse
import base64
import hashlib
import io
import logging
import os
import subprocess
import sys
import time
import zipfile
import simplejson as json
from keylime.requests_client import RequestsClient
from keylime.common import states
from keylime import config
from keylime import keylime_logging
from keylime import registrar_client
from keylime.tpm import tpm_obj
from keylime.tpm.tpm_abstract import TPM_Utilities
from keylime import ima
from keylime import crypto
from keylime.cmd import user_data_encrypt
from keylime import ca_util
from keylime.common import algorithms
# setup logging
logger = keylime_logging.init_logging('tenant')
# special exception that suppresses stack traces when it happens
class UserError(Exception):
pass
class Tenant():
"""Simple command processor example."""
config = None
cloudverifier_ip = None
cloudverifier_port = None
cloudagent_ip = None
cv_cloudagent_ip = None
cloudagent_port = None
registrar_ip = None
registrar_port = None
webapp_ip = None
webapp_port = None
uuid_service_generate_locally = None
agent_uuid = None
K = None
V = None
U = None
auth_tag = None
tpm_policy = None
vtpm_policy = {}
metadata = {}
allowlist = {}
revocation_key = ""
accept_tpm_hash_algs = []
accept_tpm_encryption_algs = []
accept_tpm_signing_algs = []
payload = None
def __init__(self):
""" Set up required values and TLS
"""
self.agent_ip = None
self.nonce = None
self.verifier_ip = config.get('cloud_verifier', 'cloudverifier_ip')
self.verifier_port = config.get('cloud_verifier', 'cloudverifier_port')
self.agent_port = config.get('cloud_agent', 'cloudagent_port')
self.registrar_port = config.get('registrar', 'registrar_tls_port')
self.webapp_port = config.getint('webapp', 'webapp_port')
if not config.REQUIRE_ROOT and self.webapp_port < 1024:
self.webapp_port += 2000
self.registrar_ip = config.get('registrar', 'registrar_ip')
self.verifier_base_url = f'{self.verifier_ip}:{self.verifier_port}'
self.webapp_ip = config.get('webapp', 'webapp_ip')
self.my_cert, self.my_priv_key = self.get_tls_context()
self.cert = (self.my_cert, self.my_priv_key)
if config.getboolean('general', "enable_tls"):
self.tls_enabled = True
else:
self.tls_enabled = False
self.cert = ""
logger.warning(
"TLS is currently disabled, keys will be sent in the clear! Should only be used for testing")
def get_tls_context(self):
"""Generate certifcate naming and path
Returns:
string -- my_cert (client_cert), my_priv_key (client private key)
"""
my_cert = config.get('tenant', 'my_cert')
my_priv_key = config.get('tenant', 'private_key')
tls_dir = config.get('tenant', 'tls_dir')
if tls_dir == 'default':
my_cert = 'client-cert.crt'
my_priv_key = 'client-private.pem'
tls_dir = 'cv_ca'
if tls_dir[0] != '/':
tls_dir = os.path.abspath('%s/%s' % (config.WORK_DIR, tls_dir))
logger.info(f"Setting up client TLS in {tls_dir}")
my_cert = "%s/%s" % (tls_dir, my_cert)
my_priv_key = "%s/%s" % (tls_dir, my_priv_key)
return my_cert, my_priv_key
def init_add(self, args):
""" Set up required values. Command line options can overwrite these config values
Arguments:
args {[string]} -- agent_ip|agent_port|cv_agent_ip
"""
if "agent_ip" in args:
self.agent_ip = args["agent_ip"]
if 'agent_port' in args and args['agent_port'] is not None:
self.agent_port = args['agent_port']
if 'cv_agent_ip' in args and args['cv_agent_ip'] is not None:
self.cv_cloudagent_ip = args['cv_agent_ip']
else:
self.cv_cloudagent_ip = self.agent_ip
# Make sure all keys exist in dictionary
if "file" not in args:
args["file"] = None
if "keyfile" not in args:
args["keyfile"] = None
if "payload" not in args:
args["payload"] = None
if "ca_dir" not in args:
args["ca_dir"] = None
if "incl_dir" not in args:
args["incl_dir"] = None
if "ca_dir_pw" not in args:
args["ca_dir_pw"] = None
# Set up accepted algorithms
self.accept_tpm_hash_algs = config.get(
'tenant', 'accept_tpm_hash_algs').split(',')
self.accept_tpm_encryption_algs = config.get(
'tenant', 'accept_tpm_encryption_algs').split(',')
self.accept_tpm_signing_algs = config.get(
'tenant', 'accept_tpm_signing_algs').split(',')
# Set up PCR values
tpm_policy = config.get('tenant', 'tpm_policy')
if "tpm_policy" in args and args["tpm_policy"] is not None:
tpm_policy = args["tpm_policy"]
self.tpm_policy = TPM_Utilities.readPolicy(tpm_policy)
logger.info(f"TPM PCR Mask from policy is {self.tpm_policy["mask"]}")
vtpm_policy = config.get('tenant', 'vtpm_policy')
if "vtpm_policy" in args and args["vtpm_policy"] is not None:
vtpm_policy = args["vtpm_policy"]
self.vtpm_policy = TPM_Utilities.readPolicy(vtpm_policy)
logger.info(f"TPM PCR Mask from policy is {self.vtpm_policy["mask"]}")
# Read command-line path string allowlist
al_data = None
if "allowlist" in args and args["allowlist"] is not None:
# Auto-enable IMA (or-bit mask)
self.tpm_policy['mask'] = "0x%X" % (
int(self.tpm_policy['mask'], 0) | (1 << config.IMA_PCR))
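            # e.g. (illustrative) with a mask of "0x400000" and IMA_PCR == 10:
            #   0x400000 | (1 << 10) == 0x400400, i.e. PCR 10 is added to the mask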
if isinstance(args["allowlist"], str):
if args["allowlist"] == "default":
args["allowlist"] = config.get(
'tenant', 'allowlist')
al_data = ima.read_allowlist(args["allowlist"])
elif isinstance(args["allowlist"], list):
al_data = args["allowlist"]
else:
raise UserError("Invalid allowlist provided")
# Read command-line path string IMA exclude list
excl_data = None
if "ima_exclude" in args and args["ima_exclude"] is not None:
if isinstance(args["ima_exclude"], str):
if args["ima_exclude"] == "default":
args["ima_exclude"] = config.get(
'tenant', 'ima_excludelist')
excl_data = ima.read_excllist(args["ima_exclude"])
elif isinstance(args["ima_exclude"], list):
excl_data = args["ima_exclude"]
else:
raise UserError("Invalid exclude list provided")
# Set up IMA
if TPM_Utilities.check_mask(self.tpm_policy['mask'], config.IMA_PCR) or \
TPM_Utilities.check_mask(self.vtpm_policy['mask'], config.IMA_PCR):
# Process allowlists
self.allowlist = ima.process_allowlists(al_data, excl_data)
# if none
if (args["file"] is None and args["keyfile"] is None and args["ca_dir"] is None):
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
if args["keyfile"] is not None:
if args["file"] is not None or args["ca_dir"] is not None:
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
# read the keys in
if isinstance(args["keyfile"], dict) and "data" in args["keyfile"]:
if isinstance(args["keyfile"]["data"], list) and len(args["keyfile"]["data"]) == 1:
keyfile = args["keyfile"]["data"][0]
if keyfile is None:
raise UserError("Invalid key file contents")
f = io.StringIO(keyfile)
else:
raise UserError("Invalid key file provided")
else:
f = open(args["keyfile"], 'r')
self.K = base64.b64decode(f.readline())
self.U = base64.b64decode(f.readline())
self.V = base64.b64decode(f.readline())
f.close()
# read the payload in (opt.)
if isinstance(args["payload"], dict) and "data" in args["payload"]:
if isinstance(args["payload"]["data"], list) and len(args["payload"]["data"]) > 0:
self.payload = args["payload"]["data"][0]
else:
if args["payload"] is not None:
f = open(args["payload"], 'r')
self.payload = f.read()
f.close()
if args["file"] is not None:
if args["keyfile"] is not None or args["ca_dir"] is not None:
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
if isinstance(args["file"], dict) and "data" in args["file"]:
if isinstance(args["file"]["data"], list) and len(args["file"]["data"]) > 0:
contents = args["file"]["data"][0]
if contents is None:
raise UserError("Invalid file payload contents")
else:
raise UserError("Invalid file payload provided")
else:
with open(args["file"], 'r') as f:
contents = f.read()
ret = user_data_encrypt.encrypt(contents)
self.K = ret['k']
self.U = ret['u']
self.V = ret['v']
self.payload = ret['ciphertext']
if args["ca_dir"] is None and args["incl_dir"] is not None:
raise UserError(
"--include option is only valid when used with --cert")
if args["ca_dir"] is not None:
if args["file"] is not None or args["keyfile"] is not None:
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
if args["ca_dir"] == 'default':
args["ca_dir"] = config.CA_WORK_DIR
if "ca_dir_pw" in args and args["ca_dir_pw"] is not None:
ca_util.setpassword(args["ca_dir_pw"])
if not os.path.exists(args["ca_dir"]) or not os.path.exists("%s/cacert.crt" % args["ca_dir"]):
logger.warning(" CA directory does not exist. Creating...")
ca_util.cmd_init(args["ca_dir"])
if not os.path.exists("%s/%s-private.pem" % (args["ca_dir"], self.agent_uuid)):
ca_util.cmd_mkcert(args["ca_dir"], self.agent_uuid)
cert_pkg, serial, subject = ca_util.cmd_certpkg(
args["ca_dir"], self.agent_uuid)
# support revocation
if not os.path.exists("%s/RevocationNotifier-private.pem" % args["ca_dir"]):
ca_util.cmd_mkcert(args["ca_dir"], "RevocationNotifier")
rev_package, _, _ = ca_util.cmd_certpkg(
args["ca_dir"], "RevocationNotifier")
# extract public and private keys from package
sf = io.BytesIO(rev_package)
with zipfile.ZipFile(sf) as zf:
privkey = zf.read("RevocationNotifier-private.pem")
cert = zf.read("RevocationNotifier-cert.crt")
# put the cert of the revoker into the cert package
sf = io.BytesIO(cert_pkg)
with zipfile.ZipFile(sf, 'a', compression=zipfile.ZIP_STORED) as zf:
zf.writestr('RevocationNotifier-cert.crt', cert)
# add additional files to zip
if args["incl_dir"] is not None:
if isinstance(args["incl_dir"], dict) and "data" in args["incl_dir"] and "name" in args["incl_dir"]:
if isinstance(args["incl_dir"]["data"], list) and isinstance(args["incl_dir"]["name"], list):
if len(args["incl_dir"]["data"]) != len(args["incl_dir"]["name"]):
raise UserError("Invalid incl_dir provided")
for i in range(len(args["incl_dir"]["data"])):
zf.writestr(os.path.basename(
args["incl_dir"]["name"][i]), args["incl_dir"]["data"][i])
else:
if os.path.exists(args["incl_dir"]):
files = next(os.walk(args["incl_dir"]))[2]
for filename in files:
with open("%s/%s" % (args["incl_dir"], filename), 'rb') as f:
zf.writestr(
os.path.basename(f.name), f.read())
else:
logger.warn(
                                f"Specified include directory {args['incl_dir']} does not exist. Skipping...")
cert_pkg = sf.getvalue()
# put the private key into the data to be send to the CV
self.revocation_key = privkey
# encrypt up the cert package
ret = user_data_encrypt.encrypt(cert_pkg)
self.K = ret['k']
self.U = ret['u']
self.V = ret['v']
self.metadata = {'cert_serial': serial, 'subject': subject}
self.payload = ret['ciphertext']
if self.payload is not None and len(self.payload) > config.getint('tenant', 'max_payload_size'):
raise UserError("Payload size %s exceeds max size %d" % (
len(self.payload), config.getint('tenant', 'max_payload_size')))
def preloop(self):
""" encrypt the agent UUID as a check for delivering the correct key
"""
self.auth_tag = crypto.do_hmac(self.K, self.agent_uuid)
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if config.INSECURE_DEBUG:
logger.debug(F"K: {base64.b64encode(self.K)}")
logger.debug(F"V: {base64.b64encode(self.V)}")
logger.debug(F"U: {base64.b64encode(self.U)}")
logger.debug(F"Auth Tag: {self.auth_tag}")
def check_ek(self, ek, ekcert, tpm):
""" Check the Entity Key
Arguments:
ek {[type]} -- [description]
ekcert {[type]} -- [description]
tpm {[type]} -- [description]
Returns:
[type] -- [description]
"""
if config.getboolean('tenant', 'require_ek_cert'):
if config.STUB_TPM:
logger.debug("not checking ekcert due to STUB_TPM mode")
elif ekcert == 'virtual':
logger.debug("not checking ekcert of VTPM")
elif ekcert == 'emulator' and config.DISABLE_EK_CERT_CHECK_EMULATOR:
logger.debug("not checking ekcert of TPM emulator")
elif ekcert is None:
logger.warning(
"No EK cert provided, require_ek_cert option in config set to True")
return False
elif not tpm.verify_ek(base64.b64decode(ekcert), ek):
logger.warning("Invalid EK certificate")
return False
return True
def validate_tpm_quote(self, public_key, quote, tpm_version, hash_alg):
""" Validate TPM Quote received from the Agent
Arguments:
public_key {[type]} -- [description]
quote {[type]} -- [description]
tpm_version {[type]} -- [description]
hash_alg {bool} -- [description]
Raises:
UserError: [description]
Returns:
[type] -- [description]
"""
registrar_client.init_client_tls('tenant')
reg_keys = registrar_client.getKeys(
self.registrar_ip, self.registrar_port, self.agent_uuid)
if reg_keys is None:
logger.warning("AIK not found in registrar, quote not validated")
return False
tpm = tpm_obj.getTPM(need_hw_tpm=False, tpm_version=tpm_version)
if not tpm.check_quote(self.agent_uuid, self.nonce, public_key, quote, reg_keys['aik'], hash_alg=hash_alg):
if reg_keys['regcount'] > 1:
logger.error("WARNING: This UUID had more than one ek-ekcert registered to it! This might indicate that your system is misconfigured or a malicious host is present. Run 'regdelete' for this agent and restart")
sys.exit()
return False
if reg_keys['regcount'] > 1:
logger.warn("WARNING: This UUID had more than one ek-ekcert registered to it! This might indicate that your system is misconfigured. Run 'regdelete' for this agent and restart")
if not config.STUB_TPM and (not config.getboolean('tenant', 'require_ek_cert') and config.get('tenant', 'ek_check_script') == ""):
logger.warning(
"DANGER: EK cert checking is disabled and no additional checks on EKs have been specified with ek_check_script option. Keylime is not secure!!")
# check EK cert and make sure it matches EK
if not self.check_ek(reg_keys['ek'], reg_keys['ekcert'], tpm):
return False
# if agent is virtual, check physical EK cert and make sure it matches physical EK
if 'provider_keys' in reg_keys:
if not self.check_ek(reg_keys['provider_keys']['ek'], reg_keys['provider_keys']['ekcert'], tpm):
return False
# check all EKs with optional script:
script = config.get('tenant', 'ek_check_script')
if not script:
return True
if script[0] != '/':
script = "%s/%s" % (config.WORK_DIR, script)
logger.info(f"Checking EK with script {script}")
# now we need to exec the script with the ek and ek cert in vars
env = os.environ.copy()
env['AGENT_UUID'] = self.agent_uuid
env['EK'] = reg_keys['ek']
if reg_keys['ekcert'] is not None:
env['EK_CERT'] = reg_keys['ekcert']
else:
env['EK_CERT'] = ""
env['PROVKEYS'] = json.dumps(reg_keys.get('provider_keys', {}))
proc = subprocess.Popen(script, env=env, shell=True,
cwd=config.WORK_DIR, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
retval = proc.wait()
if retval != 0:
raise UserError("External check script failed to validate EK")
logger.debug(
"External check script successfully to validated EK")
while True:
line = proc.stdout.readline().decode()
if line == "":
break
logger.debug(f"ek_check output: {line.strip()}")
return True
def do_cv(self):
""" Initiaite v, agent_id and ip and initiate the cloudinit sequence
"""
b64_v = base64.b64encode(self.V).decode('utf-8')
logger.debug("b64_v:" + b64_v)
data = {
'v': b64_v,
'cloudagent_ip': self.cv_cloudagent_ip,
'cloudagent_port': self.agent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(self.allowlist),
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': self.accept_tpm_hash_algs,
'accept_tpm_encryption_algs': self.accept_tpm_encryption_algs,
'accept_tpm_signing_algs': self.accept_tpm_signing_algs,
}
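# Only V is handed to the verifier here; U is delivered directly to the agent in
# do_quote(), so the full bootstrap key K is never transmitted in one piece.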
json_message = json.dumps(data)
do_cv = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cv.post(
(f'/agents/{self.agent_uuid}'),
data=json_message,
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code == 409:
# this is a conflict, need to update or delete it
logger.error(
f"Agent {self.agent_uuid} already existed at CV. Please use delete or update.")
sys.exit()
elif response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response.json())
logger.error(
f"POST command response: {response.status} Unexpected response from Cloud Verifier: {response.read()}")
sys.exit()
def do_cvstatus(self, listing=False):
""" Perform opertional state look up for agent
Keyword Arguments:
listing {bool} -- If True, list all agent statuses (default: {False})
"""
agent_uuid = ""
if not listing:
agent_uuid = self.agent_uuid
do_cvstatus = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cvstatus.get(
(f'/agents/{agent_uuid}'),
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code == 404:
logger.error(
f"Agent {agent_uuid} does not exist on the verifier. Please try to add or update agent")
sys.exit()
if response.status_code != 200:
logger.error(
f"Status command response: {response.status}. Unexpected response from Cloud Verifier.")
sys.exit()
else:
response_json = response.json()
if not listing:
operational_state = response_json["results"]["operational_state"]
logger.info(f'Agent Status: "{states.state_to_str(operational_state)}"')
else:
agent_array = response_json["results"]["uuids"]
logger.info(f'Agents: "{agent_array}"')
def do_cvdelete(self):
"""Delete agent from Verifier
"""
do_cvdelete = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cvdelete.delete(
(f'/agents/{self.agent_uuid}'),
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code == 202:
deleted = False
for _ in range(12):
get_cvdelete = RequestsClient(
self.verifier_base_url, self.tls_enabled)
response = get_cvdelete.get(
(f'/agents/{self.agent_uuid}'),
cert=self.cert,
verify=False
)
if response.status_code in (200, 404):
deleted = True
break
time.sleep(.4)
if deleted:
logger.info(
f"CV completed deletion of agent {self.agent_uuid}")
else:
logger.error(
f"Timed out waiting for delete of agent {self.agent_uuid} to complete at CV")
sys.exit()
elif response.status_code == 200:
logger.info(f"Agent {self.agent_uuid} deleted from the CV")
else:
response_body = response.json()
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
def do_regdelete(self):
""" Delete agent from Registrar
"""
registrar_client.init_client_tls('tenant')
registrar_client.doRegistrarDelete(
self.registrar_ip, self.registrar_port, self.agent_uuid)
def do_cvreactivate(self):
""" Reactive Agent
"""
do_cvreactivate = RequestsClient(
self.verifier_base_url, self.tls_enabled)
response = do_cvreactivate.put(
(f'/agents/{self.agent_uuid}/reactivate'),
data=b'',
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
response_body = response.json()
if response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
logger.error(
f"Update command response: {response.status_code} Unexpected response from Cloud Verifier.")
else:
logger.info(f"Agent {self.agent_uuid} re-activated")
def do_cvstop(self):
""" Stop declared active agent
"""
params = f'/agents/{self.agent_uuid}/stop'
do_cvstop = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cvstop.put(
params,
cert=self.cert,
data=b'',
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
response_body = response.json()
if response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
else:
logger.info(f"Agent {self.agent_uuid} stopped")
def do_quote(self):
""" Perform TPM quote by GET towards Agent
Raises:
UserError: Connection handler
"""
self.nonce = TPM_Utilities.random_password(20)
numtries = 0
response = None
# Note: We need a specific retry handler (perhaps in common); no point having a localised one unless we have to.
while True:
try:
params = '/quotes/identity?nonce=%s' % (self.nonce)
cloudagent_base_url = f'{self.agent_ip}:{self.agent_port}'
do_quote = RequestsClient(cloudagent_base_url, tls_enabled=False)
response = do_quote.get(
params,
cert=self.cert
)
response_body = response.json()
except Exception as e:
if response.status_code in (503, 504):
numtries += 1
maxr = config.getint('tenant', 'max_retries')
if numtries >= maxr:
logger.error(
f"tenant cannot establish connection to agent on {self.agent_ip} with port {self.agent_port}")
sys.exit()
retry = config.getfloat('tenant', 'retry_interval')
logger.info(
f"tenant connection to agent at {self.agent_ip} refused {numtries}/{maxr} times, trying again in {retry} seconds...")
time.sleep(retry)
continue
raise e
break
try:
if response is not None and response.status_code != 200:
raise UserError(
"Status command response: %d Unexpected response from Cloud Agent." % response.status)
if "results" not in response_body:
raise UserError(
"Error: unexpected http response body from Cloud Agent: %s" % str(response.status))
quote = response_body["results"]["quote"]
logger.debug(f"agent_quote received quote: {quote}")
public_key = response_body["results"]["pubkey"]
logger.debug(f"agent_quote received public key: {public_key}")
# Get tpm_version, hash_alg
tpm_version = response_body["results"]["tpm_version"]
logger.debug(
f"agent_quote received tpm version: {str(tpm_version)}")
# Ensure hash_alg is in accept_tpm_hash_algs list
hash_alg = response_body["results"]["hash_alg"]
logger.debug(f"agent_quote received hash algorithm: {hash_alg}")
if not algorithms.is_accepted(hash_alg, config.get('tenant', 'accept_tpm_hash_algs').split(',')):
raise UserError(
"TPM Quote is using an unaccepted hash algorithm: %s" % hash_alg)
# Ensure enc_alg is in accept_tpm_encryption_algs list
enc_alg = response_body["results"]["enc_alg"]
logger.debug(
f"agent_quote received encryption algorithm: {enc_alg}")
if not algorithms.is_accepted(enc_alg, config.get('tenant', 'accept_tpm_encryption_algs').split(',')):
raise UserError(
"TPM Quote is using an unaccepted encryption algorithm: %s" % enc_alg)
# Ensure sign_alg is in accept_tpm_encryption_algs list
sign_alg = response_body["results"]["sign_alg"]
logger.debug(f"agent_quote received signing algorithm: {sign_alg}")
if not algorithms.is_accepted(sign_alg, config.get('tenant', 'accept_tpm_signing_algs').split(',')):
raise UserError(
"TPM Quote is using an unaccepted signing algorithm: %s" % sign_alg)
if not self.validate_tpm_quote(public_key, quote, tpm_version, hash_alg):
raise UserError(
"TPM Quote from cloud agent is invalid for nonce: %s" % self.nonce)
logger.info(f"Quote from {self.agent_ip} validated")
# encrypt U with the public key
encrypted_U = crypto.rsa_encrypt(
crypto.rsa_import_pubkey(public_key), self.U)
b64_encrypted_u = base64.b64encode(encrypted_U)
logger.debug("b64_encrypted_u: " + b64_encrypted_u.decode('utf-8'))
data = {
'encrypted_key': b64_encrypted_u,
'auth_tag': self.auth_tag
}
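# auth_tag is the HMAC of the agent UUID under K (computed in preloop()); the agent
# can use it to check that the key it reassembles matches the one the tenant generated.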
if self.payload is not None:
data['payload'] = self.payload
u_json_message = json.dumps(data)
# post encrypted U back to CloudAgent
params = '/keys/ukey'
cloudagent_base_url = (
f'{self.agent_ip}:{self.agent_port}'
)
post_ukey = RequestsClient(cloudagent_base_url, tls_enabled=False)
response = post_ukey.post(
params,
data=u_json_message
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Agent at {self.agent_ip} with Port {self.agent_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
raise UserError(
"Posting of Encrypted U to the Cloud Agent failed with response code %d" % response.status)
except Exception as e:
self.do_cvstop()
raise e
def do_verify(self):
""" Perform verify using a random generated challenge
"""
challenge = TPM_Utilities.random_password(20)
numtries = 0
while True:
try:
cloudagent_base_url = (
f'{self.agent_ip}:{self.agent_port}'
)
do_verify = RequestsClient(
cloudagent_base_url, tls_enabled=False)
response = do_verify.get(
(f'/keys/verify?challenge={challenge}'),
cert=self.cert,
verify=False
)
except Exception as e:
if response.status_code in (503, 504):
numtries += 1
maxr = config.getint('tenant', 'max_retries')
if numtries >= maxr:
logger.error(
f"Cannot establish connection to agent on {self.agent_ip} with port {self.agent_port}")
sys.exit()
retry = config.getfloat('tenant', 'retry_interval')
logger.info(
f"Verifier connection to agent at {self.agent_ip} refused {numtries}/{maxr} times, trying again in {retry} seconds...")
time.sleep(retry)
continue
raise e
response_body = response.json()
if response.status_code == 200:
if "results" not in response_body or 'hmac' not in response_body['results']:
logger.critical(
f"Error: unexpected http response body from Cloud Agent: {response.status}")
break
mac = response_body['results']['hmac']
ex_mac = crypto.do_hmac(self.K, challenge)
if mac == ex_mac:
logger.info("Key derivation successful")
else:
logger.error("Key derivation failed")
else:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
retry = config.getfloat('tenant', 'retry_interval')
logger.warning(
f"Key derivation not yet complete...trying again in {retry} seconds...Ctrl-C to stop")
time.sleep(retry)
continue
break
def main(argv=sys.argv):
"""[summary]
Keyword Arguments:
argv {[type]} -- [description] (default: {sys.argv})
Raises:
UserError: [description]
UserError: [description]
UserError: [description]
"""
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command', action='store', dest='command', default='add',
help="valid commands are add,delete,update,status,list,reactivate,regdelete. defaults to add")
parser.add_argument('-t', '--targethost', action='store',
dest='agent_ip', help="the IP address of the host to provision")
parser.add_argument('-tp', '--targetport', action='store',
dest='agent_port', help="the Port of the host to provision")
parser.add_argument('--cv_targethost', action='store', default=None, dest='cv_agent_ip',
help='the IP address of the host to provision that the verifier will use (optional). Use only if different than argument to option -t/--targethost')
parser.add_argument('-v', '--cv', action='store', dest='verifier_ip',
help="the IP address of the cloud verifier")
parser.add_argument('-u', '--uuid', action='store',
dest='agent_uuid', help="UUID for the agent to provision")
parser.add_argument('-f', '--file', action='store', default=None,
help='Deliver the specified plaintext to the provisioned agent')
parser.add_argument('--cert', action='store', dest='ca_dir', default=None,
help='Create and deliver a certificate using a CA created by ca-util. Pass in the CA directory or use "default" to use the standard dir')
parser.add_argument('-k', '--key', action='store', dest='keyfile',
help='an intermediate key file produced by user_data_encrypt')
parser.add_argument('-p', '--payload', action='store', default=None,
help='Specify the encrypted payload to deliver with encrypted keys specified by -k')
parser.add_argument('--include', action='store', dest='incl_dir', default=None,
help="Include additional files in provided directory in certificate zip file. Must be specified with --cert")
parser.add_argument('--allowlist', action='store', dest='allowlist',
default=None, help="Specify the location of an allowlist")
parser.add_argument('--exclude', action='store', dest='ima_exclude',
default=None, help="Specify the location of an IMA exclude list")
parser.add_argument('--tpm_policy', action='store', dest='tpm_policy', default=None,
help="Specify a TPM policy in JSON format. e.g., {\"15\":\"0000000000000000000000000000000000000000\"}")
parser.add_argument('--vtpm_policy', action='store', dest='vtpm_policy',
default=None, help="Specify a vTPM policy in JSON format")
parser.add_argument('--verify', action='store_true', default=False,
help='Block on cryptographically checked key derivation confirmation from the agent once it has been provisioned')
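# Example invocations (illustrative only; host, UUID and file names are placeholders):
#   keylime_tenant -c add -t 192.168.0.10 -u <agent_uuid> -f payload.txt --verify
#   keylime_tenant -c delete -u <agent_uuid>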
args = parser.parse_args(argv[1:])
mytenant = Tenant()
if args.command not in ['list', 'regdelete', 'delete'] and args.agent_ip is None:
raise UserError(
f"-t/--targethost is required for command {args.command}")
if args.agent_uuid is not None:
mytenant.agent_uuid = args.agent_uuid
# if the uuid is actually a public key, then hash it
if mytenant.agent_uuid.startswith('-----BEGIN PUBLIC KEY-----'):
mytenant.agent_uuid = hashlib.sha256(
mytenant.agent_uuid).hexdigest()
else:
logger.warning(
"Using default UUID D432FBB3-D2F1-4A97-9EF7-75BD81C00000")
mytenant.agent_uuid = "D432FBB3-D2F1-4A97-9EF7-75BD81C00000"
if config.STUB_VTPM and config.TPM_CANNED_VALUES is not None:
# Use canned values for agent UUID
jsonIn = config.TPM_CANNED_VALUES
if "add_vtpm_to_group" in jsonIn:
mytenant.agent_uuid = jsonIn['add_vtpm_to_group']['retout']
else:
# Our command hasn't been canned!
raise UserError("Command %s not found in canned JSON!" %
("add_vtpm_to_group"))
if args.verifier_ip is not None:
mytenant.cloudverifier_ip = args.verifier_ip
if args.command == 'add':
mytenant.init_add(vars(args))
mytenant.preloop()
mytenant.do_cv()
mytenant.do_quote()
if args.verify:
mytenant.do_verify()
elif args.command == 'update':
mytenant.init_add(vars(args))
mytenant.do_cvdelete()
mytenant.preloop()
mytenant.do_cv()
mytenant.do_quote()
if args.verify:
mytenant.do_verify()
elif args.command == 'delete':
mytenant.do_cvdelete()
elif args.command == 'status':
mytenant.do_cvstatus()
elif args.command == 'list':
mytenant.do_cvstatus(listing=True)
elif args.command == 'reactivate':
mytenant.do_cvreactivate()
elif args.command == 'regdelete':
mytenant.do_regdelete()
else:
raise UserError("Invalid command specified: %s" % (args.command))
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import argparse
import base64
import hashlib
import io
import logging
import os
import subprocess
import sys
import time
import zipfile
import simplejson as json
from keylime.requests_client import RequestsClient
from keylime.common import states
from keylime import config
from keylime import keylime_logging
from keylime import registrar_client
from keylime.tpm import tpm_obj
from keylime.tpm.tpm_abstract import TPM_Utilities
from keylime import ima
from keylime import crypto
from keylime.cmd import user_data_encrypt
from keylime import ca_util
from keylime.common import algorithms
# setup logging
logger = keylime_logging.init_logging('tenant')
# special exception that suppresses stack traces when it happens
class UserError(Exception):
pass
class Tenant():
"""Simple command processor example."""
config = None
cloudverifier_ip = None
cloudverifier_port = None
cloudagent_ip = None
cv_cloudagent_ip = None
cloudagent_port = None
registrar_ip = None
registrar_port = None
webapp_ip = None
webapp_port = None
uuid_service_generate_locally = None
agent_uuid = None
K = None
V = None
U = None
auth_tag = None
tpm_policy = None
vtpm_policy = {}
metadata = {}
allowlist = {}
revocation_key = ""
accept_tpm_hash_algs = []
accept_tpm_encryption_algs = []
accept_tpm_signing_algs = []
payload = None
def __init__(self):
""" Set up required values and TLS
"""
self.agent_ip = None
self.nonce = None
self.verifier_ip = config.get('cloud_verifier', 'cloudverifier_ip')
self.verifier_port = config.get('cloud_verifier', 'cloudverifier_port')
self.agent_port = config.get('cloud_agent', 'cloudagent_port')
self.registrar_port = config.get('registrar', 'registrar_tls_port')
self.webapp_port = config.getint('webapp', 'webapp_port')
if not config.REQUIRE_ROOT and self.webapp_port < 1024:
self.webapp_port += 2000
self.registrar_ip = config.get('registrar', 'registrar_ip')
self.verifier_base_url = f'{self.verifier_ip}:{self.verifier_port}'
self.webapp_ip = config.get('webapp', 'webapp_ip')
self.my_cert, self.my_priv_key = self.get_tls_context()
self.cert = (self.my_cert, self.my_priv_key)
if config.getboolean('general', "enable_tls"):
self.tls_enabled = True
else:
self.tls_enabled = False
self.cert = ""
logger.warning(
"TLS is currently disabled, keys will be sent in the clear! Should only be used for testing")
def get_tls_context(self):
"""Generate certifcate naming and path
Returns:
string -- my_cert (client_cert), my_priv_key (client private key)
"""
my_cert = config.get('tenant', 'my_cert')
my_priv_key = config.get('tenant', 'private_key')
tls_dir = config.get('tenant', 'tls_dir')
if tls_dir == 'default':
my_cert = 'client-cert.crt'
my_priv_key = 'client-private.pem'
tls_dir = 'cv_ca'
if tls_dir[0] != '/':
tls_dir = os.path.abspath('%s/%s' % (config.WORK_DIR, tls_dir))
logger.info(f"Setting up client TLS in {tls_dir}")
my_cert = "%s/%s" % (tls_dir, my_cert)
my_priv_key = "%s/%s" % (tls_dir, my_priv_key)
return my_cert, my_priv_key
def init_add(self, args):
""" Set up required values. Command line options can overwrite these config values
Arguments:
args {[string]} -- agent_ip|agent_port|cv_agent_ip
"""
if "agent_ip" in args:
self.agent_ip = args["agent_ip"]
if 'agent_port' in args and args['agent_port'] is not None:
self.agent_port = args['agent_port']
if 'cv_agent_ip' in args and args['cv_agent_ip'] is not None:
self.cv_cloudagent_ip = args['cv_agent_ip']
else:
self.cv_cloudagent_ip = self.agent_ip
# Make sure all keys exist in dictionary
if "file" not in args:
args["file"] = None
if "keyfile" not in args:
args["keyfile"] = None
if "payload" not in args:
args["payload"] = None
if "ca_dir" not in args:
args["ca_dir"] = None
if "incl_dir" not in args:
args["incl_dir"] = None
if "ca_dir_pw" not in args:
args["ca_dir_pw"] = None
# Set up accepted algorithms
self.accept_tpm_hash_algs = config.get(
'tenant', 'accept_tpm_hash_algs').split(',')
self.accept_tpm_encryption_algs = config.get(
'tenant', 'accept_tpm_encryption_algs').split(',')
self.accept_tpm_signing_algs = config.get(
'tenant', 'accept_tpm_signing_algs').split(',')
# Set up PCR values
tpm_policy = config.get('tenant', 'tpm_policy')
if "tpm_policy" in args and args["tpm_policy"] is not None:
tpm_policy = args["tpm_policy"]
self.tpm_policy = TPM_Utilities.readPolicy(tpm_policy)
logger.info(f"TPM PCR Mask from policy is {self.tpm_policy['mask']}")
vtpm_policy = config.get('tenant', 'vtpm_policy')
if "vtpm_policy" in args and args["vtpm_policy"] is not None:
vtpm_policy = args["vtpm_policy"]
self.vtpm_policy = TPM_Utilities.readPolicy(vtpm_policy)
logger.info(f"TPM PCR Mask from policy is {self.vtpm_policy['mask']}")
# Read command-line path string allowlist
al_data = None
if "allowlist" in args and args["allowlist"] is not None:
# Auto-enable IMA (or-bit mask)
self.tpm_policy['mask'] = "0x%X" % (
int(self.tpm_policy['mask'], 0) | (1 << config.IMA_PCR))
if isinstance(args["allowlist"], str):
if args["allowlist"] == "default":
args["allowlist"] = config.get(
'tenant', 'allowlist')
al_data = ima.read_allowlist(args["allowlist"])
elif isinstance(args["allowlist"], list):
al_data = args["allowlist"]
else:
raise UserError("Invalid allowlist provided")
# Read command-line path string IMA exclude list
excl_data = None
if "ima_exclude" in args and args["ima_exclude"] is not None:
if isinstance(args["ima_exclude"], str):
if args["ima_exclude"] == "default":
args["ima_exclude"] = config.get(
'tenant', 'ima_excludelist')
excl_data = ima.read_excllist(args["ima_exclude"])
elif isinstance(args["ima_exclude"], list):
excl_data = args["ima_exclude"]
else:
raise UserError("Invalid exclude list provided")
# Set up IMA
if TPM_Utilities.check_mask(self.tpm_policy['mask'], config.IMA_PCR) or \
TPM_Utilities.check_mask(self.vtpm_policy['mask'], config.IMA_PCR):
# Process allowlists
self.allowlist = ima.process_allowlists(al_data, excl_data)
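# The processed allowlist (with the exclude list folded in) is what gets sent to the
# verifier as the 'allowlist' field in do_cv().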
# if none
if (args["file"] is None and args["keyfile"] is None and args["ca_dir"] is None):
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
if args["keyfile"] is not None:
if args["file"] is not None or args["ca_dir"] is not None:
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
# read the keys in
if isinstance(args["keyfile"], dict) and "data" in args["keyfile"]:
if isinstance(args["keyfile"]["data"], list) and len(args["keyfile"]["data"]) == 1:
keyfile = args["keyfile"]["data"][0]
if keyfile is None:
raise UserError("Invalid key file contents")
f = io.StringIO(keyfile)
else:
raise UserError("Invalid key file provided")
else:
f = open(args["keyfile"], 'r')
self.K = base64.b64decode(f.readline())
self.U = base64.b64decode(f.readline())
self.V = base64.b64decode(f.readline())
f.close()
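# The keyfile is expected to hold three base64-encoded lines: K, U and V,
# as produced by user_data_encrypt.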
# read the payload in (opt.)
if isinstance(args["payload"], dict) and "data" in args["payload"]:
if isinstance(args["payload"]["data"], list) and len(args["payload"]["data"]) > 0:
self.payload = args["payload"]["data"][0]
else:
if args["payload"] is not None:
f = open(args["payload"], 'r')
self.payload = f.read()
f.close()
if args["file"] is not None:
if args["keyfile"] is not None or args["ca_dir"] is not None:
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
if isinstance(args["file"], dict) and "data" in args["file"]:
if isinstance(args["file"]["data"], list) and len(args["file"]["data"]) > 0:
contents = args["file"]["data"][0]
if contents is None:
raise UserError("Invalid file payload contents")
else:
raise UserError("Invalid file payload provided")
else:
with open(args["file"], 'r') as f:
contents = f.read()
ret = user_data_encrypt.encrypt(contents)
self.K = ret['k']
self.U = ret['u']
self.V = ret['v']
self.payload = ret['ciphertext']
if args["ca_dir"] is None and args["incl_dir"] is not None:
raise UserError(
"--include option is only valid when used with --cert")
if args["ca_dir"] is not None:
if args["file"] is not None or args["keyfile"] is not None:
raise UserError(
"You must specify one of -k, -f, or --cert to specify the key/contents to be securely delivered to the agent")
if args["ca_dir"] == 'default':
args["ca_dir"] = config.CA_WORK_DIR
if "ca_dir_pw" in args and args["ca_dir_pw"] is not None:
ca_util.setpassword(args["ca_dir_pw"])
if not os.path.exists(args["ca_dir"]) or not os.path.exists("%s/cacert.crt" % args["ca_dir"]):
logger.warning(" CA directory does not exist. Creating...")
ca_util.cmd_init(args["ca_dir"])
if not os.path.exists("%s/%s-private.pem" % (args["ca_dir"], self.agent_uuid)):
ca_util.cmd_mkcert(args["ca_dir"], self.agent_uuid)
cert_pkg, serial, subject = ca_util.cmd_certpkg(
args["ca_dir"], self.agent_uuid)
# support revocation
if not os.path.exists("%s/RevocationNotifier-private.pem" % args["ca_dir"]):
ca_util.cmd_mkcert(args["ca_dir"], "RevocationNotifier")
rev_package, _, _ = ca_util.cmd_certpkg(
args["ca_dir"], "RevocationNotifier")
# extract public and private keys from package
sf = io.BytesIO(rev_package)
with zipfile.ZipFile(sf) as zf:
privkey = zf.read("RevocationNotifier-private.pem")
cert = zf.read("RevocationNotifier-cert.crt")
# put the cert of the revoker into the cert package
sf = io.BytesIO(cert_pkg)
with zipfile.ZipFile(sf, 'a', compression=zipfile.ZIP_STORED) as zf:
zf.writestr('RevocationNotifier-cert.crt', cert)
# add additional files to zip
if args["incl_dir"] is not None:
if isinstance(args["incl_dir"], dict) and "data" in args["incl_dir"] and "name" in args["incl_dir"]:
if isinstance(args["incl_dir"]["data"], list) and isinstance(args["incl_dir"]["name"], list):
if len(args["incl_dir"]["data"]) != len(args["incl_dir"]["name"]):
raise UserError("Invalid incl_dir provided")
for i in range(len(args["incl_dir"]["data"])):
zf.writestr(os.path.basename(
args["incl_dir"]["name"][i]), args["incl_dir"]["data"][i])
else:
if os.path.exists(args["incl_dir"]):
files = next(os.walk(args["incl_dir"]))[2]
for filename in files:
with open("%s/%s" % (args["incl_dir"], filename), 'rb') as f:
zf.writestr(
os.path.basename(f.name), f.read())
else:
logger.warning(
f'Specified include directory {args["incl_dir"]} does not exist. Skipping...')
cert_pkg = sf.getvalue()
# put the private key into the data to be sent to the CV
self.revocation_key = privkey
# encrypt the cert package
ret = user_data_encrypt.encrypt(cert_pkg)
self.K = ret['k']
self.U = ret['u']
self.V = ret['v']
self.metadata = {'cert_serial': serial, 'subject': subject}
self.payload = ret['ciphertext']
if self.payload is not None and len(self.payload) > config.getint('tenant', 'max_payload_size'):
raise UserError("Payload size %s exceeds max size %d" % (
len(self.payload), config.getint('tenant', 'max_payload_size')))
def preloop(self):
""" encrypt the agent UUID as a check for delivering the correct key
"""
self.auth_tag = crypto.do_hmac(self.K, self.agent_uuid)
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if config.INSECURE_DEBUG:
logger.debug(F"K: {base64.b64encode(self.K)}")
logger.debug(F"V: {base64.b64encode(self.V)}")
logger.debug(F"U: {base64.b64encode(self.U)}")
logger.debug(F"Auth Tag: {self.auth_tag}")
def check_ek(self, ek, ekcert, tpm):
""" Check the Entity Key
Arguments:
ek {[type]} -- [description]
ekcert {[type]} -- [description]
tpm {[type]} -- [description]
Returns:
[type] -- [description]
"""
if config.getboolean('tenant', 'require_ek_cert'):
if config.STUB_TPM:
logger.debug("not checking ekcert due to STUB_TPM mode")
elif ekcert == 'virtual':
logger.debug("not checking ekcert of VTPM")
elif ekcert == 'emulator' and config.DISABLE_EK_CERT_CHECK_EMULATOR:
logger.debug("not checking ekcert of TPM emulator")
elif ekcert is None:
logger.warning(
"No EK cert provided, require_ek_cert option in config set to True")
return False
elif not tpm.verify_ek(base64.b64decode(ekcert), ek):
logger.warning("Invalid EK certificate")
return False
return True
def validate_tpm_quote(self, public_key, quote, tpm_version, hash_alg):
""" Validate TPM Quote received from the Agent
Arguments:
public_key {[type]} -- [description]
quote {[type]} -- [description]
tpm_version {[type]} -- [description]
hash_alg {bool} -- [description]
Raises:
UserError: [description]
Returns:
[type] -- [description]
"""
registrar_client.init_client_tls('tenant')
reg_keys = registrar_client.getKeys(
self.registrar_ip, self.registrar_port, self.agent_uuid)
if reg_keys is None:
logger.warning("AIK not found in registrar, quote not validated")
return False
tpm = tpm_obj.getTPM(need_hw_tpm=False, tpm_version=tpm_version)
if not tpm.check_quote(self.agent_uuid, self.nonce, public_key, quote, reg_keys['aik'], hash_alg=hash_alg):
if reg_keys['regcount'] > 1:
logger.error("WARNING: This UUID had more than one ek-ekcert registered to it! This might indicate that your system is misconfigured or a malicious host is present. Run 'regdelete' for this agent and restart")
sys.exit()
return False
if reg_keys['regcount'] > 1:
logger.warn("WARNING: This UUID had more than one ek-ekcert registered to it! This might indicate that your system is misconfigured. Run 'regdelete' for this agent and restart")
if not config.STUB_TPM and (not config.getboolean('tenant', 'require_ek_cert') and config.get('tenant', 'ek_check_script') == ""):
logger.warning(
"DANGER: EK cert checking is disabled and no additional checks on EKs have been specified with ek_check_script option. Keylime is not secure!!")
# check EK cert and make sure it matches EK
if not self.check_ek(reg_keys['ek'], reg_keys['ekcert'], tpm):
return False
# if agent is virtual, check physical EK cert and make sure it matches physical EK
if 'provider_keys' in reg_keys:
if not self.check_ek(reg_keys['provider_keys']['ek'], reg_keys['provider_keys']['ekcert'], tpm):
return False
# check all EKs with optional script:
script = config.get('tenant', 'ek_check_script')
if not script:
return True
if script[0] != '/':
script = "%s/%s" % (config.WORK_DIR, script)
logger.info(f"Checking EK with script {script}")
# now we need to exec the script with the ek and ek cert in vars
env = os.environ.copy()
env['AGENT_UUID'] = self.agent_uuid
env['EK'] = reg_keys['ek']
if reg_keys['ekcert'] is not None:
env['EK_CERT'] = reg_keys['ekcert']
else:
env['EK_CERT'] = ""
env['PROVKEYS'] = json.dumps(reg_keys.get('provider_keys', {}))
proc = subprocess.Popen(script, env=env, shell=True,
cwd=config.WORK_DIR, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
retval = proc.wait()
if retval != 0:
raise UserError("External check script failed to validate EK")
logger.debug(
"External check script successfully to validated EK")
while True:
line = proc.stdout.readline().decode()
if line == "":
break
logger.debug(f"ek_check output: {line.strip()}")
return True
def do_cv(self):
""" Initiaite v, agent_id and ip and initiate the cloudinit sequence
"""
b64_v = base64.b64encode(self.V).decode('utf-8')
logger.debug("b64_v:" + b64_v)
data = {
'v': b64_v,
'cloudagent_ip': self.cv_cloudagent_ip,
'cloudagent_port': self.agent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(self.allowlist),
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': self.accept_tpm_hash_algs,
'accept_tpm_encryption_algs': self.accept_tpm_encryption_algs,
'accept_tpm_signing_algs': self.accept_tpm_signing_algs,
}
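# Only V is handed to the verifier here; U is delivered directly to the agent in
# do_quote(), so the full bootstrap key K is never transmitted in one piece.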
json_message = json.dumps(data)
do_cv = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cv.post(
(f'/agents/{self.agent_uuid}'),
data=json_message,
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code == 409:
# this is a conflict, need to update or delete it
logger.error(
f"Agent {self.agent_uuid} already existed at CV. Please use delete or update.")
sys.exit()
elif response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response.json())
logger.error(
f"POST command response: {response.status} Unexpected response from Cloud Verifier: {response.read()}")
sys.exit()
def do_cvstatus(self, listing=False):
""" Perform opertional state look up for agent
Keyword Arguments:
listing {bool} -- If True, list all agent statuses (default: {False})
"""
agent_uuid = ""
if not listing:
agent_uuid = self.agent_uuid
do_cvstatus = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cvstatus.get(
(f'/agents/{agent_uuid}'),
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code == 404:
logger.error(
f"Agent {agent_uuid} does not exist on the verifier. Please try to add or update agent")
sys.exit()
if response.status_code != 200:
logger.error(
f"Status command response: {response.status}. Unexpected response from Cloud Verifier.")
sys.exit()
else:
response_json = response.json()
if not listing:
operational_state = response_json["results"]["operational_state"]
logger.info(f'Agent Status: "{states.state_to_str(operational_state)}"')
else:
agent_array = response_json["results"]["uuids"]
logger.info(f'Agents: "{agent_array}"')
def do_cvdelete(self):
"""Delete agent from Verifier
"""
do_cvdelete = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cvdelete.delete(
(f'/agents/{self.agent_uuid}'),
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code == 202:
deleted = False
for _ in range(12):
get_cvdelete = RequestsClient(
self.verifier_base_url, self.tls_enabled)
response = get_cvdelete.get(
(f'/agents/{self.agent_uuid}'),
cert=self.cert,
verify=False
)
if response.status_code in (200, 404):
deleted = True
break
time.sleep(.4)
if deleted:
logger.info(
f"CV completed deletion of agent {self.agent_uuid}")
else:
logger.error(
f"Timed out waiting for delete of agent {self.agent_uuid} to complete at CV")
sys.exit()
elif response.status_code == 200:
logger.info(f"Agent {self.agent_uuid} deleted from the CV")
else:
response_body = response.json()
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
def do_regdelete(self):
""" Delete agent from Registrar
"""
registrar_client.init_client_tls('tenant')
registrar_client.doRegistrarDelete(
self.registrar_ip, self.registrar_port, self.agent_uuid)
def do_cvreactivate(self):
""" Reactive Agent
"""
do_cvreactivate = RequestsClient(
self.verifier_base_url, self.tls_enabled)
response = do_cvreactivate.put(
(f'/agents/{self.agent_uuid}/reactivate'),
data=b'',
cert=self.cert,
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
response_body = response.json()
if response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
logger.error(
f"Update command response: {response.status_code} Unexpected response from Cloud Verifier.")
else:
logger.info(f"Agent {self.agent_uuid} re-activated")
def do_cvstop(self):
""" Stop declared active agent
"""
params = f'/agents/{self.agent_uuid}/stop'
do_cvstop = RequestsClient(self.verifier_base_url, self.tls_enabled)
response = do_cvstop.put(
params,
cert=self.cert,
data=b'',
verify=False
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Verifier at {self.verifier_ip} with Port {self.verifier_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
response_body = response.json()
if response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
else:
logger.info(f"Agent {self.agent_uuid} stopped")
def do_quote(self):
""" Perform TPM quote by GET towards Agent
Raises:
UserError: Connection handler
"""
self.nonce = TPM_Utilities.random_password(20)
numtries = 0
response = None
# Note: We need a specific retry handler (perhaps in common); no point having a localised one unless we have to.
while True:
try:
params = '/quotes/identity?nonce=%s' % (self.nonce)
cloudagent_base_url = f'{self.agent_ip}:{self.agent_port}'
do_quote = RequestsClient(cloudagent_base_url, tls_enabled=False)
response = do_quote.get(
params,
cert=self.cert
)
response_body = response.json()
except Exception as e:
if response.status_code in (503, 504):
numtries += 1
maxr = config.getint('tenant', 'max_retries')
if numtries >= maxr:
logger.error(
f"tenant cannot establish connection to agent on {self.agent_ip} with port {self.agent_port}")
sys.exit()
retry = config.getfloat('tenant', 'retry_interval')
logger.info(
f"tenant connection to agent at {self.agent_ip} refused {numtries}/{maxr} times, trying again in {retry} seconds...")
time.sleep(retry)
continue
raise e
break
try:
if response is not None and response.status_code != 200:
raise UserError(
"Status command response: %d Unexpected response from Cloud Agent." % response.status)
if "results" not in response_body:
raise UserError(
"Error: unexpected http response body from Cloud Agent: %s" % str(response.status))
quote = response_body["results"]["quote"]
logger.debug(f"agent_quote received quote: {quote}")
public_key = response_body["results"]["pubkey"]
logger.debug(f"agent_quote received public key: {public_key}")
# Get tpm_version, hash_alg
tpm_version = response_body["results"]["tpm_version"]
logger.debug(
f"agent_quote received tpm version: {str(tpm_version)}")
# Ensure hash_alg is in accept_tpm_hash_algs list
hash_alg = response_body["results"]["hash_alg"]
logger.debug(f"agent_quote received hash algorithm: {hash_alg}")
if not algorithms.is_accepted(hash_alg, config.get('tenant', 'accept_tpm_hash_algs').split(',')):
raise UserError(
"TPM Quote is using an unaccepted hash algorithm: %s" % hash_alg)
# Ensure enc_alg is in accept_tpm_encryption_algs list
enc_alg = response_body["results"]["enc_alg"]
logger.debug(
f"agent_quote received encryption algorithm: {enc_alg}")
if not algorithms.is_accepted(enc_alg, config.get('tenant', 'accept_tpm_encryption_algs').split(',')):
raise UserError(
"TPM Quote is using an unaccepted encryption algorithm: %s" % enc_alg)
# Ensure sign_alg is in accept_tpm_encryption_algs list
sign_alg = response_body["results"]["sign_alg"]
logger.debug(f"agent_quote received signing algorithm: {sign_alg}")
if not algorithms.is_accepted(sign_alg, config.get('tenant', 'accept_tpm_signing_algs').split(',')):
raise UserError(
"TPM Quote is using an unaccepted signing algorithm: %s" % sign_alg)
if not self.validate_tpm_quote(public_key, quote, tpm_version, hash_alg):
raise UserError(
"TPM Quote from cloud agent is invalid for nonce: %s" % self.nonce)
logger.info(f"Quote from {self.agent_ip} validated")
# encrypt U with the public key
encrypted_U = crypto.rsa_encrypt(
crypto.rsa_import_pubkey(public_key), self.U)
b64_encrypted_u = base64.b64encode(encrypted_U)
logger.debug("b64_encrypted_u: " + b64_encrypted_u.decode('utf-8'))
data = {
'encrypted_key': b64_encrypted_u,
'auth_tag': self.auth_tag
}
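# auth_tag is the HMAC of the agent UUID under K (computed in preloop()); the agent
# can use it to check that the key it reassembles matches the one the tenant generated.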
if self.payload is not None:
data['payload'] = self.payload
u_json_message = json.dumps(data)
# post encrypted U back to CloudAgent
params = '/keys/ukey'
cloudagent_base_url = (
f'{self.agent_ip}:{self.agent_port}'
)
post_ukey = RequestsClient(cloudagent_base_url, tls_enabled=False)
response = post_ukey.post(
params,
data=u_json_message
)
if response.status_code == 503:
logger.error(
f"Cannot connect to Agent at {self.agent_ip} with Port {self.agent_port}. Connection refused.")
sys.exit()
elif response.status_code == 504:
logger.error(
f"Verifier at {self.verifier_ip} with Port {self.verifier_port} timed out.")
sys.exit()
if response.status_code != 200:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
raise UserError(
"Posting of Encrypted U to the Cloud Agent failed with response code %d" % response.status)
except Exception as e:
self.do_cvstop()
raise e
def do_verify(self):
""" Perform verify using a random generated challenge
"""
challenge = TPM_Utilities.random_password(20)
numtries = 0
while True:
try:
cloudagent_base_url = (
f'{self.agent_ip}:{self.agent_port}'
)
do_verify = RequestsClient(
cloudagent_base_url, tls_enabled=False)
response = do_verify.get(
(f'/keys/verify?challenge={challenge}'),
cert=self.cert,
verify=False
)
except Exception as e:
if response.status_code in (503, 504):
numtries += 1
maxr = config.getint('tenant', 'max_retries')
if numtries >= maxr:
logger.error(
f"Cannot establish connection to agent on {self.agent_ip} with port {self.agent_port}")
sys.exit()
retry = config.getfloat('tenant', 'retry_interval')
logger.info(
f"Verifier connection to agent at {self.agent_ip} refused {numtries}/{maxr} times, trying again in {retry} seconds...")
time.sleep(retry)
continue
raise e
response_body = response.json()
if response.status_code == 200:
if "results" not in response_body or 'hmac' not in response_body['results']:
logger.critical(
f"Error: unexpected http response body from Cloud Agent: {response.status}")
break
mac = response_body['results']['hmac']
ex_mac = crypto.do_hmac(self.K, challenge)
if mac == ex_mac:
logger.info("Key derivation successful")
else:
logger.error("Key derivation failed")
else:
keylime_logging.log_http_response(
logger, logging.ERROR, response_body)
retry = config.getfloat('tenant', 'retry_interval')
logger.warning(
f"Key derivation not yet complete...trying again in {retry} seconds...Ctrl-C to stop")
time.sleep(retry)
continue
break
def main(argv=sys.argv):
"""[summary]
Keyword Arguments:
argv {[type]} -- [description] (default: {sys.argv})
Raises:
UserError: [description]
UserError: [description]
UserError: [description]
"""
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command', action='store', dest='command', default='add',
help="valid commands are add,delete,update,status,list,reactivate,regdelete. defaults to add")
parser.add_argument('-t', '--targethost', action='store',
dest='agent_ip', help="the IP address of the host to provision")
parser.add_argument('-tp', '--targetport', action='store',
dest='agent_port', help="the Port of the host to provision")
parser.add_argument('--cv_targethost', action='store', default=None, dest='cv_agent_ip',
help='the IP address of the host to provision that the verifier will use (optional). Use only if different than argument to option -t/--targethost')
parser.add_argument('-v', '--cv', action='store', dest='verifier_ip',
help="the IP address of the cloud verifier")
parser.add_argument('-u', '--uuid', action='store',
dest='agent_uuid', help="UUID for the agent to provision")
parser.add_argument('-f', '--file', action='store', default=None,
help='Deliver the specified plaintext to the provisioned agent')
parser.add_argument('--cert', action='store', dest='ca_dir', default=None,
help='Create and deliver a certificate using a CA created by ca-util. Pass in the CA directory or use "default" to use the standard dir')
parser.add_argument('-k', '--key', action='store', dest='keyfile',
help='an intermediate key file produced by user_data_encrypt')
parser.add_argument('-p', '--payload', action='store', default=None,
help='Specify the encrypted payload to deliver with encrypted keys specified by -k')
parser.add_argument('--include', action='store', dest='incl_dir', default=None,
help="Include additional files in provided directory in certificate zip file. Must be specified with --cert")
parser.add_argument('--allowlist', action='store', dest='allowlist',
default=None, help="Specify the location of an allowlist")
parser.add_argument('--exclude', action='store', dest='ima_exclude',
default=None, help="Specify the location of an IMA exclude list")
parser.add_argument('--tpm_policy', action='store', dest='tpm_policy', default=None,
help="Specify a TPM policy in JSON format. e.g., {\"15\":\"0000000000000000000000000000000000000000\"}")
parser.add_argument('--vtpm_policy', action='store', dest='vtpm_policy',
default=None, help="Specify a vTPM policy in JSON format")
parser.add_argument('--verify', action='store_true', default=False,
help='Block on cryptographically checked key derivation confirmation from the agent once it has been provisioned')
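# Example invocations (illustrative only; host, UUID and file names are placeholders):
#   keylime_tenant -c add -t 192.168.0.10 -u <agent_uuid> -f payload.txt --verify
#   keylime_tenant -c delete -u <agent_uuid>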
args = parser.parse_args(argv[1:])
mytenant = Tenant()
if args.command not in ['list', 'regdelete', 'delete'] and args.agent_ip is None:
raise UserError(
f"-t/--targethost is required for command {args.command}")
if args.agent_uuid is not None:
mytenant.agent_uuid = args.agent_uuid
# if the uuid is actually a public key, then hash it
if mytenant.agent_uuid.startswith('-----BEGIN PUBLIC KEY-----'):
mytenant.agent_uuid = hashlib.sha256(
mytenant.agent_uuid).hexdigest()
else:
logger.warning(
"Using default UUID D432FBB3-D2F1-4A97-9EF7-75BD81C00000")
mytenant.agent_uuid = "D432FBB3-D2F1-4A97-9EF7-75BD81C00000"
if config.STUB_VTPM and config.TPM_CANNED_VALUES is not None:
# Use canned values for agent UUID
jsonIn = config.TPM_CANNED_VALUES
if "add_vtpm_to_group" in jsonIn:
mytenant.agent_uuid = jsonIn['add_vtpm_to_group']['retout']
else:
# Our command hasn't been canned!
raise UserError("Command %s not found in canned JSON!" %
("add_vtpm_to_group"))
if args.verifier_ip is not None:
mytenant.cloudverifier_ip = args.verifier_ip
if args.command == 'add':
mytenant.init_add(vars(args))
mytenant.preloop()
mytenant.do_cv()
mytenant.do_quote()
if args.verify:
mytenant.do_verify()
elif args.command == 'update':
mytenant.init_add(vars(args))
mytenant.do_cvdelete()
mytenant.preloop()
mytenant.do_cv()
mytenant.do_quote()
if args.verify:
mytenant.do_verify()
elif args.command == 'delete':
mytenant.do_cvdelete()
elif args.command == 'status':
mytenant.do_cvstatus()
elif args.command == 'list':
mytenant.do_cvstatus(listing=True)
elif args.command == 'reactivate':
mytenant.do_cvreactivate()
elif args.command == 'regdelete':
mytenant.do_regdelete()
else:
raise UserError("Invalid command specified: %s" % (args.command))
|
"""
Tests for the course search form.
"""
from unittest import mock
from django.http.request import QueryDict
from django.test import TestCase
from richie.apps.core.defaults import ALL_LANGUAGES_DICT
from richie.apps.search.forms import ItemSearchForm
@mock.patch.dict(ALL_LANGUAGES_DICT, {"fr": "French", "en": "English"})
class ItemSearchFormTestCase(TestCase):
"""
Test the course search form.
"""
def test_forms_items_params_not_required(self, *_):
"""No params are required for the search form."""
form = ItemSearchForm()
self.assertTrue(form.is_valid())
def test_forms_items_empty_querystring(self, *_):
"""The empty query string should be a valid search form."""
form = ItemSearchForm(data=QueryDict())
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data, {"limit": None, "offset": None, "query": "", "scope": ""}
)
def test_forms_items_limit_greater_than_1(self, *_):
"""The `limit` param should be greater than 1."""
form = ItemSearchForm(data=QueryDict(query_string="limit=0"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors, {"limit": ["Ensure this value is greater than or equal to 1."]}
)
def test_forms_items_limit_integer(self, *_):
"""The `limit` param should be an integer."""
form = ItemSearchForm(data=QueryDict(query_string="limit=a"))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"limit": ["Enter a whole number."]})
form = ItemSearchForm(data=QueryDict(query_string="limit=1"))
self.assertTrue(form.is_valid())
def test_forms_items_offset_greater_than_0(self, *_):
"""The `offset` param should be greater than 0."""
form = ItemSearchForm(data=QueryDict(query_string="offset=-1"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{"offset": ["Ensure this value is greater than or equal to 0."]},
)
def test_forms_items_offset_integer(self, *_):
"""The `offset` param should be an integer."""
form = ItemSearchForm(data=QueryDict(query_string="offset=a"))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"offset": ["Enter a whole number."]})
form = ItemSearchForm(data=QueryDict(query_string="offset=1"))
self.assertTrue(form.is_valid())
def test_forms_items_query_between_3_and_100_characters_long(self, *_):
"""The `query` param should be between 3 and 100 characters long."""
form = ItemSearchForm(data=QueryDict(query_string="query=aa"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{"query": ["Ensure this value has at least 3 characters (it has 2)."]},
)
form = ItemSearchForm(data=QueryDict(query_string="query=aaa"))
self.assertTrue(form.is_valid())
form = ItemSearchForm(data=QueryDict(query_string=f"query={"a" * 100:s}"))
self.assertTrue(form.is_valid())
form = ItemSearchForm(data=QueryDict(query_string=f"query={"a" * 101:s}"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{"query": ["Ensure this value has at most 100 characters (it has 101)."]},
)
def test_forms_items_single_values_in_querystring(self, *_):
"""
The fields from filter definitions should be normalized as lists. The fields defined
on the form should be single values (limit, offset and query).
"""
form = ItemSearchForm(
data=QueryDict(query_string=("limit=9&offset=3&query=maths"))
)
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data, {"limit": 9, "offset": 3, "query": "maths", "scope": ""}
)
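# The tests below exercise build_es_query(), which (as asserted) returns a
# (limit, offset, Elasticsearch query body) tuple.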
def test_forms_items_build_es_query_search_by_match_text(self, *_):
"""
Happy path: build a query that filters items by matching text
"""
form = ItemSearchForm(
data=QueryDict(query_string="limit=20&offset=2&query=some%20phrase%20terms")
)
self.assertTrue(form.is_valid())
self.assertEqual(
form.build_es_query(),
(
20,
2,
{
"query": {
"bool": {
"must": [
{
"multi_match": {
"analyzer": "english",
"fields": ["title.*"],
"query": "some phrase " "terms",
}
}
]
}
}
},
),
)
def test_forms_items_build_es_query_by_match_text_with_kind(self, *_):
"""
Make sure the generated query filters the items by kind when one is provided
as argument.
"""
form = ItemSearchForm(
data=QueryDict(query_string="limit=20&offset=2&query=some%20phrase%20terms")
)
self.assertTrue(form.is_valid())
self.assertEqual(
form.build_es_query(kind="subjects"),
(
20,
2,
{
"query": {
"bool": {
"must": [
{"term": {"kind": "subjects"}},
{
"multi_match": {
"analyzer": "english",
"fields": ["title.*"],
"query": "some phrase " "terms",
}
},
]
}
}
},
),
)
def test_forms_items_build_es_query_search_all(self, *_):
"""
Happy path: a match all query is returned
"""
form = ItemSearchForm(data=QueryDict(query_string="limit=11&offset=4"))
self.assertTrue(form.is_valid())
self.assertEqual(form.build_es_query(), (11, 4, {"query": {"match_all": {}}}))
|
"""
Tests for the course search form.
"""
from unittest import mock
from django.http.request import QueryDict
from django.test import TestCase
from richie.apps.core.defaults import ALL_LANGUAGES_DICT
from richie.apps.search.forms import ItemSearchForm
@mock.patch.dict(ALL_LANGUAGES_DICT, {"fr": "French", "en": "English"})
class ItemSearchFormTestCase(TestCase):
"""
Test the course search form.
"""
def test_forms_items_params_not_required(self, *_):
"""No params are required for the search form."""
form = ItemSearchForm()
self.assertTrue(form.is_valid())
def test_forms_items_empty_querystring(self, *_):
"""The empty query string should be a valid search form."""
form = ItemSearchForm(data=QueryDict())
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data, {"limit": None, "offset": None, "query": "", "scope": ""}
)
def test_forms_items_limit_greater_than_1(self, *_):
"""The `limit` param should be greater than 1."""
form = ItemSearchForm(data=QueryDict(query_string="limit=0"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors, {"limit": ["Ensure this value is greater than or equal to 1."]}
)
def test_forms_items_limit_integer(self, *_):
"""The `limit` param should be an integer."""
form = ItemSearchForm(data=QueryDict(query_string="limit=a"))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"limit": ["Enter a whole number."]})
form = ItemSearchForm(data=QueryDict(query_string="limit=1"))
self.assertTrue(form.is_valid())
def test_forms_items_offset_greater_than_0(self, *_):
"""The `offset` param should be greater than 0."""
form = ItemSearchForm(data=QueryDict(query_string="offset=-1"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{"offset": ["Ensure this value is greater than or equal to 0."]},
)
def test_forms_items_offset_integer(self, *_):
"""The `offset` param should be an integer."""
form = ItemSearchForm(data=QueryDict(query_string="offset=a"))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"offset": ["Enter a whole number."]})
form = ItemSearchForm(data=QueryDict(query_string="offset=1"))
self.assertTrue(form.is_valid())
def test_forms_items_query_between_3_and_100_characters_long(self, *_):
"""The `query` param should be between 3 and 100 characters long."""
form = ItemSearchForm(data=QueryDict(query_string="query=aa"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{"query": ["Ensure this value has at least 3 characters (it has 2)."]},
)
form = ItemSearchForm(data=QueryDict(query_string="query=aaa"))
self.assertTrue(form.is_valid())
form = ItemSearchForm(data=QueryDict(query_string=f"query={'a' * 100:s}"))
self.assertTrue(form.is_valid())
form = ItemSearchForm(data=QueryDict(query_string=f"query={'a' * 101:s}"))
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{"query": ["Ensure this value has at most 100 characters (it has 101)."]},
)
def test_forms_items_single_values_in_querystring(self, *_):
"""
The fields from filter definitions should be normalized as lists. The fields defined
on the form should be single values (limit, offset and query).
"""
form = ItemSearchForm(
data=QueryDict(query_string=("limit=9&offset=3&query=maths"))
)
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data, {"limit": 9, "offset": 3, "query": "maths", "scope": ""}
)
def test_forms_items_build_es_query_search_by_match_text(self, *_):
"""
Happy path: build a query that filters items by matching text
"""
form = ItemSearchForm(
data=QueryDict(query_string="limit=20&offset=2&query=some%20phrase%20terms")
)
self.assertTrue(form.is_valid())
self.assertEqual(
form.build_es_query(),
(
20,
2,
{
"query": {
"bool": {
"must": [
{
"multi_match": {
"analyzer": "english",
"fields": ["title.*"],
"query": "some phrase " "terms",
}
}
]
}
}
},
),
)
def test_forms_items_build_es_query_by_match_text_with_kind(self, *_):
"""
Make sure the generated query filters the items by kind when one is provided
as argument.
"""
form = ItemSearchForm(
data=QueryDict(query_string="limit=20&offset=2&query=some%20phrase%20terms")
)
self.assertTrue(form.is_valid())
self.assertEqual(
form.build_es_query(kind="subjects"),
(
20,
2,
{
"query": {
"bool": {
"must": [
{"term": {"kind": "subjects"}},
{
"multi_match": {
"analyzer": "english",
"fields": ["title.*"],
"query": "some phrase " "terms",
}
},
]
}
}
},
),
)
def test_forms_items_build_es_query_search_all(self, *_):
"""
Happy path: a match all query is returned
"""
form = ItemSearchForm(data=QueryDict(query_string="limit=11&offset=4"))
self.assertTrue(form.is_valid())
self.assertEqual(form.build_es_query(), (11, 4, {"query": {"match_all": {}}}))
|
#!/usr/bin/env python3
# Copyright 2011 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""emcc - compiler helper script
=============================
emcc is a drop-in replacement for a compiler like gcc or clang.
See emcc --help for details.
emcc can be influenced by a few environment variables:
EMCC_DEBUG - "1" will log out useful information during compilation, as well as
save each compiler step as an emcc-* file in the temp dir
(by default /tmp/emscripten_temp). "2" will save additional emcc-*
steps, that would normally not be separately produced (so this
slows down compilation).
"""
from tools.toolchain_profiler import ToolchainProfiler
import base64
import json
import logging
import os
import re
import shlex
import shutil
import stat
import sys
import time
from enum import Enum, unique, auto
from subprocess import PIPE
from urllib.parse import quote
import emscripten
from tools import shared, system_libs
from tools import colored_logger, diagnostics, building
from tools.shared import unsuffixed, unsuffixed_basename, WINDOWS, safe_copy
from tools.shared import run_process, read_and_preprocess, exit_with_error, DEBUG
from tools.shared import do_replace, strip_prefix
from tools.response_file import substitute_response_files
from tools.minimal_runtime_shell import generate_minimal_runtime_html
import tools.line_endings
from tools import js_manipulation
from tools import wasm2c
from tools import webassembly
from tools import config
from tools.settings import settings, MEM_SIZE_SETTINGS, COMPILE_TIME_SETTINGS
from tools.utils import read_file, write_file, read_binary
logger = logging.getLogger('emcc')
# endings = dot + a suffix, safe to test by filename.endswith(endings)
C_ENDINGS = ('.c', '.i')
CXX_ENDINGS = ('.cpp', '.cxx', '.cc', '.c++', '.CPP', '.CXX', '.C', '.CC', '.C++', '.ii')
OBJC_ENDINGS = ('.m', '.mi')
OBJCXX_ENDINGS = ('.mm', '.mii')
ASSEMBLY_CPP_ENDINGS = ('.S',)
SPECIAL_ENDINGLESS_FILENAMES = (os.devnull,)
SOURCE_ENDINGS = C_ENDINGS + CXX_ENDINGS + OBJC_ENDINGS + OBJCXX_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES + ASSEMBLY_CPP_ENDINGS
C_ENDINGS = C_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES # consider the special endingless filenames like /dev/null to be C
EXECUTABLE_ENDINGS = ('.wasm', '.html', '.js', '.mjs', '.out', '')
DYNAMICLIB_ENDINGS = ('.dylib', '.so') # Windows .dll suffix is not included in this list, since those are never linked to directly on the command line.
STATICLIB_ENDINGS = ('.a',)
ASSEMBLY_ENDINGS = ('.ll', '.s')
HEADER_ENDINGS = ('.h', '.hxx', '.hpp', '.hh', '.H', '.HXX', '.HPP', '.HH')
# Supported LLD flags which we will pass through to the linker.
SUPPORTED_LINKER_FLAGS = (
'--start-group', '--end-group',
'-(', '-)',
'--whole-archive', '--no-whole-archive',
'-whole-archive', '-no-whole-archive'
)
# Unsupported LLD flags which we will ignore.
# Maps to true if the flag takes an argument.
UNSUPPORTED_LLD_FLAGS = {
# macOS-specific linker flag that libtool (ltmain.sh) will add if macOS is detected.
'-bind_at_load': False,
'-M': False,
# wasm-ld doesn't support soname or other dynamic linking flags (yet). Ignore them
# in order to aid build systems that want to pass these flags.
'-soname': True,
'-allow-shlib-undefined': False,
'-rpath': True,
'-rpath-link': True,
'-version-script': True,
}
DEFAULT_ASYNCIFY_IMPORTS = [
'emscripten_sleep', 'emscripten_wget', 'emscripten_wget_data', 'emscripten_idb_load',
'emscripten_idb_store', 'emscripten_idb_delete', 'emscripten_idb_exists',
'emscripten_idb_load_blob', 'emscripten_idb_store_blob', 'SDL_Delay',
'emscripten_scan_registers', 'emscripten_lazy_load_code',
'emscripten_fiber_swap',
'wasi_snapshot_preview1.fd_sync', '__wasi_fd_sync', '_emval_await',
'dlopen', '__asyncjs__*'
]
# Target options
final_js = None
UBSAN_SANITIZERS = {
'alignment',
'bool',
'builtin',
'bounds',
'enum',
'float-cast-overflow',
'float-divide-by-zero',
'function',
'implicit-unsigned-integer-truncation',
'implicit-signed-integer-truncation',
'implicit-integer-sign-change',
'integer-divide-by-zero',
'nonnull-attribute',
'null',
'nullability-arg',
'nullability-assign',
'nullability-return',
'object-size',
'pointer-overflow',
'return',
'returns-nonnull-attribute',
'shift',
'signed-integer-overflow',
'unreachable',
'unsigned-integer-overflow',
'vla-bound',
'vptr',
'undefined',
'undefined-trap',
'implicit-integer-truncation',
'implicit-integer-arithmetic-value-change',
'implicit-conversion',
'integer',
'nullability',
}
VALID_ENVIRONMENTS = ('web', 'webview', 'worker', 'node', 'shell')
SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx']
SIMD_NEON_FLAGS = ['-mfpu=neon']
# this function uses the global 'final' variable, which contains the current
# final output file. if a method alters final, and calls this method, then it
# must modify final globally (i.e. it can't receive final as a param and
# return it)
# TODO: refactor all this, a singleton that abstracts over the final output
# and saving of intermediates
def save_intermediate(name, suffix='js'):
if not DEBUG:
return
if not final_js:
logger.debug(f'(not saving intermediate {name} because not generating JS)')
return
building.save_intermediate(final_js, f'{name}.{suffix}')
def save_intermediate_with_wasm(name, wasm_binary):
if not DEBUG:
return
save_intermediate(name) # save the js
building.save_intermediate(wasm_binary, name + '.wasm')
def base64_encode(b):
b64 = base64.b64encode(b)
return b64.decode('ascii')
@unique
class OFormat(Enum):
# Output a relocatable object file. We use this
# today for `-r` and `-shared`.
OBJECT = auto()
WASM = auto()
JS = auto()
MJS = auto()
HTML = auto()
BARE = auto()
@unique
class Mode(Enum):
PREPROCESS_ONLY = auto()
PCH = auto()
COMPILE_ONLY = auto()
POST_LINK_ONLY = auto()
COMPILE_AND_LINK = auto()
class EmccState:
def __init__(self, args):
self.mode = Mode.COMPILE_AND_LINK
self.orig_args = args
self.has_dash_c = False
self.has_dash_E = False
self.has_dash_S = False
self.link_flags = []
self.lib_dirs = []
self.forced_stdlibs = []
def add_link_flag(state, i, f):
if f.startswith('-L'):
state.lib_dirs.append(f[2:])
state.link_flags.append((i, f))
class EmccOptions:
def __init__(self):
self.output_file = None
self.post_link = False
self.executable = False
self.compiler_wrapper = None
self.oformat = None
self.requested_debug = ''
self.profiling_funcs = False
self.tracing = False
self.emit_symbol_map = False
self.use_closure_compiler = None
self.closure_args = []
self.js_transform = None
self.pre_js = '' # before all js
self.post_js = '' # after all js
self.extern_pre_js = '' # before all js, external to optimized code
self.extern_post_js = '' # after all js, external to optimized code
self.preload_files = []
self.embed_files = []
self.exclude_files = []
self.ignore_dynamic_linking = False
self.shell_path = shared.path_from_root('src', 'shell.html')
self.source_map_base = ''
self.emrun = False
self.cpu_profiler = False
self.thread_profiler = False
self.memory_profiler = False
self.memory_init_file = None
self.use_preload_cache = False
self.use_preload_plugins = False
self.default_object_extension = '.o'
self.valid_abspaths = []
self.cfi = False
# Specifies the line ending format to use for all generated text files.
# Defaults to using the native EOL on each platform (\r\n on Windows, \n on
# Linux & MacOS)
self.output_eol = os.linesep
self.no_entry = False
self.shared = False
self.relocatable = False
def will_metadce():
# The metadce JS parsing code does not currently support the JS that gets generated
# when assertions are enabled.
if settings.ASSERTIONS:
return False
return settings.OPT_LEVEL >= 3 or settings.SHRINK_LEVEL >= 1
def setup_environment_settings():
# Environment setting based on user input
environments = settings.ENVIRONMENT.split(',')
if any([x for x in environments if x not in VALID_ENVIRONMENTS]):
exit_with_error(f'Invalid environment specified in "ENVIRONMENT": {settings.ENVIRONMENT}. Should be one of: {",".join(VALID_ENVIRONMENTS)}')
settings.ENVIRONMENT_MAY_BE_WEB = not settings.ENVIRONMENT or 'web' in environments
settings.ENVIRONMENT_MAY_BE_WEBVIEW = not settings.ENVIRONMENT or 'webview' in environments
settings.ENVIRONMENT_MAY_BE_NODE = not settings.ENVIRONMENT or 'node' in environments
settings.ENVIRONMENT_MAY_BE_SHELL = not settings.ENVIRONMENT or 'shell' in environments
# The worker case also includes Node.js workers when pthreads are
# enabled and Node.js is one of the supported environments for the build to
# run on. Node.js workers are detected as a combination of
# ENVIRONMENT_IS_WORKER and ENVIRONMENT_IS_NODE.
settings.ENVIRONMENT_MAY_BE_WORKER = \
not settings.ENVIRONMENT or \
'worker' in environments or \
(settings.ENVIRONMENT_MAY_BE_NODE and settings.USE_PTHREADS)
if not settings.ENVIRONMENT_MAY_BE_WORKER and settings.PROXY_TO_WORKER:
exit_with_error('If you specify --proxy-to-worker and specify a "-s ENVIRONMENT=" directive, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
if not settings.ENVIRONMENT_MAY_BE_WORKER and settings.USE_PTHREADS:
exit_with_error('When building with multithreading enabled and a "-s ENVIRONMENT=" directive is specified, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
def minify_whitespace():
return settings.OPT_LEVEL >= 2 and settings.DEBUG_LEVEL == 0
def embed_memfile():
return (settings.SINGLE_FILE or
(settings.MEM_INIT_METHOD == 0 and
(not settings.MAIN_MODULE and
not settings.SIDE_MODULE and
not settings.GENERATE_SOURCE_MAP)))
def expand_byte_size_suffixes(value):
"""Given a string with KB/MB size suffixes, such as "32MB", computes how
many bytes that is and returns it as an integer.
"""
value = value.strip()
match = re.match(r'^(\d+)\s*([kmgt]?b)?$', value, re.I)
if not match:
exit_with_error("invalid byte size `%s`. Valid suffixes are: kb, mb, gb, tb" % value)
value, suffix = match.groups()
value = int(value)
if suffix:
size_suffixes = {suffix: 1024 ** i for i, suffix in enumerate(['b', 'kb', 'mb', 'gb', 'tb'])}
value *= size_suffixes[suffix.lower()]
return value
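# Illustrative expansions implied by the regex above:
#   expand_byte_size_suffixes('32MB')  -> 32 * 1024 * 1024 == 33554432
#   expand_byte_size_suffixes('64 kb') -> 64 * 1024 == 65536
#   expand_byte_size_suffixes('1024')  -> 1024 (no suffix means plain bytes)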
def apply_settings(changes):
"""Take a map of users settings {NAME: VALUE} and apply them to the global
settings object.
"""
def standardize_setting_change(key, value):
# boolean NO_X settings are aliases for X
# (note that *non*-boolean setting values have special meanings,
# and we can't just flip them, so leave them as-is to be
# handled in a special way later)
if key.startswith('NO_') and value in ('0', '1'):
key = strip_prefix(key, 'NO_')
value = str(1 - int(value))
return key, value
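# For example (illustrative), a user-supplied `-s NO_EXIT_RUNTIME=1` is
# rewritten here to `EXIT_RUNTIME=0` before being applied below.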
for key, value in changes.items():
key, value = standardize_setting_change(key, value)
if key in settings.internal_settings:
exit_with_error('%s is an internal setting and cannot be set from command line', key)
# map legacy settings which have aliases to the new names
# but keep the original key so errors are correctly reported via the `setattr` below
user_key = key
if key in settings.legacy_settings and key in settings.alt_names:
key = settings.alt_names[key]
# In those settings fields that represent amount of memory, translate suffixes to multiples of 1024.
if key in MEM_SIZE_SETTINGS:
value = str(expand_byte_size_suffixes(value))
filename = None
if value and value[0] == '@':
filename = strip_prefix(value, '@')
if not os.path.exists(filename):
exit_with_error('%s: file not found parsing argument: %s=%s' % (filename, key, value))
value = read_file(filename).strip()
else:
value = value.replace('\\', '\\\\')
existing = getattr(settings, user_key, None)
expect_list = type(existing) == list
if filename and expect_list and value.strip()[0] != '[':
# Prefer simpler one-line-per value parser
value = parse_symbol_list_file(value)
else:
try:
value = parse_value(value, expect_list)
except Exception as e:
exit_with_error('a problem occurred in evaluating the content after a "-s", specifically "%s=%s": %s', key, value, str(e))
# Do some basic type checking by comparing to the existing settings.
# Sadly we can't do this generically in the SettingsManager since there are settings
# that do change types internally over time.
# We only currently worry about lists vs non-lists.
if expect_list != (type(value) == list):
exit_with_error('setting `%s` expects `%s` but got `%s`' % (user_key, type(existing), type(value)))
setattr(settings, user_key, value)
if key == 'EXPORTED_FUNCTIONS':
# used for warnings in emscripten.py
settings.USER_EXPORTED_FUNCTIONS = settings.EXPORTED_FUNCTIONS.copy()
# TODO(sbc): Remove this legacy way.
if key == 'WASM_OBJECT_FILES':
settings.LTO = 0 if value else 'full'
def is_ar_file_with_missing_index(archive_file):
# We parse the archive header ourselves because llvm-nm --print-armap is slower and less
# reliable.
# See: https://github.com/emscripten-core/emscripten/issues/10195
archive_header = b'!<arch>\n'
file_header_size = 60
with open(archive_file, 'rb') as f:
header = f.read(len(archive_header))
if header != archive_header:
# This is not even an ar file
return False
file_header = f.read(file_header_size)
if len(file_header) != file_header_size:
# We don't have any file entries at all so we don't consider the index missing
return False
name = file_header[:16].strip()
# If '/' is the name of the first file we have an index
return name != b'/'
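# For reference, the on-disk layout checked above (standard `ar` format):
#   b'!<arch>\n'  -- 8-byte global magic
#   60-byte member header, whose first 16 bytes hold the member name
#   an archive index (as written by ranlib/emar) appears as a first member named '/'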
def ensure_archive_index(archive_file):
# Fastcomp linking works without archive indexes.
if not settings.AUTO_ARCHIVE_INDEXES:
return
if is_ar_file_with_missing_index(archive_file):
diagnostics.warning('emcc', '%s: archive is missing an index; Use emar when creating libraries to ensure an index is created', archive_file)
diagnostics.warning('emcc', '%s: adding index', archive_file)
run_process([shared.LLVM_RANLIB, archive_file])
@ToolchainProfiler.profile_block('JS symbol generation')
def get_all_js_syms():
# Runs the js compiler to generate a list of all symbols available in the JS
# libraries. This must be done separately for each linker invocation since the
# list of symbols depends on what settings are used.
# TODO(sbc): Find a way to optimize this. Potentially we could add a super-set
# mode of the js compiler that would generate a list of all possible symbols
# that could be checked in.
old_full = settings.INCLUDE_FULL_LIBRARY
try:
# Temporarily define INCLUDE_FULL_LIBRARY since we want a full list
# of all available JS library functions.
settings.INCLUDE_FULL_LIBRARY = True
settings.ONLY_CALC_JS_SYMBOLS = True
emscripten.generate_struct_info()
glue, forwarded_data = emscripten.compile_settings()
forwarded_json = json.loads(forwarded_data)
library_syms = set()
for name in forwarded_json['libraryFunctions']:
if shared.is_c_symbol(name):
name = shared.demangle_c_symbol_name(name)
library_syms.add(name)
finally:
settings.ONLY_CALC_JS_SYMBOLS = False
settings.INCLUDE_FULL_LIBRARY = old_full
return library_syms
def filter_link_flags(flags, using_lld):
def is_supported(f):
if using_lld:
for flag, takes_arg in UNSUPPORTED_LLD_FLAGS.items():
# lld allows various flags to have either a single -foo or double --foo
if f.startswith(flag) or f.startswith('-' + flag):
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
# Skip the next argument if this linker flag takes an argument and that
# argument was not specified separately (i.e. it was specified as a
# single arg containing an `=` char).
skip_next = takes_arg and '=' not in f
return False, skip_next
return True, False
else:
if f in SUPPORTED_LINKER_FLAGS:
return True, False
# Silently ignore -l/-L flags when not using lld. If using lld allow
# them to pass through the linker
if f.startswith('-l') or f.startswith('-L'):
return False, False
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
return False, False
results = []
skip_next = False
for f in flags:
if skip_next:
skip_next = False
continue
keep, skip_next = is_supported(f[1])
if keep:
results.append(f)
return results
def fix_windows_newlines(text):
# Avoid duplicating \r\n to \r\r\n when writing out text.
if WINDOWS:
text = text.replace('\r\n', '\n')
return text
def cxx_to_c_compiler(cxx):
# Convert C++ compiler name into C compiler name
dirname, basename = os.path.split(cxx)
basename = basename.replace('clang++', 'clang').replace('g++', 'gcc').replace('em++', 'emcc')
return os.path.join(dirname, basename)
def get_binaryen_passes():
# run the binaryen optimizer in -O2+. in -O0 we don't need it obviously, while
# in -O1 we don't run it as the LLVM optimizer has been run, and it does the
# great majority of the work; not running the binaryen optimizer in that case
# keeps -O1 mostly-optimized while compiling quickly and without rewriting
# DWARF etc.
run_binaryen_optimizer = settings.OPT_LEVEL >= 2
passes = []
# safe heap must run before post-emscripten, so post-emscripten can apply the sbrk ptr
if settings.SAFE_HEAP:
passes += ['--safe-heap']
if settings.MEMORY64 == 2:
passes += ['--memory64-lowering']
if run_binaryen_optimizer:
passes += ['--post-emscripten']
if not settings.EXIT_RUNTIME:
passes += ['--no-exit-runtime']
if run_binaryen_optimizer:
passes += [building.opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL)]
# when optimizing, use the fact that low memory is never used (1024 is a
# hardcoded value in the binaryen pass)
if run_binaryen_optimizer and settings.GLOBAL_BASE >= 1024:
passes += ['--low-memory-unused']
if settings.AUTODEBUG:
# adding '--flatten' here may make these even more effective
passes += ['--instrument-locals']
passes += ['--log-execution']
passes += ['--instrument-memory']
if settings.LEGALIZE_JS_FFI:
# legalize it again now, as the instrumentation may need it
passes += ['--legalize-js-interface']
if settings.EMULATE_FUNCTION_POINTER_CASTS:
# note that this pass must run before asyncify, as if it runs afterwards we only
# generate the byn$fpcast_emu functions after asyncify runs, and so we wouldn't
# be able to further process them.
passes += ['--fpcast-emu']
if settings.ASYNCIFY:
passes += ['--asyncify']
if settings.ASSERTIONS:
passes += ['--pass-arg=asyncify-asserts']
if settings.ASYNCIFY_ADVISE:
passes += ['--pass-arg=asyncify-verbose']
if settings.ASYNCIFY_IGNORE_INDIRECT:
passes += ['--pass-arg=asyncify-ignore-indirect']
passes += ['--pass-arg=asyncify-imports@%s' % ','.join(settings.ASYNCIFY_IMPORTS)]
# shell escaping can be confusing; try to emit useful warnings
def check_human_readable_list(items):
for item in items:
if item.count('(') != item.count(')'):
logger.warning('emcc: ASYNCIFY list contains an item without balanced parentheses ("(", ")"):')
logger.warning(' ' + item)
logger.warning('This may indicate improper escaping that led to splitting inside your names.')
logger.warning('Try using a response file. e.g: [email protected]. The format is a simple')
logger.warning('text file, one line per function.')
break
if settings.ASYNCIFY_REMOVE:
check_human_readable_list(settings.ASYNCIFY_REMOVE)
passes += ['--pass-arg=asyncify-removelist@%s' % ','.join(settings.ASYNCIFY_REMOVE)]
if settings.ASYNCIFY_ADD:
check_human_readable_list(settings.ASYNCIFY_ADD)
passes += ['--pass-arg=asyncify-addlist@%s' % ','.join(settings.ASYNCIFY_ADD)]
if settings.ASYNCIFY_ONLY:
check_human_readable_list(settings.ASYNCIFY_ONLY)
passes += ['--pass-arg=asyncify-onlylist@%s' % ','.join(settings.ASYNCIFY_ONLY)]
if settings.BINARYEN_IGNORE_IMPLICIT_TRAPS:
passes += ['--ignore-implicit-traps']
# normally we can assume the memory, if imported, has not been modified
# beforehand (in fact, in most cases the memory is not even imported anyhow,
# but it is still safe to pass the flag), and is therefore filled with zeros.
# the one exception is dynamic linking of a side module: the main module is ok
# as it is loaded first, but the side module may be assigned memory that was
# previously used.
if run_binaryen_optimizer and not settings.SIDE_MODULE:
passes += ['--zero-filled-memory']
if settings.BINARYEN_EXTRA_PASSES:
# BINARYEN_EXTRA_PASSES is comma-separated, and we support both '-'-prefixed and
# unprefixed pass names
extras = settings.BINARYEN_EXTRA_PASSES.split(',')
passes += [('--' + p) if p[0] != '-' else p for p in extras if p]
return passes
def make_js_executable(script):
src = read_file(script)
cmd = shared.shlex_join(config.JS_ENGINE)
if not os.path.isabs(config.JS_ENGINE[0]):
# TODO: use whereis etc. And how about non-*NIX?
cmd = '/usr/bin/env -S ' + cmd
logger.debug('adding `#!` to JavaScript file: %s' % cmd)
# add shebang
with open(script, 'w') as f:
f.write('#!%s\n' % cmd)
f.write(src)
try:
os.chmod(script, stat.S_IMODE(os.stat(script).st_mode) | stat.S_IXUSR) # make executable
except OSError:
pass # can fail if e.g. writing the executable to /dev/null
def do_split_module(wasm_file):
os.rename(wasm_file, wasm_file + '.orig')
args = ['--instrument']
building.run_binaryen_command('wasm-split', wasm_file + '.orig', outfile=wasm_file, args=args)
def is_dash_s_for_emcc(args, i):
# -s OPT=VALUE or -s OPT or -sOPT are all interpreted as emscripten flags.
# -s by itself is a linker option (alias for --strip-all)
if args[i] == '-s':
if len(args) <= i + 1:
return False
arg = args[i + 1]
else:
arg = strip_prefix(args[i], '-s')
arg = arg.split('=')[0]
return arg.isidentifier() and arg.isupper()
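# Illustrative classification by the check above:
#   ['-s', 'EXPORT_ALL=1'] -> emscripten setting (next arg is an uppercase identifier)
#   ['-sEXPORT_ALL=1']     -> emscripten setting (joined form)
#   ['-s'] as the last arg -> linker flag (alias for --strip-all)
#   ['-s', 'foo.o']        -> linker flag; 'foo.o' is not an uppercase identifier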
def filter_out_dynamic_libs(options, inputs):
# Filters out "fake" dynamic libraries that are really just intermediate object files.
def check(input_file):
if get_file_suffix(input_file) in DYNAMICLIB_ENDINGS and not building.is_wasm_dylib(input_file):
if not options.ignore_dynamic_linking:
diagnostics.warning('emcc', 'ignoring dynamic library %s because not compiling to JS or HTML, remember to link it when compiling to JS or HTML at the end', os.path.basename(input_file))
return False
else:
return True
return [f for f in inputs if check(f)]
def filter_out_duplicate_dynamic_libs(inputs):
seen = set()
# Filter out duplicate "fake" shared libraries (intermediate object files).
# See test_core.py:test_redundant_link
def check(input_file):
if get_file_suffix(input_file) in DYNAMICLIB_ENDINGS and not building.is_wasm_dylib(input_file):
abspath = os.path.abspath(input_file)
if abspath in seen:
return False
seen.add(abspath)
return True
return [f for f in inputs if check(f)]
def process_dynamic_libs(dylibs, lib_dirs):
extras = []
seen = set()
to_process = dylibs.copy()
while to_process:
dylib = to_process.pop()
dylink = webassembly.parse_dylink_section(dylib)
for needed in dylink.needed:
if needed in seen:
continue
path = find_library(needed, lib_dirs)
if path:
extras.append(path)
seen.add(needed)
else:
exit_with_error(f'{os.path.normpath(dylib)}: shared library dependency not found: `{needed}`')
to_process.append(path)
dylibs += extras
for dylib in dylibs:
exports = webassembly.get_exports(dylib)
exports = set(e.name for e in exports)
settings.SIDE_MODULE_EXPORTS.extend(exports)
imports = webassembly.get_imports(dylib)
imports = [i.field for i in imports if i.kind in (webassembly.ExternType.FUNC, webassembly.ExternType.GLOBAL)]
# For now we ignore `invoke_` functions imported by side modules and rely
# on the dynamic linker to create them on the fly.
# TODO(sbc): Integrate with metadata['invokeFuncs'] that comes from the
# main module to avoid creating new invoke functions at runtime.
imports = set(i for i in imports if not i.startswith('invoke_'))
weak_imports = imports.intersection(exports)
strong_imports = imports.difference(exports)
logger.debug('Adding symbols requirements from `%s`: %s', dylib, imports)
mangled_imports = [shared.asmjs_mangle(e) for e in imports]
mangled_strong_imports = [shared.asmjs_mangle(e) for e in strong_imports]
settings.SIDE_MODULE_IMPORTS.extend(mangled_imports)
settings.EXPORTED_FUNCTIONS.extend(mangled_strong_imports)
settings.EXPORT_IF_DEFINED.extend(weak_imports)
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.extend(strong_imports)
building.user_requested_exports.update(mangled_strong_imports)
def unmangle_symbols_from_cmdline(symbols):
def unmangle(x):
return x.replace('.', ' ').replace('#', '&').replace('?', ',')
if type(symbols) is list:
return [unmangle(x) for x in symbols]
return unmangle(symbols)
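# Pure character mapping, e.g. (illustrative):
#   unmangle_symbols_from_cmdline('foo.bar#baz?qux') -> 'foo bar&baz,qux'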
def parse_s_args(args):
settings_changes = []
for i in range(len(args)):
if args[i].startswith('-s'):
if is_dash_s_for_emcc(args, i):
if args[i] == '-s':
key = args[i + 1]
args[i + 1] = ''
else:
key = strip_prefix(args[i], '-s')
args[i] = ''
# If no '=' is specified, default to 1
if '=' not in key:
key += '=1'
# Special handling of browser version targets. A version -1 means that the specific version
# is not supported at all. Replace those with INT32_MAX to make it possible to compare e.g.
# #if MIN_FIREFOX_VERSION < 68
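# e.g. `-s MIN_FIREFOX_VERSION=-1` is rewritten to MIN_FIREFOX_VERSION=0x7FFFFFFF here.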
if re.match(r'MIN_.*_VERSION(=.*)?', key):
try:
if int(key.split('=')[1]) < 0:
key = key.split('=')[0] + '=0x7FFFFFFF'
except Exception:
pass
settings_changes.append(key)
newargs = [a for a in args if a]
return (settings_changes, newargs)
def emsdk_ldflags(user_args):
if os.environ.get('EMMAKEN_NO_SDK'):
return []
library_paths = [
shared.Cache.get_lib_dir(absolute=True)
]
ldflags = ['-L' + l for l in library_paths]
if '-nostdlib' in user_args:
return ldflags
return ldflags
def emsdk_cflags(user_args):
cflags = ['--sysroot=' + shared.Cache.get_sysroot(absolute=True)]
def array_contains_any_of(hay, needles):
for n in needles:
if n in hay:
return True
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER) or array_contains_any_of(user_args, SIMD_NEON_FLAGS):
if '-msimd128' not in user_args:
exit_with_error('Passing any of ' + ', '.join(SIMD_INTEL_FEATURE_TOWER + SIMD_NEON_FLAGS) + ' flags also requires passing -msimd128!')
cflags += ['-D__SSE__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[1:]):
cflags += ['-D__SSE2__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[2:]):
cflags += ['-D__SSE3__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[3:]):
cflags += ['-D__SSSE3__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[4:]):
cflags += ['-D__SSE4_1__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[5:]):
cflags += ['-D__SSE4_2__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[6:]):
cflags += ['-D__AVX__=1']
if array_contains_any_of(user_args, SIMD_NEON_FLAGS):
cflags += ['-D__ARM_NEON__=1']
return cflags + ['-Xclang', '-iwithsysroot' + os.path.join('/include', 'compat')]
def get_clang_flags():
return ['-target', get_llvm_target()]
def get_llvm_target():
if settings.MEMORY64:
return 'wasm64-unknown-emscripten'
else:
return 'wasm32-unknown-emscripten'
cflags = None
def get_cflags(options, user_args):
global cflags
if cflags:
return cflags
# Flags we pass to the compiler when building C/C++ code
# We add these to the user's flags (newargs), but not when building .s or .S assembly files
cflags = get_clang_flags()
if options.tracing:
cflags.append('-D__EMSCRIPTEN_TRACING__=1')
if settings.USE_PTHREADS:
cflags.append('-D__EMSCRIPTEN_PTHREADS__=1')
if not settings.STRICT:
# The preprocessor define EMSCRIPTEN is deprecated. Don't pass it to code
# in strict mode. Code should use the define __EMSCRIPTEN__ instead.
cflags.append('-DEMSCRIPTEN')
# if exception catching is disabled, we can prevent that code from being
# generated in the frontend
if settings.DISABLE_EXCEPTION_CATCHING and not settings.EXCEPTION_HANDLING:
cflags.append('-fignore-exceptions')
if settings.INLINING_LIMIT:
cflags.append('-fno-inline-functions')
if settings.RELOCATABLE:
cflags.append('-fPIC')
cflags.append('-fvisibility=default')
if settings.LTO:
if not any(a.startswith('-flto') for a in user_args):
cflags.append('-flto=' + settings.LTO)
else:
# In LTO mode these args get passed instead at link time when the backend runs.
for a in building.llvm_backend_args():
cflags += ['-mllvm', a]
# Set the LIBCPP ABI version to at least 2 so that we get nicely aligned string
# data and other nice fixes.
cflags += [# '-fno-threadsafe-statics', # disabled due to issue 1289
'-D__EMSCRIPTEN_major__=' + str(shared.EMSCRIPTEN_VERSION_MAJOR),
'-D__EMSCRIPTEN_minor__=' + str(shared.EMSCRIPTEN_VERSION_MINOR),
'-D__EMSCRIPTEN_tiny__=' + str(shared.EMSCRIPTEN_VERSION_TINY),
'-D_LIBCPP_ABI_VERSION=2']
# For compatibility with the fastcomp compiler that defined these
cflags += ['-Dunix',
'-D__unix',
'-D__unix__']
# Changes to default clang behavior
# Implicit functions can cause horribly confusing function pointer type errors, see #2175
# If your codebase really needs them - very unrecommended! - you can disable the error with
# -Wno-error=implicit-function-declaration
# or disable even a warning about it with
# -Wno-implicit-function-declaration
cflags += ['-Werror=implicit-function-declaration']
system_libs.add_ports_cflags(cflags, settings)
if os.environ.get('EMMAKEN_NO_SDK') or '-nostdinc' in user_args:
return cflags
cflags += emsdk_cflags(user_args)
return cflags
def get_file_suffix(filename):
"""Parses the essential suffix of a filename, discarding Unix-style version
numbers in the name. For example for 'libz.so.1.2.8' returns '.so'"""
if filename in SPECIAL_ENDINGLESS_FILENAMES:
return filename
while filename:
filename, suffix = os.path.splitext(filename)
if not suffix[1:].isdigit():
return suffix
return ''
def get_library_basename(filename):
"""Similar to get_file_suffix this strips off all numeric suffixes and then
then final non-numeric one. For example for 'libz.so.1.2.8' returns 'libz'"""
filename = os.path.basename(filename)
while filename:
filename, suffix = os.path.splitext(filename)
# Keep stripping suffixes until we strip a non-numeric one.
if not suffix[1:].isdigit():
return filename
def get_secondary_target(target, ext):
# Depending on the output format emscripten creates zero or more secondary
# output files (e.g. the .wasm file when creating JS output, or the
# .js and the .wasm file when creating html output).
# This function names the secondary output files, while ensuring they
# never collide with the primary one.
base = unsuffixed(target)
if get_file_suffix(target) == ext:
base += '_'
return base + ext
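# Illustrative results:
#   get_secondary_target('out.html', '.wasm') -> 'out.wasm'
#   get_secondary_target('out.wasm', '.wasm') -> 'out_.wasm'  (avoids clobbering the primary output)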
def in_temp(name):
temp_dir = shared.get_emscripten_temp_dir()
return os.path.join(temp_dir, os.path.basename(name))
def dedup_list(lst):
rtn = []
for item in lst:
if item not in rtn:
rtn.append(item)
return rtn
def move_file(src, dst):
logging.debug('move: %s -> %s', src, dst)
if os.path.isdir(dst):
exit_with_error(f'cannot write output file `{dst}`: Is a directory')
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if src == dst:
return
if dst == os.devnull:
return
shutil.move(src, dst)
run_via_emxx = False
#
# Main run() function
#
def run(args):
# Additional compiler flags that we treat as if they were passed to us on the
# commandline
EMCC_CFLAGS = os.environ.get('EMCC_CFLAGS')
if DEBUG:
cmd = shared.shlex_join(args)
if EMCC_CFLAGS:
cmd += ' + ' + EMCC_CFLAGS
logger.warning(f'invocation: {cmd} (in {os.getcwd()})')
if EMCC_CFLAGS:
args.extend(shlex.split(EMCC_CFLAGS))
# Strip args[0] (program name)
args = args[1:]
misc_temp_files = shared.configuration.get_temp_files()
# Handle some global flags
# read response files very early on
try:
args = substitute_response_files(args)
except IOError as e:
exit_with_error(e)
if '--help' in args:
# Documentation for emcc and its options must be updated in:
# site/source/docs/tools_reference/emcc.rst
# This then gets built (via: `make -C site text`) to:
# site/build/text/docs/tools_reference/emcc.txt
# This then needs to be copied to its final home in docs/emcc.txt from where
# we read it here. We have CI rules that ensure it's always up-to-date.
with open(shared.path_from_root('docs', 'emcc.txt'), 'r') as f:
print(f.read())
print('''
------------------------------------------------------------------
emcc: supported targets: llvm bitcode, WebAssembly, NOT elf
(autoconf likes to see elf above to enable shared object support)
''')
return 0
if '--version' in args:
print(version_string())
print('''\
Copyright (C) 2014 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''')
return 0
if run_via_emxx:
clang = shared.CLANG_CXX
else:
clang = shared.CLANG_CC
if len(args) == 1 and args[0] == '-v': # -v with no inputs
# autoconf likes to see 'GNU' in the output to enable shared object support
print(version_string(), file=sys.stderr)
return shared.check_call([clang, '-v'] + get_clang_flags(), check=False).returncode
if '-dumpmachine' in args:
print(get_llvm_target())
return 0
if '-dumpversion' in args: # gcc's doc states "Print the compiler version [...] and don't do anything else."
print(shared.EMSCRIPTEN_VERSION)
return 0
if '--cflags' in args:
# fake running the command, to see the full args we pass to clang
args = [x for x in args if x != '--cflags']
with misc_temp_files.get_file(suffix='.o') as temp_target:
input_file = 'hello_world.c'
cmd = [shared.PYTHON, sys.argv[0], shared.path_from_root('tests', input_file), '-v', '-c', '-o', temp_target] + args
proc = run_process(cmd, stderr=PIPE, check=False)
if proc.returncode != 0:
print(proc.stderr)
exit_with_error('error getting cflags')
lines = [x for x in proc.stderr.splitlines() if clang in x and input_file in x]
parts = shlex.split(lines[0].replace('\\', '\\\\'))
parts = [x for x in parts if x not in ['-c', '-o', '-v', '-emit-llvm'] and input_file not in x and temp_target not in x]
print(shared.shlex_join(parts[1:]))
return 0
shared.check_sanity()
if '-print-search-dirs' in args:
return run_process([clang, '-print-search-dirs'], check=False).returncode
EMMAKEN_CFLAGS = os.environ.get('EMMAKEN_CFLAGS')
if EMMAKEN_CFLAGS:
args += shlex.split(EMMAKEN_CFLAGS)
if 'EMMAKEN_NO_SDK' in os.environ:
diagnostics.warning('deprecated', 'We hope to deprecate EMMAKEN_NO_SDK. See https://github.com/emscripten-core/emscripten/issues/14050 if you use this feature.')
## Process argument and setup the compiler
state = EmccState(args)
options, newargs, settings_map = phase_parse_arguments(state)
# For internal consistency, ensure we don't attempt to read or write any link time
# settings until we reach the linking phase.
settings.limit_settings(COMPILE_TIME_SETTINGS)
newargs, input_files = phase_setup(options, state, newargs, settings_map)
if state.mode == Mode.POST_LINK_ONLY:
settings.limit_settings(None)
target, wasm_target = phase_linker_setup(options, state, newargs, settings_map)
process_libraries(state, [])
if len(input_files) != 1:
exit_with_error('--post-link requires a single input file')
phase_post_link(options, state, input_files[0][1], wasm_target, target)
return 0
## Compile source code to object files
linker_inputs = phase_compile_inputs(options, state, newargs, input_files)
if state.mode != Mode.COMPILE_AND_LINK:
logger.debug('stopping after compile phase')
for flag in state.link_flags:
diagnostics.warning('unused-command-line-argument', "argument unused during compilation: '%s'" % flag[1])
for f in linker_inputs:
diagnostics.warning('unused-command-line-argument', "%s: linker input file unused because linking not done" % f[1])
return 0
# We have now passed the compile phase, allow reading/writing of all settings.
settings.limit_settings(None)
if options.output_file and options.output_file.startswith('-'):
exit_with_error(f'invalid output filename: `{options.output_file}`')
target, wasm_target = phase_linker_setup(options, state, newargs, settings_map)
# Link object files using wasm-ld or llvm-link (for bitcode linking)
linker_arguments = phase_calculate_linker_inputs(options, state, linker_inputs)
if options.oformat == OFormat.OBJECT:
logger.debug(f'link_to_object: {linker_arguments} -> {target}')
building.link_to_object(linker_arguments, target)
logger.debug('stopping after linking to object file')
return 0
phase_calculate_system_libraries(state, linker_arguments, linker_inputs, newargs)
phase_link(linker_arguments, wasm_target)
# Special handling for when the user passed '-Wl,--version'. In this case the linker
# does not create the output file, but just prints its version and exits with 0.
if '--version' in linker_arguments:
return 0
# TODO(sbc): In theory we should really run the whole pipeline even if the output is
# /dev/null, but that will take some refactoring
if target == os.devnull:
return 0
# Perform post-link steps (unless we are running bare mode)
if options.oformat != OFormat.BARE:
phase_post_link(options, state, wasm_target, wasm_target, target)
return 0
@ToolchainProfiler.profile_block('calculate linker inputs')
def phase_calculate_linker_inputs(options, state, linker_inputs):
using_lld = not (options.oformat == OFormat.OBJECT and settings.LTO)
state.link_flags = filter_link_flags(state.link_flags, using_lld)
# Decide what we will link
process_libraries(state, linker_inputs)
linker_args = [val for _, val in sorted(linker_inputs + state.link_flags)]
# If we are linking to an intermediate object then ignore other
# "fake" dynamic libraries, since otherwise we will end up with
# multiple copies in the final executable.
if options.oformat == OFormat.OBJECT or options.ignore_dynamic_linking:
linker_args = filter_out_dynamic_libs(options, linker_args)
else:
linker_args = filter_out_duplicate_dynamic_libs(linker_args)
if settings.MAIN_MODULE:
dylibs = [a for a in linker_args if building.is_wasm_dylib(a)]
process_dynamic_libs(dylibs, state.lib_dirs)
return linker_args
@ToolchainProfiler.profile_block('parse arguments')
def phase_parse_arguments(state):
"""The first phase of the compiler. Parse command line argument and
populate settings.
"""
newargs = state.orig_args.copy()
# Scan and strip emscripten specific cmdline warning flags.
# This needs to run before other cmdline flags have been parsed, so that
# warnings are properly printed during arg parse.
newargs = diagnostics.capture_warnings(newargs)
for i in range(len(newargs)):
if newargs[i] in ('-l', '-L', '-I'):
# Scan for individual -l/-L/-I arguments and concatenate the next arg on
# if there is no suffix
newargs[i] += newargs[i + 1]
newargs[i + 1] = ''
options, settings_changes, user_js_defines, newargs = parse_args(newargs)
if options.post_link or options.oformat == OFormat.BARE:
diagnostics.warning('experimental', '--oformat=bare/--post-link are experimental and subject to change.')
explicit_settings_changes, newargs = parse_s_args(newargs)
settings_changes += explicit_settings_changes
user_settings = {}
for s in settings_changes:
key, value = s.split('=', 1)
user_settings[key] = value
# STRICT is used when applying settings so it needs to be applied first,
# before calling `apply_settings`.
strict_cmdline = user_settings.get('STRICT')
if strict_cmdline:
settings.STRICT = int(strict_cmdline)
# Apply user -jsD settings
for s in user_js_defines:
settings[s[0]] = s[1]
# Apply -s settings in newargs here (after optimization levels, so they can override them)
apply_settings(user_settings)
return options, newargs, user_settings
@ToolchainProfiler.profile_block('setup')
def phase_setup(options, state, newargs, settings_map):
"""Second phase: configure and setup the compiler based on the specified settings and arguments.
"""
if settings.RUNTIME_LINKED_LIBS:
diagnostics.warning('deprecated', 'RUNTIME_LINKED_LIBS is deprecated; you can simply list the libraries directly on the commandline now')
newargs += settings.RUNTIME_LINKED_LIBS
def default_setting(name, new_default):
if name not in settings_map:
setattr(settings, name, new_default)
if settings.STRICT:
default_setting('DEFAULT_TO_CXX', 0)
# Find input files
# These three arrays are used to store arguments of different types for
# type-specific processing. In order to shuffle the arguments back together
# after processing, all of these arrays hold tuples (original_index, value).
# Note that the index part of the tuple can have a fractional part for input
# arguments that expand into multiple processed arguments, as in -Wl,-f1,-f2.
input_files = []
# find input files with a simple heuristic. we should really analyze
# based on a full understanding of gcc params, right now we just assume that
# what is left contains no more |-x OPT| things
skip = False
has_header_inputs = False
for i in range(len(newargs)):
if skip:
skip = False
continue
arg = newargs[i]
if arg in ('-MT', '-MF', '-MJ', '-MQ', '-D', '-U', '-o', '-x',
'-Xpreprocessor', '-include', '-imacros', '-idirafter',
'-iprefix', '-iwithprefix', '-iwithprefixbefore',
'-isysroot', '-imultilib', '-A', '-isystem', '-iquote',
'-install_name', '-compatibility_version',
'-current_version', '-I', '-L', '-include-pch',
'-Xlinker', '-Xclang'):
skip = True
if not arg.startswith('-'):
# we already removed -o <target>, so all these should be inputs
newargs[i] = ''
# os.devnull should always be reported as existing but there is a bug in windows
# python before 3.8:
# https://bugs.python.org/issue1311
if not os.path.exists(arg) and arg != os.devnull:
exit_with_error('%s: No such file or directory ("%s" was expected to be an input file, based on the commandline arguments provided)', arg, arg)
file_suffix = get_file_suffix(arg)
if file_suffix in HEADER_ENDINGS:
has_header_inputs = True
if file_suffix in STATICLIB_ENDINGS and not building.is_ar(arg):
if building.is_bitcode(arg):
message = f'{arg}: File has a suffix of a static library {STATICLIB_ENDINGS}, but instead is an LLVM bitcode file! When linking LLVM bitcode files use .bc or .o.'
else:
message = arg + ': Unknown format, not a static library!'
exit_with_error(message)
if file_suffix in DYNAMICLIB_ENDINGS and not building.is_bitcode(arg) and not building.is_wasm(arg):
# For shared libraries that are neither bitcode nor wasm, assume it's a local native
# library and attempt to find a library by the same name in our own library path.
# TODO(sbc): Do we really need this feature? See test_other.py:test_local_link
libname = strip_prefix(get_library_basename(arg), 'lib')
flag = '-l' + libname
diagnostics.warning('map-unrecognized-libraries', f'unrecognized file type: `{arg}`. Mapping to `{flag}` and hoping for the best')
add_link_flag(state, i, flag)
else:
input_files.append((i, arg))
elif arg.startswith('-L'):
add_link_flag(state, i, arg)
newargs[i] = ''
elif arg.startswith('-l'):
add_link_flag(state, i, arg)
newargs[i] = ''
elif arg.startswith('-Wl,'):
# Multiple comma separated link flags can be specified. Create fake
# fractional indices for these: -Wl,a,b,c,d at index 4 becomes:
# (4, a), (4.25, b), (4.5, c), (4.75, d)
link_flags_to_add = arg.split(',')[1:]
for flag_index, flag in enumerate(link_flags_to_add):
add_link_flag(state, i + float(flag_index) / len(link_flags_to_add), flag)
newargs[i] = ''
elif arg == '-Xlinker':
add_link_flag(state, i + 1, newargs[i + 1])
newargs[i] = ''
newargs[i + 1] = ''
elif arg == '-s':
# -s and some other compiler flags are normally passed onto the linker
# TODO(sbc): Pass this and other flags through when using lld
# link_flags.append((i, arg))
newargs[i] = ''
elif arg == '-':
input_files.append((i, arg))
newargs[i] = ''
if not input_files and not state.link_flags:
exit_with_error('no input files')
newargs = [a for a in newargs if a]
# SSEx is implemented on top of SIMD128 instruction set, but do not pass SSE flags to LLVM
# so it won't think about generating native x86 SSE code.
newargs = [x for x in newargs if x not in SIMD_INTEL_FEATURE_TOWER and x not in SIMD_NEON_FLAGS]
state.has_dash_c = '-c' in newargs
state.has_dash_S = '-S' in newargs
state.has_dash_E = '-E' in newargs
if options.post_link:
state.mode = Mode.POST_LINK_ONLY
elif state.has_dash_E or '-M' in newargs or '-MM' in newargs or '-fsyntax-only' in newargs:
state.mode = Mode.PREPROCESS_ONLY
elif has_header_inputs:
state.mode = Mode.PCH
elif state.has_dash_c or state.has_dash_S:
state.mode = Mode.COMPILE_ONLY
if state.mode in (Mode.COMPILE_ONLY, Mode.PREPROCESS_ONLY):
for key in settings_map:
if key not in COMPILE_TIME_SETTINGS:
diagnostics.warning('unused-command-line-argument', "linker setting ignored during compilation: '%s'" % key)
if state.has_dash_c:
if '-emit-llvm' in newargs:
options.default_object_extension = '.bc'
elif state.has_dash_S:
if '-emit-llvm' in newargs:
options.default_object_extension = '.ll'
else:
options.default_object_extension = '.s'
elif '-M' in newargs or '-MM' in newargs:
options.default_object_extension = '.mout' # not bitcode, not js; just the dependency rules of the input file
if options.output_file and len(input_files) > 1:
exit_with_error('cannot specify -o with -c/-S/-E/-M and multiple source files')
if settings.MAIN_MODULE or settings.SIDE_MODULE:
settings.RELOCATABLE = 1
if settings.USE_PTHREADS and '-pthread' not in newargs:
newargs += ['-pthread']
if 'DISABLE_EXCEPTION_CATCHING' in settings_map and 'EXCEPTION_CATCHING_ALLOWED' in settings_map:
# If we get here then the user specified both DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED
# on the command line. This is no longer valid so report either an error or a warning (for
# backwards compat with the old `DISABLE_EXCEPTION_CATCHING=2`).
if settings_map['DISABLE_EXCEPTION_CATCHING'] in ('0', '2'):
diagnostics.warning('deprecated', 'DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED')
else:
exit_with_error('DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive')
if settings.EXCEPTION_CATCHING_ALLOWED:
settings.DISABLE_EXCEPTION_CATCHING = 0
if settings.DISABLE_EXCEPTION_THROWING and not settings.DISABLE_EXCEPTION_CATCHING:
exit_with_error("DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0). If you don't want exceptions, set DISABLE_EXCEPTION_CATCHING to 1; if you do want exceptions, don't link with -fno-exceptions")
return (newargs, input_files)
@ToolchainProfiler.profile_block('linker_setup')
def phase_linker_setup(options, state, newargs, settings_map):
autoconf = os.environ.get('EMMAKEN_JUST_CONFIGURE') or 'conftest.c' in state.orig_args
if autoconf:
# configure tests want a more shell-like style, where we emit return codes on exit()
settings.EXIT_RUNTIME = 1
# use node.js raw filesystem access, to behave just like a native executable
settings.NODERAWFS = 1
# Add `#!` line to output JS and make it executable.
options.executable = True
ldflags = emsdk_ldflags(newargs)
for f in ldflags:
add_link_flag(state, sys.maxsize, f)
if options.emrun:
options.pre_js += read_file(shared.path_from_root('src', 'emrun_prejs.js')) + '\n'
options.post_js += read_file(shared.path_from_root('src', 'emrun_postjs.js')) + '\n'
# emrun mode waits on program exit
settings.EXIT_RUNTIME = 1
if options.cpu_profiler:
options.post_js += read_file(shared.path_from_root('src', 'cpuprofiler.js')) + '\n'
if options.memory_profiler:
settings.MEMORYPROFILER = 1
if options.thread_profiler:
options.post_js += read_file(shared.path_from_root('src', 'threadprofiler.js')) + '\n'
if options.memory_init_file is None:
options.memory_init_file = settings.OPT_LEVEL >= 2
# TODO: support source maps with js_transform
if options.js_transform and settings.GENERATE_SOURCE_MAP:
logger.warning('disabling source maps because a js transform is being done')
settings.GENERATE_SOURCE_MAP = 0
# options.output_file is the user-specified one, target is what we will generate
if options.output_file:
target = options.output_file
# check for the existence of the output directory now, to avoid having
# to do so repeatedly when each of the various output files (.mem, .wasm,
# etc) are written. This gives a more useful error message than the
# IOError and python backtrace that users would otherwise see.
dirname = os.path.dirname(target)
if dirname and not os.path.isdir(dirname):
exit_with_error("specified output file (%s) is in a directory that does not exist" % target)
elif autoconf:
# Autoconf expects the executable output file to be called `a.out`
target = 'a.out'
elif settings.SIDE_MODULE:
target = 'a.out.wasm'
else:
target = 'a.out.js'
final_suffix = get_file_suffix(target)
if settings.EXTRA_EXPORTED_RUNTIME_METHODS:
diagnostics.warning('deprecated', 'EXTRA_EXPORTED_RUNTIME_METHODS is deprecated, please use EXPORTED_RUNTIME_METHODS instead')
settings.EXPORTED_RUNTIME_METHODS += settings.EXTRA_EXPORTED_RUNTIME_METHODS
# If no output format was specified we try to infer the format based on
# the output filename extension.
if not options.oformat and (options.relocatable or (options.shared and not settings.SIDE_MODULE)):
# Until we have a better story for actually producing runtime shared libraries
# we support a compatibility mode where shared libraries are actually just
# object files linked with `wasm-ld --relocatable` or `llvm-link` in the case
# of LTO.
if final_suffix in EXECUTABLE_ENDINGS:
diagnostics.warning('emcc', '-shared/-r used with executable output suffix. This behaviour is deprecated. Please remove -shared/-r to build an executable or avoid the executable suffix (%s) when building object files.' % final_suffix)
else:
if options.shared:
diagnostics.warning('emcc', 'linking a library with `-shared` will emit a static object file. This is a form of emulation to support existing build systems. If you want to build a runtime shared library use the SIDE_MODULE setting.')
options.oformat = OFormat.OBJECT
if not options.oformat:
if settings.SIDE_MODULE or final_suffix == '.wasm':
options.oformat = OFormat.WASM
elif final_suffix == '.mjs':
options.oformat = OFormat.MJS
elif final_suffix == '.html':
options.oformat = OFormat.HTML
else:
options.oformat = OFormat.JS
if options.oformat == OFormat.MJS:
settings.EXPORT_ES6 = 1
settings.MODULARIZE = 1
if options.oformat in (OFormat.WASM, OFormat.BARE):
# If the user asks directly for a wasm file then this *is* the target
wasm_target = target
else:
# Otherwise the wasm file is produced alongside the final target.
wasm_target = get_secondary_target(target, '.wasm')
if settings.SAFE_HEAP not in [0, 1]:
exit_with_error('emcc: SAFE_HEAP must be 0 or 1')
if not settings.WASM:
# When the user requests non-wasm output, we enable wasm2js. that is,
# we still compile to wasm normally, but we compile the final output
# to js.
settings.WASM = 1
settings.WASM2JS = 1
if settings.WASM == 2:
# Requesting both Wasm and Wasm2JS support
settings.WASM2JS = 1
if (options.oformat == OFormat.WASM or settings.PURE_WASI) and not settings.SIDE_MODULE:
# if the output is just a wasm file, it will normally be a standalone one,
# as there is no JS. an exception are side modules, as we can't tell at
# compile time whether JS will be involved or not - the main module may
# have JS, and the side module is expected to link against that.
# we also do not support standalone mode in fastcomp.
settings.STANDALONE_WASM = 1
if settings.LZ4:
settings.EXPORTED_RUNTIME_METHODS += ['LZ4']
if settings.WASM2C:
# wasm2c only makes sense with standalone wasm - there will be no JS,
# just wasm and then C
settings.STANDALONE_WASM = 1
# wasm2c doesn't need any special handling of i64, we have proper i64
# handling on the FFI boundary, which is exactly like the case of JS with
# BigInt support
settings.WASM_BIGINT = 1
if options.no_entry:
settings.EXPECT_MAIN = 0
elif settings.STANDALONE_WASM:
if '_main' in settings.EXPORTED_FUNCTIONS:
# TODO(sbc): Make this into a warning?
logger.debug('including `_main` in EXPORTED_FUNCTIONS is not necessary in standalone mode')
else:
# In normal non-standalone mode we have special handling of `_main` in EXPORTED_FUNCTIONS.
# 1. If the user specifies exports, but doesn't include `_main` we assume they want to build a
# reactor.
# 2. If the user doesn't export anything we default to exporting `_main` (unless `--no-entry`
# is specified (see above).
if 'EXPORTED_FUNCTIONS' in settings_map:
if '_main' not in settings.USER_EXPORTED_FUNCTIONS:
settings.EXPECT_MAIN = 0
else:
assert not settings.EXPORTED_FUNCTIONS
settings.EXPORTED_FUNCTIONS = ['_main']
if settings.STANDALONE_WASM:
# In STANDALONE_WASM mode we either build a command or a reactor.
# See https://github.com/WebAssembly/WASI/blob/main/design/application-abi.md
# For a command we always want EXIT_RUNTIME=1
# For a reactor we always want EXIT_RUNTIME=0
if 'EXIT_RUNTIME' in settings_map:
exit_with_error('Explicitly setting EXIT_RUNTIME is not compatible with STANDALONE_WASM. EXIT_RUNTIME will always be True for programs (with a main function) and False for reactors (no main function).')
settings.EXIT_RUNTIME = settings.EXPECT_MAIN
# Note the exports the user requested
building.user_requested_exports.update(settings.EXPORTED_FUNCTIONS)
def default_setting(name, new_default):
if name not in settings_map:
setattr(settings, name, new_default)
if settings.OPT_LEVEL >= 1:
default_setting('ASSERTIONS', 0)
if settings.SHRINK_LEVEL >= 2:
default_setting('EVAL_CTORS', 1)
# -s ASSERTIONS=1 implies basic stack overflow checks, and ASSERTIONS=2
# implies full stack overflow checks.
if settings.ASSERTIONS:
# However, we don't set this default in PURE_WASI, or when we are linking without standard
# libraries because STACK_OVERFLOW_CHECK depends on emscripten_stack_get_end which is defined
# in libcompiler-rt.
if not settings.PURE_WASI and '-nostdlib' not in newargs and '-nodefaultlibs' not in newargs:
default_setting('STACK_OVERFLOW_CHECK', max(settings.ASSERTIONS, settings.STACK_OVERFLOW_CHECK))
if settings.LLD_REPORT_UNDEFINED or settings.STANDALONE_WASM:
# Reporting undefined symbols at wasm-ld time requires us to know if we have a `main` function
# or not, as does standalone wasm mode.
# TODO(sbc): Remove this once this becomes the default
settings.IGNORE_MISSING_MAIN = 0
# For users that opt out of WARN_ON_UNDEFINED_SYMBOLS we assume they also
# want to opt out of ERROR_ON_UNDEFINED_SYMBOLS.
if settings_map.get('WARN_ON_UNDEFINED_SYMBOLS') == '0':
default_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
# It is unlikely that developers targeting "native web" APIs with MINIMAL_RUNTIME need
# errno support by default.
if settings.MINIMAL_RUNTIME:
default_setting('SUPPORT_ERRNO', 0)
# Require explicit -lfoo.js flags to link with JS libraries.
default_setting('AUTO_JS_LIBRARIES', 0)
if settings.STRICT:
default_setting('STRICT_JS', 1)
default_setting('AUTO_JS_LIBRARIES', 0)
default_setting('AUTO_NATIVE_LIBRARIES', 0)
default_setting('AUTO_ARCHIVE_INDEXES', 0)
default_setting('IGNORE_MISSING_MAIN', 0)
default_setting('ALLOW_UNIMPLEMENTED_SYSCALLS', 0)
if not settings.AUTO_JS_LIBRARIES:
default_setting('USE_SDL', 0)
# Default to TEXTDECODER=2 (always use TextDecoder to decode UTF-8 strings)
# in -Oz builds, since custom decoder for UTF-8 takes up space.
# In pthreads enabled builds, TEXTDECODER==2 may not work, see
# https://github.com/whatwg/encoding/issues/172
# When supporting shell environments, do not do this as TextDecoder is not
# widely supported there.
if settings.SHRINK_LEVEL >= 2 and not settings.USE_PTHREADS and \
not settings.ENVIRONMENT_MAY_BE_SHELL:
default_setting('TEXTDECODER', 2)
# If set to 1, we will run the autodebugger (the automatic debugging tool, see
# tools/autodebugger). Note that this will disable inclusion of libraries. This
# is useful because including dlmalloc makes it hard to compare native and js
# builds
if os.environ.get('EMCC_AUTODEBUG'):
settings.AUTODEBUG = 1
# Use settings
if settings.DEBUG_LEVEL > 1 and options.use_closure_compiler:
diagnostics.warning('emcc', 'disabling closure because debug info was requested')
options.use_closure_compiler = False
if settings.WASM == 2 and settings.SINGLE_FILE:
exit_with_error('cannot have both WASM=2 and SINGLE_FILE enabled at the same time')
if settings.SEPARATE_DWARF and settings.WASM2JS:
exit_with_error('cannot have both SEPARATE_DWARF and WASM2JS at the same time (as there is no wasm file)')
if settings.MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and settings.MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION:
exit_with_error('MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION are mutually exclusive!')
if options.emrun:
if settings.MINIMAL_RUNTIME:
exit_with_error('--emrun is not compatible with -s MINIMAL_RUNTIME=1')
settings.EXPORTED_RUNTIME_METHODS.append('addOnExit')
if options.use_closure_compiler:
settings.USE_CLOSURE_COMPILER = options.use_closure_compiler
if settings.CLOSURE_WARNINGS not in ['quiet', 'warn', 'error']:
exit_with_error('Invalid option -s CLOSURE_WARNINGS=%s specified! Allowed values are "quiet", "warn" or "error".' % settings.CLOSURE_WARNINGS)
# Include the dynCall() function by default in DYNCALLS builds with the classic runtime; in MINIMAL_RUNTIME it must be added explicitly.
if settings.DYNCALLS and not settings.MINIMAL_RUNTIME:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$dynCall']
if settings.MAIN_MODULE:
assert not settings.SIDE_MODULE
if settings.MAIN_MODULE == 1:
settings.INCLUDE_FULL_LIBRARY = 1
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$preloadDylibs']
elif settings.SIDE_MODULE:
assert not settings.MAIN_MODULE
# memory init file is not supported with side modules, must be executable synchronously (for dlopen)
options.memory_init_file = False
# If we are including the entire JS library then we know for sure we will, by definition,
# require all the reverse dependencies.
if settings.INCLUDE_FULL_LIBRARY:
default_setting('REVERSE_DEPS', 'all')
if settings.MAIN_MODULE == 1 or settings.SIDE_MODULE == 1:
settings.LINKABLE = 1
settings.EXPORT_ALL = 1
if settings.MAIN_MODULE:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$getDylinkMetadata', '$mergeLibSymbols']
if settings.RELOCATABLE:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$reportUndefinedSymbols',
'$relocateExports',
'$GOTHandler',
'__heap_base',
'__stack_pointer',
]
settings.EXPORTED_FUNCTIONS += [
# This needs to be exported on the Module object as well, so that it is
# visible to side modules.
'___heap_base',
# Unconditional dependency in library_dylink.js
'_setThrew',
]
if settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME is not compatible with relocatable output')
if settings.WASM2JS:
exit_with_error('WASM2JS is not compatible with relocatable output')
# shared modules need memory utilities to allocate their memory
settings.EXPORTED_RUNTIME_METHODS += ['allocate']
settings.ALLOW_TABLE_GROWTH = 1
# various settings require sbrk() access
if settings.DETERMINISTIC or \
settings.EMSCRIPTEN_TRACING or \
settings.SAFE_HEAP or \
settings.MEMORYPROFILER:
settings.EXPORTED_FUNCTIONS += ['_sbrk']
if settings.MEMORYPROFILER:
settings.EXPORTED_FUNCTIONS += ['___heap_base',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_get_current']
if settings.ASYNCIFY_LAZY_LOAD_CODE:
settings.ASYNCIFY = 1
if settings.ASYNCIFY:
# See: https://github.com/emscripten-core/emscripten/issues/12065
# See: https://github.com/emscripten-core/emscripten/issues/12066
settings.DYNCALLS = 1
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_set_limits']
settings.ASYNCIFY_ADD = unmangle_symbols_from_cmdline(settings.ASYNCIFY_ADD)
settings.ASYNCIFY_REMOVE = unmangle_symbols_from_cmdline(settings.ASYNCIFY_REMOVE)
settings.ASYNCIFY_ONLY = unmangle_symbols_from_cmdline(settings.ASYNCIFY_ONLY)
if state.mode == Mode.COMPILE_AND_LINK and final_suffix in ('.o', '.bc', '.so', '.dylib') and not settings.SIDE_MODULE:
diagnostics.warning('emcc', 'generating an executable with an object extension (%s). If you meant to build an object file please use `-c`, `-r`, or `-shared`' % final_suffix)
if settings.SUPPORT_BIG_ENDIAN:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$LE_HEAP_STORE_U16',
'$LE_HEAP_STORE_I16',
'$LE_HEAP_STORE_U32',
'$LE_HEAP_STORE_I32',
'$LE_HEAP_STORE_F32',
'$LE_HEAP_STORE_F64',
'$LE_HEAP_LOAD_U16',
'$LE_HEAP_LOAD_I16',
'$LE_HEAP_LOAD_U32',
'$LE_HEAP_LOAD_I32',
'$LE_HEAP_LOAD_F32',
'$LE_HEAP_LOAD_F64'
]
if settings.STACK_OVERFLOW_CHECK:
# The basic writeStackCookie/checkStackCookie mechanism just needs to know where the end
# of the stack is.
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_end', '_emscripten_stack_get_free']
if settings.STACK_OVERFLOW_CHECK == 2:
# The full checking done by binaryen's `StackCheck` pass also needs to know the base of the
# stack.
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base']
# We call one of these two functions during startup which caches the stack limits
# in wasm globals allowing get_base/get_free to be super fast.
# See compiler-rt/stack_limits.S.
if settings.RELOCATABLE:
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_set_limits']
else:
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_init']
if settings.MODULARIZE:
if settings.PROXY_TO_WORKER:
exit_with_error('-s MODULARIZE=1 is not compatible with --proxy-to-worker (if you want to run in a worker with -s MODULARIZE=1, you likely want to do the worker side setup manually)')
# in MINIMAL_RUNTIME we may not need to emit the Promise code, as the
# HTML output creates a singleton instance, and it does so without the
# Promise. However, in Pthreads mode the Promise is used for worker
# creation.
if settings.MINIMAL_RUNTIME and options.oformat == OFormat.HTML and not settings.USE_PTHREADS:
settings.EXPORT_READY_PROMISE = 0
if settings.LEGACY_VM_SUPPORT:
if settings.WASM2JS:
settings.POLYFILL_OLD_MATH_FUNCTIONS = 1
# Support all old browser versions
settings.MIN_FIREFOX_VERSION = 0
settings.MIN_SAFARI_VERSION = 0
settings.MIN_IE_VERSION = 0
settings.MIN_EDGE_VERSION = 0
settings.MIN_CHROME_VERSION = 0
if settings.MIN_CHROME_VERSION <= 37:
settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 1
setup_environment_settings()
# Silently drop any individual backwards compatibility emulation flags that are known never to occur on browsers that support WebAssembly.
if not settings.WASM2JS:
settings.POLYFILL_OLD_MATH_FUNCTIONS = 0
settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 0
if settings.STB_IMAGE and final_suffix in EXECUTABLE_ENDINGS:
state.forced_stdlibs.append('libstb_image')
settings.EXPORTED_FUNCTIONS += ['_stbi_load', '_stbi_load_from_memory', '_stbi_image_free']
if settings.USE_WEBGL2:
settings.MAX_WEBGL_VERSION = 2
# MIN_WEBGL_VERSION=2 implies MAX_WEBGL_VERSION=2
if settings.MIN_WEBGL_VERSION == 2:
default_setting('MAX_WEBGL_VERSION', 2)
if settings.MIN_WEBGL_VERSION > settings.MAX_WEBGL_VERSION:
exit_with_error('MIN_WEBGL_VERSION must be smaller or equal to MAX_WEBGL_VERSION!')
if not settings.GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS and settings.GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS:
exit_with_error('-s GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=0 only makes sense with -s GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0!')
if settings.ASMFS and final_suffix in EXECUTABLE_ENDINGS:
state.forced_stdlibs.append('libasmfs')
settings.FILESYSTEM = 0
settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
settings.FETCH = 1
settings.JS_LIBRARIES.append((0, 'library_asmfs.js'))
# Explicitly drop linking in a malloc implementation if program is not using any dynamic allocation calls.
if not settings.USES_DYNAMIC_ALLOC:
settings.MALLOC = 'none'
if settings.FETCH and final_suffix in EXECUTABLE_ENDINGS:
state.forced_stdlibs.append('libfetch')
settings.JS_LIBRARIES.append((0, 'library_fetch.js'))
if settings.USE_PTHREADS:
settings.FETCH_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.fetch.js'
if settings.DEMANGLE_SUPPORT:
settings.EXPORTED_FUNCTIONS += ['___cxa_demangle']
if settings.FULL_ES3:
settings.FULL_ES2 = 1
settings.MAX_WEBGL_VERSION = max(2, settings.MAX_WEBGL_VERSION)
if settings.EMBIND:
state.forced_stdlibs.append('libembind')
settings.EXPORTED_FUNCTIONS += ['_stackSave', '_stackRestore', '_stackAlloc']
if not settings.STANDALONE_WASM:
# in standalone mode, crt1 will call the constructors from inside the wasm
settings.EXPORTED_FUNCTIONS.append('___wasm_call_ctors')
if settings.RELOCATABLE and not settings.DYNAMIC_EXECUTION:
exit_with_error('cannot have both DYNAMIC_EXECUTION=0 and RELOCATABLE enabled at the same time, since RELOCATABLE needs to eval()')
if settings.SIDE_MODULE and settings.GLOBAL_BASE != -1:
exit_with_error('Cannot set GLOBAL_BASE when building SIDE_MODULE')
# When building a side module we currently have to assume that any undefined
# symbols that exist at link time will be satisfied by the main module or JS.
if settings.SIDE_MODULE:
default_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
default_setting('WARN_ON_UNDEFINED_SYMBOLS', 0)
else:
settings.EXPORT_IF_DEFINED.append('__start_em_asm')
settings.EXPORT_IF_DEFINED.append('__stop_em_asm')
if options.use_preload_plugins or len(options.preload_files) or len(options.embed_files):
if settings.NODERAWFS:
exit_with_error('--preload-file and --embed-file cannot be used with NODERAWFS which disables virtual filesystem')
# if we include any files, or intend to use preload plugins, then we definitely need filesystem support
settings.FORCE_FILESYSTEM = 1
if settings.PROXY_TO_WORKER or options.use_preload_plugins:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$Browser']
if not settings.MINIMAL_RUNTIME:
# In non-MINIMAL_RUNTIME, the core runtime depends on these functions being present. (In MINIMAL_RUNTIME they are
# no longer always bundled in.)
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$demangle',
'$demangleAll',
'$jsStackTrace',
'$stackTrace'
]
if settings.FILESYSTEM and not settings.BOOTSTRAPPING_STRUCT_INFO and not settings.STANDALONE_WASM:
# to flush streams on FS exit, we need to be able to call fflush
# we only include it if the runtime is exitable, or when ASSERTIONS
# (ASSERTIONS will check that streams do not need to be flushed,
# helping people see when they should have enabled EXIT_RUNTIME)
if settings.EXIT_RUNTIME or settings.ASSERTIONS:
settings.EXPORTED_FUNCTIONS += ['_fflush']
if settings.SUPPORT_ERRNO and not settings.BOOTSTRAPPING_STRUCT_INFO:
# so that the setErrNo JS library function can report errno back to C
settings.EXPORTED_FUNCTIONS += ['___errno_location']
if settings.SAFE_HEAP:
# SAFE_HEAP check includes calling emscripten_get_sbrk_ptr() from wasm
settings.EXPORTED_FUNCTIONS += ['_emscripten_get_sbrk_ptr', '_emscripten_stack_get_base']
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$unSign']
if not settings.DECLARE_ASM_MODULE_EXPORTS:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$exportAsmFunctions']
if settings.ALLOW_MEMORY_GROWTH:
# Setting ALLOW_MEMORY_GROWTH turns off ABORTING_MALLOC, as in that mode we default to
# the behavior of trying to grow and returning 0 from malloc on failure, like
# a standard system would. However, if the user sets the flag it
# overrides that.
default_setting('ABORTING_MALLOC', 0)
if settings.USE_PTHREADS:
if settings.USE_PTHREADS == 2:
exit_with_error('USE_PTHREADS=2 is no longer supported')
if settings.ALLOW_MEMORY_GROWTH:
diagnostics.warning('pthreads-mem-growth', 'USE_PTHREADS + ALLOW_MEMORY_GROWTH may run non-wasm code slowly, see https://github.com/WebAssembly/design/issues/1271')
settings.JS_LIBRARIES.append((0, 'library_pthread.js'))
settings.EXPORTED_FUNCTIONS += [
'___emscripten_pthread_data_constructor',
'__emscripten_call_on_thread',
'__emscripten_main_thread_futex',
'__emscripten_thread_init',
'__emscripten_thread_exit',
'_emscripten_current_thread_process_queued_calls',
'__emscripten_allow_main_runtime_queued_calls',
'_emscripten_futex_wake',
'_emscripten_get_global_libc',
'_emscripten_main_browser_thread_id',
'_emscripten_main_thread_process_queued_calls',
'_emscripten_run_in_main_runtime_thread_js',
'_emscripten_stack_set_limits',
'_emscripten_sync_run_in_main_thread_2',
'_emscripten_sync_run_in_main_thread_4',
'_emscripten_tls_init',
'_pthread_self',
'_pthread_testcancel',
]
# Some of these symbols are used by worker.js but otherwise unreferenced.
# Because emitDCEGraph only considers the main js file, and not worker.js,
# we have to explicitly mark these symbols as user-exported so that they will
# be kept alive through DCE.
# TODO: Find a less hacky way to do this, perhaps by also scanning worker.js
# for roots.
building.user_requested_exports.add('_emscripten_tls_init')
building.user_requested_exports.add('_emscripten_current_thread_process_queued_calls')
# set location of worker.js
settings.PTHREAD_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.worker.js'
else:
settings.JS_LIBRARIES.append((0, 'library_pthread_stub.js'))
if settings.FORCE_FILESYSTEM and not settings.MINIMAL_RUNTIME:
# when the filesystem is forced, we export by default methods that filesystem usage
# may need, including filesystem usage from standalone file packager output (i.e.
# file packages not built together with emcc, but that are loaded at runtime
# separately, and they need emcc's output to contain the support they need)
if not settings.ASMFS:
settings.EXPORTED_RUNTIME_METHODS += [
'FS_createPath',
'FS_createDataFile',
'FS_createPreloadedFile',
'FS_createLazyFile',
'FS_createDevice',
'FS_unlink'
]
settings.EXPORTED_RUNTIME_METHODS += [
'addRunDependency',
'removeRunDependency',
]
if not settings.MINIMAL_RUNTIME or settings.EXIT_RUNTIME:
# MINIMAL_RUNTIME only needs callRuntimeCallbacks in certain cases, but the normal runtime
# always does.
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$callRuntimeCallbacks']
if settings.USE_PTHREADS:
# memalign is used to ensure allocated thread stacks are aligned.
settings.EXPORTED_FUNCTIONS += ['_memalign']
if settings.MINIMAL_RUNTIME:
building.user_requested_exports.add('exit')
if settings.PROXY_TO_PTHREAD:
settings.EXPORTED_FUNCTIONS += ['_emscripten_proxy_main']
# pthread stack setup and other necessary utilities
def include_and_export(name):
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$' + name]
settings.EXPORTED_FUNCTIONS += [name]
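# include_and_export() both forces the JS library symbol ('$' + name) to be
# linked in (via DEFAULT_LIBRARY_FUNCS_TO_INCLUDE) and exports it by name; it
# is used just below for establishStackSpace and invokeEntryPoint.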
include_and_export('establishStackSpace')
include_and_export('invokeEntryPoint')
if not settings.MINIMAL_RUNTIME:
# keepRuntimeAlive does not apply to MINIMAL_RUNTIME.
settings.EXPORTED_RUNTIME_METHODS += ['keepRuntimeAlive']
if settings.MODULARIZE:
if not settings.EXPORT_ES6 and settings.EXPORT_NAME == 'Module':
exit_with_error('pthreads + MODULARIZE currently require you to set -s EXPORT_NAME=Something (see settings.js) to Something != Module, so that the .worker.js file can work')
# MODULARIZE+USE_PTHREADS mode requires extra exports out to Module so that worker.js
# can access them:
# general threading variables:
settings.EXPORTED_RUNTIME_METHODS += ['PThread']
# To keep code size to a minimum, MINIMAL_RUNTIME does not use the global ExitStatus
# object; only the regular runtime has it.
if not settings.MINIMAL_RUNTIME:
settings.EXPORTED_RUNTIME_METHODS += ['ExitStatus']
if settings.RELOCATABLE:
# pthreads + dynamic linking has certain limitations
if settings.SIDE_MODULE:
diagnostics.warning('experimental', '-s SIDE_MODULE + pthreads is experimental')
elif settings.MAIN_MODULE:
diagnostics.warning('experimental', '-s MAIN_MODULE + pthreads is experimental')
elif settings.LINKABLE:
diagnostics.warning('experimental', '-s LINKABLE + pthreads is experimental')
default_setting('SUPPORT_LONGJMP', 0)
if settings.SUPPORT_LONGJMP:
exit_with_error('SUPPORT_LONGJMP is not compatible with pthreads + dynamic linking')
if settings.PROXY_TO_WORKER:
exit_with_error('--proxy-to-worker is not supported with -s USE_PTHREADS>0! Use the option -s PROXY_TO_PTHREAD=1 if you want to run the main thread of a multithreaded application in a web worker.')
elif settings.PROXY_TO_PTHREAD:
exit_with_error('-s PROXY_TO_PTHREAD=1 requires -s USE_PTHREADS to work!')
def check_memory_setting(setting):
if settings[setting] % webassembly.WASM_PAGE_SIZE != 0:
exit_with_error(f'{setting} must be a multiple of WebAssembly page size (64KiB), was {settings[setting]}')
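# Example: an INITIAL_MEMORY of 64MB (67108864 == 1024 * 65536) passes this
# check, while a value such as 67108000 is rejected because it is not a
# multiple of the 64KiB wasm page size.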
check_memory_setting('INITIAL_MEMORY')
check_memory_setting('MAXIMUM_MEMORY')
if settings.INITIAL_MEMORY >= 2 * 1024 * 1024 * 1024:
exit_with_error('INITIAL_MEMORY must be less than 2GB due to current spec limitations')
if settings.INITIAL_MEMORY < settings.TOTAL_STACK:
exit_with_error(f'INITIAL_MEMORY must be larger than TOTAL_STACK, was {settings.INITIAL_MEMORY} (TOTAL_STACK={settings.TOTAL_STACK})')
if settings.MEMORY_GROWTH_LINEAR_STEP != -1:
check_memory_setting('MEMORY_GROWTH_LINEAR_STEP')
if 'MAXIMUM_MEMORY' in settings_map and not settings.ALLOW_MEMORY_GROWTH:
diagnostics.warning('unused-command-line-argument', 'MAXIMUM_MEMORY is only meaningful with ALLOW_MEMORY_GROWTH')
if settings.EXPORT_ES6 and not settings.MODULARIZE:
# EXPORT_ES6 requires output to be a module
if 'MODULARIZE' in settings_map:
exit_with_error('EXPORT_ES6 requires MODULARIZE to be set')
settings.MODULARIZE = 1
if settings.MODULARIZE and not settings.DECLARE_ASM_MODULE_EXPORTS:
# When the MODULARIZE option is used, we currently require declaring all module exports
# individually - TODO: this could be optimized
exit_with_error('DECLARE_ASM_MODULE_EXPORTS=0 is not compatible with MODULARIZE')
# When not declaring wasm module exports in outer scope one by one, disable minifying
# wasm module export names so that the names can be passed directly to the outer scope.
# Also, if using library_exports.js API, disable minification so that the feature can work.
if not settings.DECLARE_ASM_MODULE_EXPORTS or '-lexports.js' in [x for _, x in state.link_flags]:
settings.MINIFY_ASMJS_EXPORT_NAMES = 0
# Enable minification of wasm imports and exports when appropriate, if we
# are emitting an optimized JS+wasm combo (then the JS knows how to load the minified names).
# Things that process the JS after this operation would be done must disable this.
# For example, ASYNCIFY_LAZY_LOAD_CODE needs to identify import names.
if will_metadce() and \
settings.OPT_LEVEL >= 2 and \
settings.DEBUG_LEVEL <= 2 and \
options.oformat not in (OFormat.WASM, OFormat.BARE) and \
not settings.LINKABLE and \
not settings.STANDALONE_WASM and \
not settings.AUTODEBUG and \
not settings.ASSERTIONS and \
not settings.RELOCATABLE and \
not settings.ASYNCIFY_LAZY_LOAD_CODE and \
settings.MINIFY_ASMJS_EXPORT_NAMES:
settings.MINIFY_WASM_IMPORTS_AND_EXPORTS = 1
settings.MINIFY_WASM_IMPORTED_MODULES = 1
if settings.MINIMAL_RUNTIME:
# Minimal runtime uses a different default shell file
if options.shell_path == shared.path_from_root('src', 'shell.html'):
options.shell_path = shared.path_from_root('src', 'shell_minimal_runtime.html')
if settings.EXIT_RUNTIME:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['proc_exit']
if settings.ASSERTIONS:
# In ASSERTIONS builds, the functions UTF8ArrayToString() and stringToUTF8Array() (which are not JS library functions) both
# use warnOnce(), which in MINIMAL_RUNTIME is a JS library function, so we explicitly have to mark a dependency on warnOnce()
# in that case. If the string functions are turned into library functions in the future, then JS dependency tracking can be
# used and this special directive can be dropped.
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$warnOnce']
if settings.MODULARIZE and not (settings.EXPORT_ES6 and not settings.SINGLE_FILE) and \
settings.EXPORT_NAME == 'Module' and options.oformat == OFormat.HTML and \
(options.shell_path == shared.path_from_root('src', 'shell.html') or options.shell_path == shared.path_from_root('src', 'shell_minimal.html')):
exit_with_error(f'Due to collision in variable name "Module", the shell file "{options.shell_path}" is not compatible with build options "-s MODULARIZE=1 -s EXPORT_NAME=Module". Either provide your own shell file, or change the name of the export to something else to avoid the name collision. (see https://github.com/emscripten-core/emscripten/issues/7950 for details)')
if settings.STANDALONE_WASM:
if settings.USE_PTHREADS:
exit_with_error('STANDALONE_WASM does not support pthreads yet')
if settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME reduces JS size, and is incompatible with STANDALONE_WASM which focuses on ignoring JS anyhow and being 100% wasm')
# the wasm must be runnable without the JS, so there cannot be anything that
# requires JS legalization
settings.LEGALIZE_JS_FFI = 0
# TODO(sbc): Remove WASM2JS here once the size regression it would introduce has been fixed.
if settings.USE_PTHREADS or settings.RELOCATABLE or settings.ASYNCIFY_LAZY_LOAD_CODE or settings.WASM2JS:
settings.IMPORTED_MEMORY = 1
if settings.WASM_BIGINT:
settings.LEGALIZE_JS_FFI = 0
if settings.SINGLE_FILE:
settings.GENERATE_SOURCE_MAP = 0
if options.use_closure_compiler == 2 and not settings.WASM2JS:
exit_with_error('closure compiler mode 2 assumes the code is asm.js, so not meaningful for wasm')
if 'MEM_INIT_METHOD' in settings_map:
exit_with_error('MEM_INIT_METHOD is not supported in wasm. Memory will be embedded in the wasm binary if threads are not used, and included in a separate file if threads are used.')
if settings.WASM2JS:
settings.MAYBE_WASM2JS = 1
# when using wasm2js, if the memory segments are in the wasm then they
# end up converted by wasm2js into base64 encoded JS. alternatively, we
# can use a .mem file like asm.js used to.
# generally we follow what the options tell us to do (which is to use
# a .mem file in most cases, since it is binary & compact). however, for
# pthreads we must keep the memory segments in the wasm as they will be
# passive segments which the .mem format cannot handle.
settings.MEM_INIT_IN_WASM = not options.memory_init_file or settings.SINGLE_FILE or settings.USE_PTHREADS
else:
# wasm includes the mem init in the wasm binary. The exception is
# wasm2js, which behaves more like js.
options.memory_init_file = True
settings.MEM_INIT_IN_WASM = True
# wasm side modules have suffix .wasm
if settings.SIDE_MODULE and target.endswith('.js'):
diagnostics.warning('emcc', 'output suffix .js requested, but wasm side modules are just wasm files; emitting only a .wasm, no .js')
sanitize = set()
for arg in newargs:
if arg.startswith('-fsanitize='):
sanitize.update(arg.split('=', 1)[1].split(','))
elif arg.startswith('-fno-sanitize='):
sanitize.difference_update(arg.split('=', 1)[1].split(','))
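# For example, '-fsanitize=address,undefined -fno-sanitize=undefined' leaves
# sanitize == {'address'}; flags are processed in order, so a later
# -fno-sanitize removes sanitizers added by earlier -fsanitize flags.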
if sanitize:
settings.USE_OFFSET_CONVERTER = 1
settings.EXPORTED_FUNCTIONS += [
'_memalign',
'_emscripten_builtin_memalign',
'_emscripten_builtin_malloc',
'_emscripten_builtin_free',
'___heap_base',
'___global_base'
]
if settings.USE_OFFSET_CONVERTER and settings.WASM2JS:
exit_with_error('wasm2js is not compatible with USE_OFFSET_CONVERTER (see #14630)')
if sanitize & UBSAN_SANITIZERS:
if '-fsanitize-minimal-runtime' in newargs:
settings.UBSAN_RUNTIME = 1
else:
settings.UBSAN_RUNTIME = 2
if 'leak' in sanitize:
settings.USE_LSAN = 1
settings.EXIT_RUNTIME = 1
if settings.LINKABLE:
exit_with_error('LSan does not support dynamic linking')
if 'address' in sanitize:
settings.USE_ASAN = 1
if not settings.UBSAN_RUNTIME:
settings.UBSAN_RUNTIME = 2
settings.EXPORTED_FUNCTIONS.append('_emscripten_builtin_memset')
# helper functions for JS to call into C to do memory operations. these
# let us sanitize memory access from the JS side, by calling into C where
# it has been instrumented.
ASAN_C_HELPERS = [
'asan_c_load_1', 'asan_c_load_1u',
'asan_c_load_2', 'asan_c_load_2u',
'asan_c_load_4', 'asan_c_load_4u',
'asan_c_load_f', 'asan_c_load_d',
'asan_c_store_1', 'asan_c_store_1u',
'asan_c_store_2', 'asan_c_store_2u',
'asan_c_store_4', 'asan_c_store_4u',
'asan_c_store_f', 'asan_c_store_d',
]
settings.EXPORTED_FUNCTIONS += ['_' + x for x in ASAN_C_HELPERS]
if settings.ASYNCIFY and not settings.ASYNCIFY_ONLY:
# we do not want asyncify to instrument these helpers - they just access
# memory as small getters/setters, so they cannot pause anyhow, and also
# we access them in the runtime as we prepare to rewind, which would hit
# an asyncify assertion, if asyncify instrumented them.
#
# note that if ASYNCIFY_ONLY was set by the user then we do not need to
# do anything (as the user's list won't contain these functions), and if
# we did add them, the pass would assert on incompatible lists, hence the
# condition in the above if.
settings.ASYNCIFY_REMOVE += ASAN_C_HELPERS
if settings.ASAN_SHADOW_SIZE != -1:
diagnostics.warning('emcc', 'ASAN_SHADOW_SIZE is ignored and will be removed in a future release')
if settings.GLOBAL_BASE != -1:
exit_with_error("ASan does not support custom GLOBAL_BASE")
max_mem = settings.INITIAL_MEMORY
if settings.ALLOW_MEMORY_GROWTH:
max_mem = settings.MAXIMUM_MEMORY
shadow_size = max_mem // 8
settings.GLOBAL_BASE = shadow_size
sanitizer_mem = (shadow_size + webassembly.WASM_PAGE_SIZE) & ~webassembly.WASM_PAGE_SIZE
# sanitizers do at least 9 page allocs of a single page during startup.
sanitizer_mem += webassembly.WASM_PAGE_SIZE * 9
# we also allocate at least 11 "regions". Each region is kRegionSize (2 << 20) but
# MmapAlignedOrDieOnFatalError adds another 2 << 20 for alignment.
sanitizer_mem += (1 << 21) * 11
# When running in the threaded mode asan needs to allocate an array of kMaxNumberOfThreads
# (1 << 22) pointers. See compiler-rt/lib/asan/asan_thread.cpp.
if settings.USE_PTHREADS:
sanitizer_mem += (1 << 22) * 4
# Increase the size of the initial memory according to how much memory
# we think the sanitizers will use.
settings.INITIAL_MEMORY += sanitizer_mem
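# Rough illustration of the sizing above: with a 16MB initial memory and no
# growth, shadow_size is 2MB and the startup-allocation headroom comes to
# roughly another 25MB, so INITIAL_MEMORY ends up around 41MB (plus ~16MB more
# when pthreads are enabled).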
if settings.SAFE_HEAP:
# SAFE_HEAP instruments ASan's shadow memory accesses.
# Since the shadow memory starts at 0, the act of accessing the shadow memory is detected
# by SAFE_HEAP as a null pointer dereference.
exit_with_error('ASan does not work with SAFE_HEAP')
if settings.LINKABLE:
exit_with_error('ASan does not support dynamic linking')
if sanitize and settings.GENERATE_SOURCE_MAP:
settings.LOAD_SOURCE_MAP = 1
if settings.GLOBAL_BASE == -1:
# default if nothing else sets it
# a higher global base is useful for optimizing load/store offsets, as it
# enables the --post-emscripten pass
settings.GLOBAL_BASE = 1024
# various settings require malloc/free support from JS
if settings.RELOCATABLE or \
settings.BUILD_AS_WORKER or \
settings.USE_WEBGPU or \
settings.USE_PTHREADS or \
settings.OFFSCREENCANVAS_SUPPORT or \
settings.LEGACY_GL_EMULATION or \
not settings.DISABLE_EXCEPTION_CATCHING or \
settings.ASYNCIFY or \
settings.ASMFS or \
settings.DEMANGLE_SUPPORT or \
settings.FORCE_FILESYSTEM or \
settings.STB_IMAGE or \
settings.EMBIND or \
settings.FETCH or \
settings.PROXY_POSIX_SOCKETS or \
options.memory_profiler or \
sanitize:
settings.EXPORTED_FUNCTIONS += ['_malloc', '_free']
if not settings.DISABLE_EXCEPTION_CATCHING:
settings.EXPORTED_FUNCTIONS += [
# For normal builds the entries in deps_info.py are enough to include
# these symbols whenever __cxa_find_matching_catch_* functions are
# found. However, under LTO these symbols don't exist prior to linking
# so we include them unconditionally when exceptions are enabled.
'___cxa_is_pointer_type',
'___cxa_can_catch',
# Emscripten exception handling can generate invoke calls, and they call
# setThrew(). We cannot handle this using deps_info as the invokes are not
# emitted because of library function usage, but by codegen itself.
'_setThrew',
]
if settings.ASYNCIFY:
if not settings.ASYNCIFY_IGNORE_INDIRECT:
# if we are not ignoring indirect calls, then we must treat invoke_* as if
# they are indirect calls, since that is what they do - we can't see their
# targets statically.
settings.ASYNCIFY_IMPORTS += ['invoke_*']
# with pthreads we may call main through the __call_main mechanism, which can
# therefore reach anything in the program, so mark it as possibly causing a
# sleep (the asyncify analysis doesn't look through JS, just wasm, so it can't
# see what it itself calls)
if settings.USE_PTHREADS:
settings.ASYNCIFY_IMPORTS += ['__call_main']
# add the default imports
settings.ASYNCIFY_IMPORTS += DEFAULT_ASYNCIFY_IMPORTS
# return the full import name, including module. The name may
# already have a module prefix; if not, we assume it is "env".
def get_full_import_name(name):
if '.' in name:
return name
return 'env.' + name
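# e.g. 'invoke_*' becomes 'env.invoke_*', while an already-qualified name
# such as 'wasi_snapshot_preview1.fd_write' is returned unchanged.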
settings.ASYNCIFY_IMPORTS = [get_full_import_name(i) for i in settings.ASYNCIFY_IMPORTS]
if settings.WASM2JS and settings.GENERATE_SOURCE_MAP:
exit_with_error('wasm2js does not support source maps yet (debug in wasm for now)')
if settings.NODE_CODE_CACHING:
if settings.WASM_ASYNC_COMPILATION:
exit_with_error('NODE_CODE_CACHING requires sync compilation (WASM_ASYNC_COMPILATION=0)')
if not shared.target_environment_may_be('node'):
exit_with_error('NODE_CODE_CACHING only works in node, but target environments do not include it')
if settings.SINGLE_FILE:
exit_with_error('NODE_CODE_CACHING saves a file on the side and is not compatible with SINGLE_FILE')
if not shared.JS.isidentifier(settings.EXPORT_NAME):
exit_with_error(f'EXPORT_NAME is not a valid JS identifier: `{settings.EXPORT_NAME}`')
if options.tracing and settings.ALLOW_MEMORY_GROWTH:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['emscripten_trace_report_memory_layout']
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_current',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end']
# Any "pointers" passed to JS will now be i64's, in both modes.
if settings.MEMORY64:
if settings_map.get('WASM_BIGINT') == '0':
exit_with_error('MEMORY64 is not compatible with WASM_BIGINT=0')
settings.WASM_BIGINT = 1
# check if we can address the 2GB mark and higher: either if we start at
# 2GB, or if we allow growth to either any amount or to 2GB or more.
if settings.INITIAL_MEMORY > 2 * 1024 * 1024 * 1024 or \
(settings.ALLOW_MEMORY_GROWTH and
(settings.MAXIMUM_MEMORY < 0 or
settings.MAXIMUM_MEMORY > 2 * 1024 * 1024 * 1024)):
settings.CAN_ADDRESS_2GB = 1
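# In practice this is triggered via growth: ALLOW_MEMORY_GROWTH with
# MAXIMUM_MEMORY either unlimited (negative) or above 2GB sets
# CAN_ADDRESS_2GB (INITIAL_MEMORY itself is capped below 2GB earlier).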
settings.EMSCRIPTEN_VERSION = shared.EMSCRIPTEN_VERSION
settings.PROFILING_FUNCS = options.profiling_funcs
settings.SOURCE_MAP_BASE = options.source_map_base or ''
return target, wasm_target
@ToolchainProfiler.profile_block('compile inputs')
def phase_compile_inputs(options, state, newargs, input_files):
def is_link_flag(flag):
if flag.startswith('-nostdlib'):
return True
return flag.startswith(('-l', '-L', '-Wl,'))
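# e.g. '-lm', '-L/usr/lib', '-Wl,--no-entry' and '-nostdlib' are considered
# link-only flags and are filtered out of compile_args below.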
CXX = [shared.CLANG_CXX]
CC = [shared.CLANG_CC]
if config.COMPILER_WRAPPER:
logger.debug('using compiler wrapper: %s', config.COMPILER_WRAPPER)
CXX.insert(0, config.COMPILER_WRAPPER)
CC.insert(0, config.COMPILER_WRAPPER)
if 'EMMAKEN_COMPILER' in os.environ:
diagnostics.warning('deprecated', '`EMMAKEN_COMPILER` is deprecated.\n'
'To use an alternative LLVM build set `LLVM_ROOT` in the config file (or `EM_LLVM_ROOT` env var).\n'
'To wrap invocations of clang use the `COMPILER_WRAPPER` setting (or `EM_COMPILER_WRAPPER` env var).\n')
CXX = [os.environ['EMMAKEN_COMPILER']]
CC = [cxx_to_c_compiler(os.environ['EMMAKEN_COMPILER'])]
compile_args = [a for a in newargs if a and not is_link_flag(a)]
system_libs.ensure_sysroot()
def get_language_mode(args):
return_next = False
for item in args:
if return_next:
return item
if item == '-x':
return_next = True
continue
if item.startswith('-x'):
return strip_prefix(item, '-x')
return ''
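# get_language_mode() handles both the split form ('-x', 'c++') and the
# joined form ('-xc++'), and returns '' when no -x flag is present.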
language_mode = get_language_mode(newargs)
def use_cxx(src):
if 'c++' in language_mode or run_via_emxx:
return True
# Next consider the filename
if src.endswith(C_ENDINGS + OBJC_ENDINGS):
return False
if src.endswith(CXX_ENDINGS):
return True
# Finally fall back to the default
if settings.DEFAULT_TO_CXX:
# Default to using C++ even when run as `emcc`.
# This means that emcc will act as a C++ linker when no source files are
# specified.
# This differs from clang and gcc, where the default is always C unless run as
# clang++/g++.
return True
return False
def get_compiler(cxx):
if cxx:
return CXX
return CC
def get_clang_command(src_file):
return get_compiler(use_cxx(src_file)) + get_cflags(options, state.orig_args) + compile_args + [src_file]
def get_clang_command_asm(src_file):
return get_compiler(use_cxx(src_file)) + get_clang_flags() + compile_args + [src_file]
# preprocessor-only (-E) support
if state.mode == Mode.PREPROCESS_ONLY:
for input_file in [x[1] for x in input_files]:
cmd = get_clang_command(input_file)
if options.output_file:
cmd += ['-o', options.output_file]
# Do not compile, but just output the result from the preprocessing stage or
# output the dependency rule. Warning: clang and gcc behave differently
# with -MF! (clang seems to not recognize it)
logger.debug(('just preprocessor ' if state.has_dash_E else 'just dependencies: ') + ' '.join(cmd))
shared.check_call(cmd)
return []
# Precompiled headers support
if state.mode == Mode.PCH:
headers = [header for _, header in input_files]
for header in headers:
if not header.endswith(HEADER_ENDINGS):
exit_with_error(f'cannot mix precompiled headers with non-header inputs: {headers} : {header}')
cmd = get_clang_command(header)
if options.output_file:
cmd += ['-o', options.output_file]
logger.debug(f"running (for precompiled headers): {cmd[0]} {" ".join(cmd[1:])}")
shared.check_call(cmd)
return []
linker_inputs = []
seen_names = {}
def uniquename(name):
if name not in seen_names:
seen_names[name] = str(len(seen_names))
return unsuffixed(name) + '_' + seen_names[name] + shared.suffix(name)
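# uniquename() gives each distinct input name a stable numeric suffix, so
# that inputs sharing a basename (e.g. 'a/foo.c' and 'b/foo.c') produce
# distinct temporary object files instead of overwriting each other.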
def get_object_filename(input_file):
if state.mode == Mode.COMPILE_ONLY:
# In compile-only mode we don't use any temp file. The object files
# are written directly to their final output locations.
if options.output_file:
assert len(input_files) == 1
return options.output_file
else:
return unsuffixed_basename(input_file) + options.default_object_extension
else:
return in_temp(unsuffixed(uniquename(input_file)) + options.default_object_extension)
def compile_source_file(i, input_file):
logger.debug('compiling source file: ' + input_file)
output_file = get_object_filename(input_file)
if state.mode not in (Mode.COMPILE_ONLY, Mode.PREPROCESS_ONLY):
linker_inputs.append((i, output_file))
if get_file_suffix(input_file) in ASSEMBLY_ENDINGS:
cmd = get_clang_command_asm(input_file)
else:
cmd = get_clang_command(input_file)
if not state.has_dash_c:
cmd += ['-c']
cmd += ['-o', output_file]
if state.mode == Mode.COMPILE_AND_LINK and '-gsplit-dwarf' in newargs:
# When running in COMPILE_AND_LINK mode we compile to temporary location
# but we want the `.dwo` file to be generated in the current working directory,
# like it is under clang. We could avoid this hack if we use the clang driver
# to generate the temporary files, but that would also involve using the clang
# driver to perform linking, which would be a big change.
cmd += ['-Xclang', '-split-dwarf-file', '-Xclang', unsuffixed_basename(input_file) + '.dwo']
cmd += ['-Xclang', '-split-dwarf-output', '-Xclang', unsuffixed_basename(input_file) + '.dwo']
shared.check_call(cmd)
if output_file not in ('-', os.devnull):
assert os.path.exists(output_file)
# First, generate LLVM bitcode. For each input file, we get base.o with bitcode
for i, input_file in input_files:
file_suffix = get_file_suffix(input_file)
if file_suffix in SOURCE_ENDINGS + ASSEMBLY_ENDINGS or (state.has_dash_c and file_suffix == '.bc'):
compile_source_file(i, input_file)
elif file_suffix in DYNAMICLIB_ENDINGS:
logger.debug('using shared library: ' + input_file)
linker_inputs.append((i, input_file))
elif building.is_ar(input_file):
logger.debug('using static library: ' + input_file)
ensure_archive_index(input_file)
linker_inputs.append((i, input_file))
elif language_mode:
compile_source_file(i, input_file)
elif input_file == '-':
exit_with_error('-E or -x required when input is from standard input')
else:
# Default to assuming the inputs are object files and pass them to the linker
logger.debug('using object file: ' + input_file)
linker_inputs.append((i, input_file))
return linker_inputs
@ToolchainProfiler.profile_block('calculate system libraries')
def phase_calculate_system_libraries(state, linker_arguments, linker_inputs, newargs):
extra_files_to_link = []
# link in ports and system libraries, if necessary
if not settings.SIDE_MODULE:
# Ports are always linked into the main module, never the side module.
extra_files_to_link += system_libs.get_ports_libs(settings)
if '-nostdlib' not in newargs and '-nodefaultlibs' not in newargs:
settings.LINK_AS_CXX = run_via_emxx
# Traditionally we always link as C++. For compatibility we continue to do that,
# unless running in strict mode.
if not settings.STRICT and '-nostdlib++' not in newargs:
settings.LINK_AS_CXX = True
extra_files_to_link += system_libs.calculate([f for _, f in sorted(linker_inputs)] + extra_files_to_link, forced=state.forced_stdlibs)
linker_arguments.extend(extra_files_to_link)
@ToolchainProfiler.profile_block('link')
def phase_link(linker_arguments, wasm_target):
logger.debug(f'linking: {linker_arguments}')
# Make a final pass over settings.EXPORTED_FUNCTIONS to remove any
# duplication between functions added by the driver/libraries and functions
# specified by the user
settings.EXPORTED_FUNCTIONS = dedup_list(settings.EXPORTED_FUNCTIONS)
# if EMCC_DEBUG=2 then we must link now, so the temp files are complete.
# if using the wasm backend, we might be using vanilla LLVM, which does not allow our
# fastcomp deferred linking opts.
# TODO: we could check if this is a fastcomp build, and still speed things up here
js_syms = None
if settings.LLD_REPORT_UNDEFINED and settings.ERROR_ON_UNDEFINED_SYMBOLS:
js_syms = get_all_js_syms()
building.link_lld(linker_arguments, wasm_target, external_symbols=js_syms)
@ToolchainProfiler.profile_block('post_link')
def phase_post_link(options, state, in_wasm, wasm_target, target):
global final_js
target_basename = unsuffixed_basename(target)
if options.oformat != OFormat.WASM:
final_js = in_temp(target_basename + '.js')
settings.TARGET_BASENAME = unsuffixed_basename(target)
if options.oformat in (OFormat.JS, OFormat.MJS):
state.js_target = target
else:
state.js_target = get_secondary_target(target, '.js')
settings.TARGET_JS_NAME = os.path.basename(state.js_target)
if settings.MEM_INIT_IN_WASM:
memfile = None
else:
memfile = shared.replace_or_append_suffix(target, '.mem')
phase_emscript(options, in_wasm, wasm_target, memfile)
phase_source_transforms(options, target)
if memfile and not settings.MINIMAL_RUNTIME:
# MINIMAL_RUNTIME doesn't use `var memoryInitializer` but instead expects Module['mem'] to
# be loaded before the module. See src/postamble_minimal.js.
phase_memory_initializer(memfile)
phase_binaryen(target, options, wasm_target)
# If we are not emitting any JS then we are all done now
if options.oformat != OFormat.WASM:
phase_final_emitting(options, state, target, wasm_target, memfile)
@ToolchainProfiler.profile_block('emscript')
def phase_emscript(options, in_wasm, wasm_target, memfile):
# Emscripten
logger.debug('emscript')
if options.memory_init_file:
settings.MEM_INIT_METHOD = 1
else:
assert settings.MEM_INIT_METHOD != 1
if embed_memfile():
settings.SUPPORT_BASE64_EMBEDDING = 1
emscripten.run(in_wasm, wasm_target, final_js, memfile)
save_intermediate('original')
@ToolchainProfiler.profile_block('source transforms')
def phase_source_transforms(options, target):
global final_js
# Embed and preload files
if len(options.preload_files) or len(options.embed_files):
logger.debug('setting up files')
file_args = ['--from-emcc', '--export-name=' + settings.EXPORT_NAME]
if len(options.preload_files):
file_args.append('--preload')
file_args += options.preload_files
if len(options.embed_files):
file_args.append('--embed')
file_args += options.embed_files
if len(options.exclude_files):
file_args.append('--exclude')
file_args += options.exclude_files
if options.use_preload_cache:
file_args.append('--use-preload-cache')
if settings.LZ4:
file_args.append('--lz4')
if options.use_preload_plugins:
file_args.append('--use-preload-plugins')
if not settings.ENVIRONMENT_MAY_BE_NODE:
file_args.append('--no-node')
file_code = shared.check_call([shared.FILE_PACKAGER, unsuffixed(target) + '.data'] + file_args, stdout=PIPE).stdout
options.pre_js = js_manipulation.add_files_pre_js(options.pre_js, file_code)
# Apply pre and postjs files
if final_js and (options.pre_js or options.post_js):
logger.debug('applying pre/postjses')
src = read_file(final_js)
final_js += '.pp.js'
with open(final_js, 'w') as f:
# pre-js code goes right after the Module integration code (so it
# can use Module), we have a marker for it
f.write(do_replace(src, '// {{PRE_JSES}}', fix_windows_newlines(options.pre_js)))
f.write(fix_windows_newlines(options.post_js))
options.pre_js = src = options.post_js = None
save_intermediate('pre-post')
# Apply a source code transformation, if requested
if options.js_transform:
safe_copy(final_js, final_js + '.tr.js')
final_js += '.tr.js'
posix = not shared.WINDOWS
logger.debug('applying transform: %s', options.js_transform)
shared.check_call(building.remove_quotes(shlex.split(options.js_transform, posix=posix) + [os.path.abspath(final_js)]))
save_intermediate('transformed')
@ToolchainProfiler.profile_block('memory initializer')
def phase_memory_initializer(memfile):
# For the wasm backend, we don't have any memory info in JS. All we need to do
# is set the memory initializer url.
global final_js
src = read_file(final_js)
src = do_replace(src, '// {{MEM_INITIALIZER}}', 'var memoryInitializer = "%s";' % os.path.basename(memfile))
write_file(final_js + '.mem.js', src)
final_js += '.mem.js'
@ToolchainProfiler.profile_block('final emitting')
def phase_final_emitting(options, state, target, wasm_target, memfile):
global final_js
# Remove some trivial whitespace
# TODO: do not run when compress has already been done on all parts of the code
# src = read_file(final_js)
# src = re.sub(r'\n+[ \n]*\n+', '\n', src)
# write_file(final_js, src)
if settings.USE_PTHREADS:
target_dir = os.path.dirname(os.path.abspath(target))
worker_output = os.path.join(target_dir, settings.PTHREAD_WORKER_FILE)
with open(worker_output, 'w') as f:
f.write(shared.read_and_preprocess(shared.path_from_root('src', 'worker.js'), expand_macros=True))
# Minify the worker.js file in optimized builds
if (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1) and not settings.DEBUG_LEVEL:
minified_worker = building.acorn_optimizer(worker_output, ['minifyWhitespace'], return_output=True)
write_file(worker_output, minified_worker)
# track files that will need native eols
generated_text_files_with_native_eols = []
if settings.MODULARIZE:
modularize()
module_export_name_substitution()
# Run a final regex pass to clean up items that Closure was not able to optimize, or inefficiencies that were left behind
# by processing steps that occurred after Closure.
if settings.MINIMAL_RUNTIME == 2 and settings.USE_CLOSURE_COMPILER and settings.DEBUG_LEVEL == 0 and not settings.SINGLE_FILE:
# Process .js runtime file. Note that we need to handle the license text
# here, so that it will not confuse the hacky script.
shared.JS.handle_license(final_js)
shared.run_process([shared.PYTHON, shared.path_from_root('tools', 'hacky_postprocess_around_closure_limitations.py'), final_js])
# Unmangle previously mangled `import.meta` references in both main code and libraries.
# See also: `preprocess` in parseTools.js.
if settings.EXPORT_ES6 and settings.USE_ES6_IMPORT_META:
src = read_file(final_js)
final_js += '.esmeta.js'
write_file(final_js, src.replace('EMSCRIPTEN$IMPORT$META', 'import.meta'))
save_intermediate('es6-import-meta')
# Apply pre and postjs files
if options.extern_pre_js or options.extern_post_js:
logger.debug('applying extern pre/postjses')
src = read_file(final_js)
final_js += '.epp.js'
with open(final_js, 'w') as f:
f.write(fix_windows_newlines(options.extern_pre_js))
f.write(src)
f.write(fix_windows_newlines(options.extern_post_js))
save_intermediate('extern-pre-post')
shared.JS.handle_license(final_js)
js_target = state.js_target
# The JS is now final. Move it to its final location
move_file(final_js, js_target)
if not settings.SINGLE_FILE:
generated_text_files_with_native_eols += [js_target]
target_basename = unsuffixed_basename(target)
# If we were asked to also generate HTML, do that
if options.oformat == OFormat.HTML:
generate_html(target, options, js_target, target_basename,
wasm_target, memfile)
elif settings.PROXY_TO_WORKER:
generate_worker_js(target, js_target, target_basename)
if embed_memfile() and memfile:
shared.try_delete(memfile)
if settings.SPLIT_MODULE:
diagnostics.warning('experimental', 'The SPLIT_MODULE setting is experimental and subject to change')
do_split_module(wasm_target)
for f in generated_text_files_with_native_eols:
tools.line_endings.convert_line_endings_in_file(f, os.linesep, options.output_eol)
if options.executable:
make_js_executable(js_target)
def version_string():
# if the emscripten folder is not a git repo, don't run git - that can
# look up and find the revision in a parent directory that is a git repo
revision_suffix = ''
if os.path.exists(shared.path_from_root('.git')):
git_rev = run_process(
['git', 'rev-parse', 'HEAD'],
stdout=PIPE, stderr=PIPE, cwd=shared.path_from_root()).stdout.strip()
revision_suffix = '-git (%s)' % git_rev
elif os.path.exists(shared.path_from_root('emscripten-revision.txt')):
with open(shared.path_from_root('emscripten-revision.txt')) as f:
git_rev = f.read().strip()
revision_suffix = ' (%s)' % git_rev
return f'emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) {shared.EMSCRIPTEN_VERSION}{revision_suffix}'
def parse_args(newargs):
options = EmccOptions()
settings_changes = []
user_js_defines = []
should_exit = False
eh_enabled = False
wasm_eh_enabled = False
skip = False
for i in range(len(newargs)):
if skip:
skip = False
continue
# On Windows Vista (and possibly others), excessive spaces in the command line
# leak into the items in this array, so trim e.g. 'foo.cpp ' -> 'foo.cpp'
newargs[i] = newargs[i].strip()
arg = newargs[i]
arg_value = None
def check_flag(value):
# Check for and consume a flag
if arg == value:
newargs[i] = ''
return True
return False
def check_arg(name):
nonlocal arg_value
if arg.startswith(name) and '=' in arg:
arg_value = arg.split('=', 1)[1]
newargs[i] = ''
return True
if arg == name:
if len(newargs) <= i + 1:
exit_with_error("option '%s' requires an argument" % arg)
arg_value = newargs[i + 1]
newargs[i] = ''
newargs[i + 1] = ''
return True
return False
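# check_arg() accepts both '--flag=value' and '--flag value'; in the
# two-token form the value is read from the following argument and both
# tokens are blanked out of newargs so they are not passed through to later
# stages.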
def consume_arg():
nonlocal arg_value
assert arg_value is not None
rtn = arg_value
arg_value = None
return rtn
def consume_arg_file():
name = consume_arg()
if not os.path.isfile(name):
exit_with_error("'%s': file not found: '%s'" % (arg, name))
return name
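# The -O handling below mirrors the gcc/clang spellings: a bare -O means -O2,
# -Os maps to OPT_LEVEL=2 with SHRINK_LEVEL=1, and -Oz maps to OPT_LEVEL=2
# with SHRINK_LEVEL=2 (both also add INLINING_LIMIT=1).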
if arg.startswith('-O'):
# Let -O default to -O2, which is what gcc does.
options.requested_level = strip_prefix(arg, '-O') or '2'
if options.requested_level == 's':
options.requested_level = 2
settings.SHRINK_LEVEL = 1
settings_changes.append('INLINING_LIMIT=1')
elif options.requested_level == 'z':
options.requested_level = 2
settings.SHRINK_LEVEL = 2
settings_changes.append('INLINING_LIMIT=1')
settings.OPT_LEVEL = validate_arg_level(options.requested_level, 3, 'Invalid optimization level: ' + arg, clamp=True)
elif check_arg('--js-opts'):
logger.warning('--js-opts ignored when using llvm backend')
consume_arg()
elif check_arg('--llvm-opts'):
diagnostics.warning('deprecated', '--llvm-opts is deprecated. All non-emcc args are passed through to clang.')
elif arg.startswith('-flto'):
if '=' in arg:
settings.LTO = arg.split('=')[1]
else:
settings.LTO = "full"
elif check_arg('--llvm-lto'):
logger.warning('--llvm-lto ignored when using llvm backend')
consume_arg()
elif check_arg('--closure-args'):
args = consume_arg()
options.closure_args += shlex.split(args)
elif check_arg('--closure'):
options.use_closure_compiler = int(consume_arg())
elif check_arg('--js-transform'):
options.js_transform = consume_arg()
elif check_arg('--pre-js'):
options.pre_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--post-js'):
options.post_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--extern-pre-js'):
options.extern_pre_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--extern-post-js'):
options.extern_post_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--compiler-wrapper'):
config.COMPILER_WRAPPER = consume_arg()
elif check_flag('--post-link'):
options.post_link = True
elif check_arg('--oformat'):
formats = [f.lower() for f in OFormat.__members__]
fmt = consume_arg()
if fmt not in formats:
exit_with_error('invalid output format: `%s` (must be one of %s)' % (fmt, formats))
options.oformat = getattr(OFormat, fmt.upper())
elif check_arg('--minify'):
arg = consume_arg()
if arg != '0':
exit_with_error('0 is the only supported option for --minify; 1 has been deprecated')
settings.DEBUG_LEVEL = max(1, settings.DEBUG_LEVEL)
elif arg.startswith('-g'):
options.requested_debug = arg
requested_level = strip_prefix(arg, '-g') or '3'
if is_int(requested_level):
# the -gX value is the debug level (-g1, -g2, etc.)
settings.DEBUG_LEVEL = validate_arg_level(requested_level, 4, 'Invalid debug level: ' + arg)
# if we don't need to preserve LLVM debug info, do not keep this flag
# for clang
if settings.DEBUG_LEVEL < 3:
newargs[i] = ''
else:
# for 3+, report -g to clang as -g4 etc. are not accepted
newargs[i] = '-g'
if settings.DEBUG_LEVEL == 4:
settings.GENERATE_SOURCE_MAP = 1
diagnostics.warning('deprecated', 'please replace -g4 with -gsource-map')
else:
if requested_level.startswith('force_dwarf'):
exit_with_error('gforce_dwarf was a temporary option and is no longer necessary (use -g)')
elif requested_level.startswith('separate-dwarf'):
# emit full DWARF but also emit it in a file on the side
newargs[i] = '-g'
# if a file is provided, use that; otherwise use the default location
# (note that we do not know the default location until all args have
# been parsed, so just note True for now).
if requested_level != 'separate-dwarf':
if not requested_level.startswith('separate-dwarf=') or requested_level.count('=') != 1:
exit_with_error('invalid -gseparate-dwarf=FILENAME notation')
settings.SEPARATE_DWARF = requested_level.split('=')[1]
else:
settings.SEPARATE_DWARF = True
elif requested_level == 'source-map':
settings.GENERATE_SOURCE_MAP = 1
newargs[i] = '-g'
# a non-integer level can be something like -gline-tables-only. keep
# the flag for the clang frontend to emit the appropriate DWARF info.
# set the emscripten debug level to 3 so that we do not remove that
# debug info during link (during compile, this does not make a
# difference).
settings.DEBUG_LEVEL = 3
elif check_flag('-profiling') or check_flag('--profiling'):
settings.DEBUG_LEVEL = max(settings.DEBUG_LEVEL, 2)
elif check_flag('-profiling-funcs') or check_flag('--profiling-funcs'):
options.profiling_funcs = True
elif newargs[i] == '--tracing' or newargs[i] == '--memoryprofiler':
if newargs[i] == '--memoryprofiler':
options.memory_profiler = True
options.tracing = True
newargs[i] = ''
settings_changes.append("EMSCRIPTEN_TRACING=1")
settings.JS_LIBRARIES.append((0, 'library_trace.js'))
elif check_flag('--emit-symbol-map'):
options.emit_symbol_map = True
settings.EMIT_SYMBOL_MAP = 1
elif check_flag('--bind'):
settings.EMBIND = 1
settings.JS_LIBRARIES.append((0, os.path.join('embind', 'emval.js')))
settings.JS_LIBRARIES.append((0, os.path.join('embind', 'embind.js')))
elif check_arg('--embed-file'):
options.embed_files.append(consume_arg())
elif check_arg('--preload-file'):
options.preload_files.append(consume_arg())
elif check_arg('--exclude-file'):
options.exclude_files.append(consume_arg())
elif check_flag('--use-preload-cache'):
options.use_preload_cache = True
elif check_flag('--no-heap-copy'):
diagnostics.warning('legacy-settings', 'ignoring legacy flag --no-heap-copy (that is the only mode supported now)')
elif check_flag('--use-preload-plugins'):
options.use_preload_plugins = True
elif check_flag('--ignore-dynamic-linking'):
options.ignore_dynamic_linking = True
elif arg == '-v':
shared.PRINT_STAGES = True
elif check_arg('--shell-file'):
options.shell_path = consume_arg_file()
elif check_arg('--source-map-base'):
options.source_map_base = consume_arg()
elif check_flag('--no-entry'):
options.no_entry = True
elif check_arg('--js-library'):
settings.JS_LIBRARIES.append((i + 1, os.path.abspath(consume_arg_file())))
elif check_flag('--remove-duplicates'):
diagnostics.warning('legacy-settings', '--remove-duplicates is deprecated as it is no longer needed. If you cannot link without it, file a bug with a testcase')
elif check_flag('--jcache'):
logger.error('jcache is no longer supported')
elif check_arg('--cache'):
config.CACHE = os.path.normpath(consume_arg())
shared.reconfigure_cache()
elif check_flag('--clear-cache'):
logger.info('clearing cache as requested by --clear-cache: `%s`', shared.Cache.dirname)
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--clear-ports'):
logger.info('clearing ports and cache as requested by --clear-ports')
system_libs.Ports.erase()
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--check'):
print(version_string(), file=sys.stderr)
shared.check_sanity(force=True)
should_exit = True
elif check_flag('--show-ports'):
system_libs.show_ports()
should_exit = True
elif check_arg('--memory-init-file'):
options.memory_init_file = int(consume_arg())
elif check_flag('--proxy-to-worker'):
settings_changes.append('PROXY_TO_WORKER=1')
elif check_arg('--valid-abspath'):
options.valid_abspaths.append(consume_arg())
elif check_flag('--separate-asm'):
exit_with_error('cannot --separate-asm with the wasm backend, since not emitting asm.js')
elif arg.startswith(('-I', '-L')):
path_name = arg[2:]
if os.path.isabs(path_name) and not is_valid_abspath(options, path_name):
# Of course an absolute path to a non-system-specific library or header
# is fine, and you can ignore this warning. The danger are system headers
# that are e.g. x86 specific and non-portable. The emscripten bundled
# headers are modified to be portable, local system ones are generally not.
diagnostics.warning(
'absolute-paths', f'-I or -L of an absolute path "{arg}" '
'encountered. If this is to a local system header/library, it may '
'cause problems (local system files make sense for compiling natively '
'on your system, but not necessarily to JavaScript).')
elif check_flag('--emrun'):
options.emrun = True
elif check_flag('--cpuprofiler'):
options.cpu_profiler = True
elif check_flag('--threadprofiler'):
options.thread_profiler = True
settings_changes.append('PTHREADS_PROFILING=1')
elif arg == '-fno-exceptions':
settings.DISABLE_EXCEPTION_CATCHING = 1
settings.DISABLE_EXCEPTION_THROWING = 1
settings.EXCEPTION_HANDLING = 0
elif arg == '-fexceptions':
eh_enabled = True
elif arg == '-fwasm-exceptions':
wasm_eh_enabled = True
elif arg == '-fignore-exceptions':
settings.DISABLE_EXCEPTION_CATCHING = 1
elif check_arg('--default-obj-ext'):
options.default_object_extension = consume_arg()
if not options.default_object_extension.startswith('.'):
options.default_object_extension = '.' + options.default_object_extension
elif arg == '-fsanitize=cfi':
options.cfi = True
elif check_arg('--output_eol'):
style = consume_arg()
if style.lower() == 'windows':
options.output_eol = '\r\n'
elif style.lower() == 'linux':
options.output_eol = '\n'
else:
exit_with_error(f'Invalid value "{style}" to --output_eol!')
elif check_arg('--generate-config'):
optarg = consume_arg()
path = os.path.expanduser(optarg)
if os.path.exists(path):
exit_with_error(f'File {optarg} passed to --generate-config already exists!')
else:
config.generate_config(optarg)
should_exit = True
# Record USE_PTHREADS setting because it controls whether --shared-memory is passed to lld
elif arg == '-pthread':
settings_changes.append('USE_PTHREADS=1')
elif arg in ('-fno-diagnostics-color', '-fdiagnostics-color=never'):
colored_logger.disable()
diagnostics.color_enabled = False
elif arg == '-fno-rtti':
settings.USE_RTTI = 0
elif arg == '-frtti':
settings.USE_RTTI = 1
elif arg.startswith('-jsD'):
key = strip_prefix(arg, '-jsD')
if '=' in key:
key, value = key.split('=')
else:
value = '1'
if key in settings.keys():
exit_with_error(f'{arg}: cannot change built-in settings values with a -jsD directive. Pass -s {key}={value} instead!')
user_js_defines += [(key, value)]
newargs[i] = ''
elif check_flag('-shared'):
options.shared = True
elif check_flag('-r'):
options.relocatable = True
elif check_arg('-o'):
options.output_file = consume_arg()
elif arg.startswith('-o'):
options.output_file = strip_prefix(arg, '-o')
newargs[i] = ''
elif arg == '-mllvm':
# Ignore the next argument rather than trying to parse it. This is needed
# because llvm args could, for example, start with `-o` and we don't want
# to confuse that with a normal `-o` flag.
skip = True
if should_exit:
sys.exit(0)
# TODO Currently -fexceptions only means Emscripten EH. Switch to wasm
# exception handling by default when -fexceptions is given when wasm
# exception handling becomes stable.
if wasm_eh_enabled:
settings.EXCEPTION_HANDLING = 1
settings.DISABLE_EXCEPTION_THROWING = 1
settings.DISABLE_EXCEPTION_CATCHING = 1
elif eh_enabled:
settings.EXCEPTION_HANDLING = 0
settings.DISABLE_EXCEPTION_THROWING = 0
settings.DISABLE_EXCEPTION_CATCHING = 0
newargs = [a for a in newargs if a]
return options, settings_changes, user_js_defines, newargs
@ToolchainProfiler.profile_block('binaryen')
def phase_binaryen(target, options, wasm_target):
global final_js
logger.debug('using binaryen')
if settings.GENERATE_SOURCE_MAP and not settings.SOURCE_MAP_BASE:
logger.warning("Wasm source map won't be usable in a browser without --source-map-base")
# whether we need to emit -g (function name debug info) in the final wasm
debug_info = settings.DEBUG_LEVEL >= 2 or options.profiling_funcs
# whether we need to emit -g in the intermediate binaryen invocations (but not
# necessarily at the very end). this is necessary if we depend on debug info
# during compilation, even if we do not emit it at the end.
# we track the number of reasons for needing intermediate debug info so
# that we can stop emitting it as soon as possible - in particular, that is
# important so that we stop emitting it before the end, and it does not end
# up in the final binary (if it shouldn't be there)
intermediate_debug_info = 0
if debug_info:
intermediate_debug_info += 1
if options.emit_symbol_map:
intermediate_debug_info += 1
if settings.ASYNCIFY:
intermediate_debug_info += 1
# note that wasm-ld can strip DWARF info for us too (--strip-debug), but it
# also strips the Names section. so to emit just the Names section we don't
# tell wasm-ld to strip anything, and we do it here.
strip_debug = settings.DEBUG_LEVEL < 3
strip_producers = not settings.EMIT_PRODUCERS_SECTION
# run wasm-opt if we have work for it: either passes, or if we are using
# source maps (which requires some extra processing to keep the source map
# but remove DWARF)
passes = get_binaryen_passes()
if passes or settings.GENERATE_SOURCE_MAP:
# if we need to strip certain sections, and we have wasm-opt passes
# to run anyhow, do it with them.
if strip_debug:
passes += ['--strip-debug']
if strip_producers:
passes += ['--strip-producers']
building.save_intermediate(wasm_target, 'pre-byn.wasm')
# if asyncify is used, we will use it in the next stage, and so if it is
# the only reason we need intermediate debug info, we can stop keeping it
if settings.ASYNCIFY:
intermediate_debug_info -= 1
building.run_wasm_opt(wasm_target,
wasm_target,
args=passes,
debug=intermediate_debug_info)
elif strip_debug or strip_producers:
# we are not running wasm-opt. if we need to strip certain sections
# then do so using llvm-objcopy which is fast and does not rewrite the
# code (which is better for debug info)
building.save_intermediate(wasm_target, 'pre-strip.wasm')
building.strip(wasm_target, wasm_target, debug=strip_debug, producers=strip_producers)
if settings.EVAL_CTORS:
building.save_intermediate(wasm_target, 'pre-ctors.wasm')
building.eval_ctors(final_js, wasm_target, debug_info=intermediate_debug_info)
# after generating the wasm, do some final operations
if settings.EMIT_EMSCRIPTEN_METADATA:
diagnostics.warning('deprecated', 'We hope to remove support for EMIT_EMSCRIPTEN_METADATA. See https://github.com/emscripten-core/emscripten/issues/12231')
webassembly.add_emscripten_metadata(wasm_target)
if final_js:
if settings.SUPPORT_BIG_ENDIAN:
final_js = building.little_endian_heap(final_js)
# >=2GB heap support requires pointers in JS to be unsigned. rather than
# require all pointers to be unsigned by default, which increases code size
# a little, keep them signed, and just unsign them here if we need that.
if settings.CAN_ADDRESS_2GB:
final_js = building.use_unsigned_pointers_in_js(final_js)
# pthreads memory growth requires some additional JS fixups.
# note that we must do this after handling of unsigned pointers. unsigning
# adds some >>> 0 things, while growth will replace a HEAP8 with a call to
# a method to get the heap, and that call would not be recognized by the
# unsigning pass
if settings.USE_PTHREADS and settings.ALLOW_MEMORY_GROWTH:
final_js = building.apply_wasm_memory_growth(final_js)
if settings.USE_ASAN:
final_js = building.instrument_js_for_asan(final_js)
if settings.SAFE_HEAP:
final_js = building.instrument_js_for_safe_heap(final_js)
if settings.OPT_LEVEL >= 2 and settings.DEBUG_LEVEL <= 2:
# minify the JS. Do not minify whitespace if Closure is used, so that
# Closure can print out readable error messages (Closure will then
# minify whitespace afterwards)
save_intermediate_with_wasm('preclean', wasm_target)
final_js = building.minify_wasm_js(js_file=final_js,
wasm_file=wasm_target,
expensive_optimizations=will_metadce(),
minify_whitespace=minify_whitespace() and not options.use_closure_compiler,
debug_info=intermediate_debug_info)
save_intermediate_with_wasm('postclean', wasm_target)
if settings.ASYNCIFY_LAZY_LOAD_CODE:
building.asyncify_lazy_load_code(wasm_target, debug=intermediate_debug_info)
def preprocess_wasm2js_script():
return read_and_preprocess(shared.path_from_root('src', 'wasm2js.js'), expand_macros=True)
def run_closure_compiler():
global final_js
final_js = building.closure_compiler(final_js, pretty=not minify_whitespace(),
extra_closure_args=options.closure_args)
save_intermediate_with_wasm('closure', wasm_target)
if final_js and options.use_closure_compiler:
run_closure_compiler()
symbols_file = None
if options.emit_symbol_map:
symbols_file = shared.replace_or_append_suffix(target, '.symbols')
if settings.WASM2JS:
symbols_file_js = None
if settings.WASM == 2:
wasm2js_template = wasm_target + '.js'
with open(wasm2js_template, 'w') as f:
f.write(preprocess_wasm2js_script())
# generate secondary file for JS symbols
if options.emit_symbol_map:
symbols_file_js = shared.replace_or_append_suffix(wasm2js_template, '.symbols')
else:
wasm2js_template = final_js
if options.emit_symbol_map:
symbols_file_js = shared.replace_or_append_suffix(target, '.symbols')
wasm2js = building.wasm2js(wasm2js_template,
wasm_target,
opt_level=settings.OPT_LEVEL,
minify_whitespace=minify_whitespace(),
use_closure_compiler=options.use_closure_compiler,
debug_info=debug_info,
symbols_file=symbols_file,
symbols_file_js=symbols_file_js)
shared.configuration.get_temp_files().note(wasm2js)
if settings.WASM == 2:
safe_copy(wasm2js, wasm2js_template)
if settings.WASM != 2:
final_js = wasm2js
# if we only target JS, we don't need the wasm any more
shared.try_delete(wasm_target)
save_intermediate('wasm2js')
# emit the final symbols, either in the binary or in a symbol map.
# this will also remove debug info if we only kept it around in the intermediate invocations.
# note that if we aren't emitting a binary (like in wasm2js) then we don't
# have anything to do here.
if options.emit_symbol_map:
intermediate_debug_info -= 1
if os.path.exists(wasm_target):
building.handle_final_wasm_symbols(wasm_file=wasm_target, symbols_file=symbols_file, debug_info=intermediate_debug_info)
save_intermediate_with_wasm('symbolmap', wasm_target)
if settings.DEBUG_LEVEL >= 3 and settings.SEPARATE_DWARF and os.path.exists(wasm_target):
building.emit_debug_on_side(wasm_target, settings.SEPARATE_DWARF)
if settings.WASM2C:
wasm2c.do_wasm2c(wasm_target)
# we have finished emitting the wasm, so intermediate debug info will
# definitely no longer be needed; stop tracking it.
if debug_info:
intermediate_debug_info -= 1
assert intermediate_debug_info == 0
# strip debug info if it was not already stripped by the last command
if not debug_info and building.binaryen_kept_debug_info and \
building.os.path.exists(wasm_target):
building.run_wasm_opt(wasm_target, wasm_target)
# replace placeholder strings with correct subresource locations
if final_js and settings.SINGLE_FILE and not settings.WASM2JS:
js = read_file(final_js)
if settings.MINIMAL_RUNTIME:
js = do_replace(js, '<<< WASM_BINARY_DATA >>>', base64_encode(read_binary(wasm_target)))
else:
js = do_replace(js, '<<< WASM_BINARY_FILE >>>', shared.JS.get_subresource_location(wasm_target))
shared.try_delete(wasm_target)
with open(final_js, 'w') as f:
f.write(js)
def modularize():
global final_js
logger.debug(f'Modularizing, assigning to var {settings.EXPORT_NAME}')
src = read_file(final_js)
return_value = settings.EXPORT_NAME
if settings.WASM_ASYNC_COMPILATION:
return_value += '.ready'
if not settings.EXPORT_READY_PROMISE:
return_value = '{}'
src = '''
function(%(EXPORT_NAME)s) {
%(EXPORT_NAME)s = %(EXPORT_NAME)s || {};
%(src)s
return %(return_value)s
}
''' % {
'EXPORT_NAME': settings.EXPORT_NAME,
'src': src,
'return_value': return_value
}
if settings.MINIMAL_RUNTIME and not settings.USE_PTHREADS:
# Single threaded MINIMAL_RUNTIME programs do not need access to
# document.currentScript, so a simple export declaration is enough.
src = 'var %s=%s' % (settings.EXPORT_NAME, src)
else:
script_url_node = ""
# When MODULARIZE is used, this JS may be executed later,
# after document.currentScript is gone, so we save it.
# In EXPORT_ES6 + USE_PTHREADS the 'thread' is actually an ES6 module web worker running in strict mode,
# so it doesn't have access to 'document'. In this case use 'import.meta' instead.
if settings.EXPORT_ES6 and settings.USE_ES6_IMPORT_META:
script_url = "import.meta.url"
else:
script_url = "typeof document !== 'undefined' && document.currentScript ? document.currentScript.src : undefined"
if shared.target_environment_may_be('node'):
script_url_node = "if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;"
src = '''
var %(EXPORT_NAME)s = (function() {
var _scriptDir = %(script_url)s;
%(script_url_node)s
return (%(src)s);
})();
''' % {
'EXPORT_NAME': settings.EXPORT_NAME,
'script_url': script_url,
'script_url_node': script_url_node,
'src': src
}
final_js += '.modular.js'
with open(final_js, 'w') as f:
f.write(src)
# Export using a UMD style export, or ES6 exports if selected
if settings.EXPORT_ES6:
f.write('export default %s;' % settings.EXPORT_NAME)
elif not settings.MINIMAL_RUNTIME:
f.write('''\
if (typeof exports === 'object' && typeof module === 'object')
module.exports = %(EXPORT_NAME)s;
else if (typeof define === 'function' && define['amd'])
define([], function() { return %(EXPORT_NAME)s; });
else if (typeof exports === 'object')
exports["%(EXPORT_NAME)s"] = %(EXPORT_NAME)s;
''' % {'EXPORT_NAME': settings.EXPORT_NAME})
shared.configuration.get_temp_files().note(final_js)
save_intermediate('modularized')
def module_export_name_substitution():
global final_js
logger.debug(f'Private module export name substitution with {settings.EXPORT_NAME}')
with open(final_js) as f:
src = f.read()
final_js += '.module_export_name_substitution.js'
if settings.MINIMAL_RUNTIME:
# In MINIMAL_RUNTIME the Module object is always present to provide the .asm.js/.wasm content
replacement = settings.EXPORT_NAME
else:
replacement = "typeof %(EXPORT_NAME)s !== 'undefined' ? %(EXPORT_NAME)s : {}" % {"EXPORT_NAME": settings.EXPORT_NAME}
src = re.sub(r'{\s*[\'"]?__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__[\'"]?:\s*1\s*}', replacement, src)
# For Node.js and other shell environments, create an unminified Module object so that
# loading external .asm.js file that assigns to Module['asm'] works even when Closure is used.
if settings.MINIMAL_RUNTIME and (shared.target_environment_may_be('node') or shared.target_environment_may_be('shell')):
src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
with open(final_js, 'w') as f:
f.write(src)
shared.configuration.get_temp_files().note(final_js)
save_intermediate('module_export_name_substitution')
def generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile):
script = ScriptSource()
shell = read_and_preprocess(options.shell_path)
assert '{{{ SCRIPT }}}' in shell, 'HTML shell must contain {{{ SCRIPT }}} , see src/shell.html for an example'
base_js_target = os.path.basename(js_target)
if settings.PROXY_TO_WORKER:
proxy_worker_filename = (settings.PROXY_TO_WORKER_FILENAME or target_basename) + '.js'
worker_js = worker_js_script(proxy_worker_filename)
script.inline = ('''
var filename = '%s';
if ((',' + window.location.search.substr(1) + ',').indexOf(',noProxy,') < 0) {
console.log('running code in a web worker');
''' % shared.JS.get_subresource_location(proxy_worker_filename)) + worker_js + '''
} else {
console.log('running code on the main thread');
var fileBytes = tryParseAsDataURI(filename);
var script = document.createElement('script');
if (fileBytes) {
script.innerHTML = intArrayToString(fileBytes);
} else {
script.src = filename;
}
document.body.appendChild(script);
}
'''
else:
# Normal code generation path
script.src = base_js_target
if not settings.SINGLE_FILE:
if memfile and not settings.MINIMAL_RUNTIME:
# start to load the memory init file in the HTML, in parallel with the JS
script.un_src()
script.inline = ('''
var memoryInitializer = '%s';
memoryInitializer = Module['locateFile'] ? Module['locateFile'](memoryInitializer, '') : memoryInitializer;
Module['memoryInitializerRequestURL'] = memoryInitializer;
var meminitXHR = Module['memoryInitializerRequest'] = new XMLHttpRequest();
meminitXHR.open('GET', memoryInitializer, true);
meminitXHR.responseType = 'arraybuffer';
meminitXHR.send(null);
''' % shared.JS.get_subresource_location(memfile)) + script.inline
if not settings.WASM_ASYNC_COMPILATION:
# We need to load the wasm file before anything else; it has to be synchronously ready. TODO: optimize
script.un_src()
script.inline = '''
var wasmURL = '%s';
var wasmXHR = new XMLHttpRequest();
wasmXHR.open('GET', wasmURL, true);
wasmXHR.responseType = 'arraybuffer';
wasmXHR.onload = function() {
if (wasmXHR.status === 200 || wasmXHR.status === 0) {
Module.wasmBinary = wasmXHR.response;
} else {
var wasmURLBytes = tryParseAsDataURI(wasmURL);
if (wasmURLBytes) {
Module.wasmBinary = wasmURLBytes.buffer;
}
}
%s
};
wasmXHR.send(null);
''' % (shared.JS.get_subresource_location(wasm_target), script.inline)
if settings.WASM == 2:
# If target browser does not support WebAssembly, we need to load the .wasm.js file before the main .js file.
script.un_src()
script.inline = '''
function loadMainJs() {
%s
}
if (!window.WebAssembly || location.search.indexOf('_rwasm=0') > 0) {
// Current browser does not support WebAssembly, load the .wasm.js JavaScript fallback
// before the main JS runtime.
var wasm2js = document.createElement('script');
wasm2js.src = '%s';
wasm2js.onload = loadMainJs;
document.body.appendChild(wasm2js);
} else {
// Current browser supports Wasm, proceed with loading the main JS runtime.
loadMainJs();
}
''' % (script.inline, shared.JS.get_subresource_location(wasm_target) + '.js')
# when script.inline isn't empty, add required helper functions such as tryParseAsDataURI
if script.inline:
for filename in ('arrayUtils.js', 'base64Utils.js', 'URIUtils.js'):
content = read_and_preprocess(shared.path_from_root('src', filename))
script.inline = content + script.inline
script.inline = 'var ASSERTIONS = %s;\n%s' % (settings.ASSERTIONS, script.inline)
# inline script for SINGLE_FILE output
if settings.SINGLE_FILE:
js_contents = script.inline or ''
if script.src:
js_contents += read_file(js_target)
shared.try_delete(js_target)
script.src = None
script.inline = js_contents
html_contents = do_replace(shell, '{{{ SCRIPT }}}', script.replacement())
html_contents = tools.line_endings.convert_line_endings(html_contents, '\n', options.output_eol)
try:
with open(target, 'wb') as f:
# Force UTF-8 output for consistency across platforms and with the web.
f.write(html_contents.encode('utf-8'))
except OSError as e:
exit_with_error(f'cannot write output file: {e}')
def minify_html(filename):
if settings.DEBUG_LEVEL >= 2:
return
opts = []
# -g1 and greater retain whitespace and comments in source
if settings.DEBUG_LEVEL == 0:
opts += ['--collapse-whitespace',
'--collapse-inline-tag-whitespace',
'--remove-comments',
'--remove-tag-whitespace',
'--sort-attributes',
'--sort-class-name']
# -g2 and greater do not minify HTML at all
if settings.DEBUG_LEVEL <= 1:
opts += ['--decode-entities',
'--collapse-boolean-attributes',
'--remove-attribute-quotes',
'--remove-redundant-attributes',
'--remove-script-type-attributes',
'--remove-style-link-type-attributes',
'--use-short-doctype',
'--minify-css', 'true',
'--minify-js', 'true']
# html-minifier also has the following options, but they look unsafe for use:
# '--remove-optional-tags': removes e.g. <head></head> and <body></body> tags from the page.
# (Breaks at least browser.test_sdl2glshader)
# '--remove-empty-attributes': removes all attributes with whitespace-only values.
# (Breaks at least browser.test_asmfs_hello_file)
# '--remove-empty-elements': removes all elements with empty contents.
# (Breaks at least browser.test_asm_swapping)
logger.debug(f'minifying HTML file {filename}')
size_before = os.path.getsize(filename)
start_time = time.time()
shared.check_call(shared.get_npm_cmd('html-minifier-terser') + [filename, '-o', filename] + opts, env=shared.env_with_node_in_path())
elapsed_time = time.time() - start_time
size_after = os.path.getsize(filename)
delta = size_after - size_before
logger.debug(f'HTML minification took {elapsed_time:.2f} seconds, and shrunk size of {filename} from {size_before} to {size_after} bytes, delta={delta} ({delta * 100.0 / size_before:+.2f}%)')
def generate_html(target, options, js_target, target_basename,
wasm_target, memfile):
logger.debug('generating HTML')
if settings.EXPORT_NAME != 'Module' and \
not settings.MINIMAL_RUNTIME and \
options.shell_path == shared.path_from_root('src', 'shell.html'):
# the minimal runtime shell HTML is designed to support changing the export
# name, but the normal one does not support that currently
exit_with_error('Customizing EXPORT_NAME requires that the HTML be customized to use that name (see https://github.com/emscripten-core/emscripten/issues/10086)')
if settings.MINIMAL_RUNTIME:
generate_minimal_runtime_html(target, options, js_target, target_basename)
else:
generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile)
if settings.MINIFY_HTML and (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1):
minify_html(target)
def generate_worker_js(target, js_target, target_basename):
# compiler output is embedded as base64
if settings.SINGLE_FILE:
proxy_worker_filename = shared.JS.get_subresource_location(js_target)
# compiler output goes in .worker.js file
else:
move_file(js_target, unsuffixed(js_target) + '.worker.js')
worker_target_basename = target_basename + '.worker'
proxy_worker_filename = (settings.PROXY_TO_WORKER_FILENAME or worker_target_basename) + '.js'
target_contents = worker_js_script(proxy_worker_filename)
write_file(target, target_contents)
def worker_js_script(proxy_worker_filename):
web_gl_client_src = read_file(shared.path_from_root('src', 'webGLClient.js'))
idb_store_src = read_file(shared.path_from_root('src', 'IDBStore.js'))
proxy_client_src = read_file(shared.path_from_root('src', 'proxyClient.js'))
proxy_client_src = do_replace(proxy_client_src, '{{{ filename }}}', proxy_worker_filename)
proxy_client_src = do_replace(proxy_client_src, '{{{ IDBStore.js }}}', idb_store_src)
return web_gl_client_src + '\n' + proxy_client_src
def find_library(lib, lib_dirs):
for lib_dir in lib_dirs:
path = os.path.join(lib_dir, lib)
if os.path.isfile(path):
logger.debug('found library "%s" at %s', lib, path)
return path
return None
def process_libraries(state, linker_inputs):
new_flags = []
libraries = []
suffixes = STATICLIB_ENDINGS + DYNAMICLIB_ENDINGS
system_libs_map = system_libs.Library.get_usable_variations()
# Find library files
for i, flag in state.link_flags:
if not flag.startswith('-l'):
new_flags.append((i, flag))
continue
lib = strip_prefix(flag, '-l')
logger.debug('looking for library "%s"', lib)
js_libs, native_lib = building.map_to_js_libs(lib)
if js_libs is not None:
libraries += [(i, js_lib) for js_lib in js_libs]
# If native_lib is returned then include it in the link
# via forced_stdlibs.
if native_lib:
state.forced_stdlibs.append(native_lib)
continue
# We don't need to resolve system libraries to absolute paths here, we can just
# let wasm-ld handle that. However, we do want to map to the correct variant.
# For example we map `-lc` to `-lc-mt` if we are building with threading support.
if 'lib' + lib in system_libs_map:
lib = system_libs_map['lib' + lib]
new_flags.append((i, '-l' + strip_prefix(lib.get_base_name(), 'lib')))
continue
if building.map_and_apply_to_settings(lib):
continue
path = None
for suff in suffixes:
name = 'lib' + lib + suff
path = find_library(name, state.lib_dirs)
if path:
break
if path:
linker_inputs.append((i, path))
continue
new_flags.append((i, flag))
settings.JS_LIBRARIES += libraries
# At this point processing JS_LIBRARIES is finished, no more items will be added to it.
# Sort the input list from (order, lib_name) pairs to a flat array in the right order.
settings.JS_LIBRARIES.sort(key=lambda lib: lib[0])
settings.JS_LIBRARIES = [lib[1] for lib in settings.JS_LIBRARIES]
state.link_flags = new_flags
class ScriptSource:
def __init__(self):
self.src = None # if set, we have a script to load with a src attribute
self.inline = None # if set, we have the contents of a script to write inline in a script
def un_src(self):
"""Use this if you want to modify the script and need it to be inline."""
if self.src is None:
return
quoted_src = quote(self.src)
if settings.EXPORT_ES6:
self.inline = f'''
import("./{quoted_src}").then(exports => exports.default(Module))
'''
else:
self.inline = f'''
var script = document.createElement('script');
script.src = "{quoted_src}";
document.body.appendChild(script);
'''
self.src = None
def replacement(self):
"""Returns the script tag to replace the {{{ SCRIPT }}} tag in the target"""
assert (self.src or self.inline) and not (self.src and self.inline)
if self.src:
quoted_src = quote(self.src)
if settings.EXPORT_ES6:
return f'''
<script type="module">
import initModule from "./{quoted_src}";
initModule(Module);
</script>
'''
else:
return f'<script async type="text/javascript" src="{quoted_src}"></script>'
else:
return '<script>\n%s\n</script>' % self.inline
def is_valid_abspath(options, path_name):
# Any path that is underneath the emscripten repository root must be ok.
if shared.path_from_root().replace('\\', '/') in path_name.replace('\\', '/'):
return True
def in_directory(root, child):
# make both paths absolute
root = os.path.realpath(root)
child = os.path.realpath(child)
# return True if the common prefix of both is equal to root,
# e.g. for /a/b/c/d.rst with root /a/b, the common prefix is /a/b
return os.path.commonprefix([root, child]) == root
for valid_abspath in options.valid_abspaths:
if in_directory(valid_abspath, path_name):
return True
return False
def parse_symbol_list_file(contents):
"""Parse contents of one-symbol-per-line response file. This format can by used
with, for example, -sEXPORTED_FUNCTIONS=@filename and avoids the need for any
kind of quoting or escaping.
"""
values = contents.splitlines()
return [v.strip() for v in values]
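# Illustrative sketch (not part of emcc): shows the one-symbol-per-line format
# that parse_symbol_list_file() accepts. The symbol names are hypothetical.
def _example_parse_symbol_list_file():
  contents = '_main\n_my_func\n'
  # -> ['_main', '_my_func']
  return parse_symbol_list_file(contents)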
def parse_value(text, expect_list):
# Note that using response files can introduce whitespace, if the file
# has a newline at the end. For that reason, we rstrip() in relevant
# places here.
def parse_string_value(text):
first = text[0]
if first == "'" or first == '"':
text = text.rstrip()
assert text[-1] == text[0] and len(text) > 1, 'unclosed opened quoted string. expected final character to be "%s" and length to be greater than 1 in "%s"' % (text[0], text)
return text[1:-1]
return text
def parse_string_list_members(text):
sep = ','
values = text.split(sep)
result = []
index = 0
while True:
current = values[index].lstrip() # Cannot safely rstrip for cases like: "HERE-> ,"
if not len(current):
exit_with_error('string array should not contain an empty value')
first = current[0]
if not (first == "'" or first == '"'):
result.append(current.rstrip())
else:
start = index
while True: # Continue until closing quote found
if index >= len(values):
exit_with_error("unclosed quoted string. expected final character to be '%s' in '%s'" % (first, values[start]))
new = values[index].rstrip()
if new and new[-1] == first:
if start == index:
result.append(current.rstrip()[1:-1])
else:
result.append((current + sep + new)[1:-1])
break
else:
current += sep + values[index]
index += 1
index += 1
if index >= len(values):
break
return result
def parse_string_list(text):
text = text.rstrip()
if text and text[0] == '[':
if text[-1] != ']':
exit_with_error('unclosed opened string list. expected final character to be "]" in "%s"' % (text))
text = text[1:-1]
if text.strip() == "":
return []
return parse_string_list_members(text)
if expect_list or (text and text[0] == '['):
# if json parsing fails, we fall back to our own parser, which can handle a few
# simpler syntaxes
try:
return json.loads(text)
except ValueError:
return parse_string_list(text)
try:
return int(text)
except ValueError:
return parse_string_value(text)
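# Illustrative sketch (not part of emcc): a few example inputs for parse_value(),
# exercising the JSON path and the simpler fallback parser above. All values
# here are hypothetical.
def _example_parse_value():
  assert parse_value('2', expect_list=False) == 2
  assert parse_value('"hello"', expect_list=False) == 'hello'
  assert parse_value('["_a","_b"]', expect_list=True) == ['_a', '_b']
  # non-JSON input falls back to the comma-separated parser
  assert parse_value('_a,_b', expect_list=True) == ['_a', '_b']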
def validate_arg_level(level_string, max_level, err_msg, clamp=False):
try:
level = int(level_string)
except ValueError:
raise Exception(err_msg)
if clamp:
if level > max_level:
logger.warning("optimization level '-O" + level_string + "' is not supported; using '-O" + str(max_level) + "' instead")
level = max_level
if not 0 <= level <= max_level:
raise Exception(err_msg)
return level
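# Illustrative sketch (not part of emcc): validate_arg_level() usage, e.g. for
# optimization levels. The error message text is a placeholder.
def _example_validate_arg_level():
  assert validate_arg_level('2', 3, 'invalid level') == 2
  # with clamp=True an out-of-range level is reduced to max_level (with a warning)
  assert validate_arg_level('4', 3, 'invalid level', clamp=True) == 3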
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def main(args):
start_time = time.time()
ret = run(args)
logger.debug('total time: %.2f seconds', (time.time() - start_time))
return ret
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
#!/usr/bin/env python3
# Copyright 2011 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""emcc - compiler helper script
=============================
emcc is a drop-in replacement for a compiler like gcc or clang.
See emcc --help for details.
emcc can be influenced by a few environment variables:
EMCC_DEBUG - "1" will log out useful information during compilation, as well as
save each compiler step as an emcc-* file in the temp dir
(by default /tmp/emscripten_temp). "2" will save additional emcc-*
steps, that would normally not be separately produced (so this
slows down compilation).
"""
from tools.toolchain_profiler import ToolchainProfiler
import base64
import json
import logging
import os
import re
import shlex
import shutil
import stat
import sys
import time
from enum import Enum, unique, auto
from subprocess import PIPE
from urllib.parse import quote
import emscripten
from tools import shared, system_libs
from tools import colored_logger, diagnostics, building
from tools.shared import unsuffixed, unsuffixed_basename, WINDOWS, safe_copy
from tools.shared import run_process, read_and_preprocess, exit_with_error, DEBUG
from tools.shared import do_replace, strip_prefix
from tools.response_file import substitute_response_files
from tools.minimal_runtime_shell import generate_minimal_runtime_html
import tools.line_endings
from tools import js_manipulation
from tools import wasm2c
from tools import webassembly
from tools import config
from tools.settings import settings, MEM_SIZE_SETTINGS, COMPILE_TIME_SETTINGS
from tools.utils import read_file, write_file, read_binary
logger = logging.getLogger('emcc')
# endings = dot + a suffix, safe to test by filename.endswith(endings)
C_ENDINGS = ('.c', '.i')
CXX_ENDINGS = ('.cpp', '.cxx', '.cc', '.c++', '.CPP', '.CXX', '.C', '.CC', '.C++', '.ii')
OBJC_ENDINGS = ('.m', '.mi')
OBJCXX_ENDINGS = ('.mm', '.mii')
ASSEMBLY_CPP_ENDINGS = ('.S',)
SPECIAL_ENDINGLESS_FILENAMES = (os.devnull,)
SOURCE_ENDINGS = C_ENDINGS + CXX_ENDINGS + OBJC_ENDINGS + OBJCXX_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES + ASSEMBLY_CPP_ENDINGS
C_ENDINGS = C_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES # consider the special endingless filenames like /dev/null to be C
EXECUTABLE_ENDINGS = ('.wasm', '.html', '.js', '.mjs', '.out', '')
DYNAMICLIB_ENDINGS = ('.dylib', '.so') # Windows .dll suffix is not included in this list, since those are never linked to directly on the command line.
STATICLIB_ENDINGS = ('.a',)
ASSEMBLY_ENDINGS = ('.ll', '.s')
HEADER_ENDINGS = ('.h', '.hxx', '.hpp', '.hh', '.H', '.HXX', '.HPP', '.HH')
# Supported LLD flags which we will pass through to the linker.
SUPPORTED_LINKER_FLAGS = (
'--start-group', '--end-group',
'-(', '-)',
'--whole-archive', '--no-whole-archive',
'-whole-archive', '-no-whole-archive'
)
# Unsupported LLD flags which we will ignore.
# Maps to true if the flag takes an argument.
UNSUPPORTED_LLD_FLAGS = {
# macOS-specific linker flag that libtool (ltmain.sh) will add if macOS is detected.
'-bind_at_load': False,
'-M': False,
# wasm-ld doesn't support soname or other dynamic linking flags (yet). Ignore them
# in order to aid build systems that want to pass these flags.
'-soname': True,
'-allow-shlib-undefined': False,
'-rpath': True,
'-rpath-link': True,
'-version-script': True,
}
DEFAULT_ASYNCIFY_IMPORTS = [
'emscripten_sleep', 'emscripten_wget', 'emscripten_wget_data', 'emscripten_idb_load',
'emscripten_idb_store', 'emscripten_idb_delete', 'emscripten_idb_exists',
'emscripten_idb_load_blob', 'emscripten_idb_store_blob', 'SDL_Delay',
'emscripten_scan_registers', 'emscripten_lazy_load_code',
'emscripten_fiber_swap',
'wasi_snapshot_preview1.fd_sync', '__wasi_fd_sync', '_emval_await',
'dlopen', '__asyncjs__*'
]
# Target options
final_js = None
UBSAN_SANITIZERS = {
'alignment',
'bool',
'builtin',
'bounds',
'enum',
'float-cast-overflow',
'float-divide-by-zero',
'function',
'implicit-unsigned-integer-truncation',
'implicit-signed-integer-truncation',
'implicit-integer-sign-change',
'integer-divide-by-zero',
'nonnull-attribute',
'null',
'nullability-arg',
'nullability-assign',
'nullability-return',
'object-size',
'pointer-overflow',
'return',
'returns-nonnull-attribute',
'shift',
'signed-integer-overflow',
'unreachable',
'unsigned-integer-overflow',
'vla-bound',
'vptr',
'undefined',
'undefined-trap',
'implicit-integer-truncation',
'implicit-integer-arithmetic-value-change',
'implicit-conversion',
'integer',
'nullability',
}
VALID_ENVIRONMENTS = ('web', 'webview', 'worker', 'node', 'shell')
SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx']
SIMD_NEON_FLAGS = ['-mfpu=neon']
# this function uses the global 'final_js' variable, which contains the current
# final output file. if a method alters final_js, and calls this method, then it
# must modify final_js globally (i.e. it can't receive final_js as a param and
# return it)
# TODO: refactor all this, a singleton that abstracts over the final output
# and saving of intermediates
def save_intermediate(name, suffix='js'):
if not DEBUG:
return
if not final_js:
logger.debug(f'(not saving intermediate {name} because not generating JS)')
return
building.save_intermediate(final_js, f'{name}.{suffix}')
def save_intermediate_with_wasm(name, wasm_binary):
if not DEBUG:
return
save_intermediate(name) # save the js
building.save_intermediate(wasm_binary, name + '.wasm')
def base64_encode(b):
b64 = base64.b64encode(b)
return b64.decode('ascii')
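# Illustrative sketch (not part of emcc): base64_encode() returns ASCII text
# suitable for embedding in JS, e.g. for SINGLE_FILE output.
def _example_base64_encode():
  assert base64_encode(b'wasm') == 'd2FzbQ=='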
@unique
class OFormat(Enum):
# Output a relocatable object file. We use this
# today for `-r` and `-shared`.
OBJECT = auto()
WASM = auto()
JS = auto()
MJS = auto()
HTML = auto()
BARE = auto()
@unique
class Mode(Enum):
PREPROCESS_ONLY = auto()
PCH = auto()
COMPILE_ONLY = auto()
POST_LINK_ONLY = auto()
COMPILE_AND_LINK = auto()
class EmccState:
def __init__(self, args):
self.mode = Mode.COMPILE_AND_LINK
self.orig_args = args
self.has_dash_c = False
self.has_dash_E = False
self.has_dash_S = False
self.link_flags = []
self.lib_dirs = []
self.forced_stdlibs = []
def add_link_flag(state, i, f):
if f.startswith('-L'):
state.lib_dirs.append(f[2:])
state.link_flags.append((i, f))
class EmccOptions:
def __init__(self):
self.output_file = None
self.post_link = False
self.executable = False
self.compiler_wrapper = None
self.oformat = None
self.requested_debug = ''
self.profiling_funcs = False
self.tracing = False
self.emit_symbol_map = False
self.use_closure_compiler = None
self.closure_args = []
self.js_transform = None
self.pre_js = '' # before all js
self.post_js = '' # after all js
self.extern_pre_js = '' # before all js, external to optimized code
self.extern_post_js = '' # after all js, external to optimized code
self.preload_files = []
self.embed_files = []
self.exclude_files = []
self.ignore_dynamic_linking = False
self.shell_path = shared.path_from_root('src', 'shell.html')
self.source_map_base = ''
self.emrun = False
self.cpu_profiler = False
self.thread_profiler = False
self.memory_profiler = False
self.memory_init_file = None
self.use_preload_cache = False
self.use_preload_plugins = False
self.default_object_extension = '.o'
self.valid_abspaths = []
self.cfi = False
# Specifies the line ending format to use for all generated text files.
# Defaults to using the native EOL on each platform (\r\n on Windows, \n on
# Linux & MacOS)
self.output_eol = os.linesep
self.no_entry = False
self.shared = False
self.relocatable = False
def will_metadce():
# The metadce JS parsing code does not currently support the JS that gets generated
# when assertions are enabled.
if settings.ASSERTIONS:
return False
return settings.OPT_LEVEL >= 3 or settings.SHRINK_LEVEL >= 1
def setup_environment_settings():
# Environment setting based on user input
environments = settings.ENVIRONMENT.split(',')
if any([x for x in environments if x not in VALID_ENVIRONMENTS]):
exit_with_error(f'Invalid environment specified in "ENVIRONMENT": {settings.ENVIRONMENT}. Should be one of: {",".join(VALID_ENVIRONMENTS)}')
settings.ENVIRONMENT_MAY_BE_WEB = not settings.ENVIRONMENT or 'web' in environments
settings.ENVIRONMENT_MAY_BE_WEBVIEW = not settings.ENVIRONMENT or 'webview' in environments
settings.ENVIRONMENT_MAY_BE_NODE = not settings.ENVIRONMENT or 'node' in environments
settings.ENVIRONMENT_MAY_BE_SHELL = not settings.ENVIRONMENT or 'shell' in environments
# The worker case also includes Node.js workers when pthreads are
# enabled and Node.js is one of the supported environments for the build to
# run on. Node.js workers are detected as a combination of
# ENVIRONMENT_IS_WORKER and ENVIRONMENT_IS_NODE.
settings.ENVIRONMENT_MAY_BE_WORKER = \
not settings.ENVIRONMENT or \
'worker' in environments or \
(settings.ENVIRONMENT_MAY_BE_NODE and settings.USE_PTHREADS)
if not settings.ENVIRONMENT_MAY_BE_WORKER and settings.PROXY_TO_WORKER:
exit_with_error('If you specify --proxy-to-worker and specify a "-s ENVIRONMENT=" directive, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
if not settings.ENVIRONMENT_MAY_BE_WORKER and settings.USE_PTHREADS:
exit_with_error('When building with multithreading enabled and a "-s ENVIRONMENT=" directive is specified, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
def minify_whitespace():
return settings.OPT_LEVEL >= 2 and settings.DEBUG_LEVEL == 0
def embed_memfile():
return (settings.SINGLE_FILE or
(settings.MEM_INIT_METHOD == 0 and
(not settings.MAIN_MODULE and
not settings.SIDE_MODULE and
not settings.GENERATE_SOURCE_MAP)))
def expand_byte_size_suffixes(value):
"""Given a string with KB/MB size suffixes, such as "32MB", computes how
many bytes that is and returns it as an integer.
"""
value = value.strip()
match = re.match(r'^(\d+)\s*([kmgt]?b)?$', value, re.I)
if not match:
exit_with_error("invalid byte size `%s`. Valid suffixes are: kb, mb, gb, tb" % value)
value, suffix = match.groups()
value = int(value)
if suffix:
size_suffixes = {suffix: 1024 ** i for i, suffix in enumerate(['b', 'kb', 'mb', 'gb', 'tb'])}
value *= size_suffixes[suffix.lower()]
return value
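# Illustrative sketch (not part of emcc): how byte-size suffixes are expanded,
# e.g. for memory-related -s settings. The values are hypothetical.
def _example_expand_byte_size_suffixes():
  assert expand_byte_size_suffixes('32MB') == 32 * 1024 * 1024
  assert expand_byte_size_suffixes('64kb') == 64 * 1024
  # plain integers (no suffix) pass through unchanged
  assert expand_byte_size_suffixes('65536') == 65536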
def apply_settings(changes):
"""Take a map of users settings {NAME: VALUE} and apply them to the global
settings object.
"""
def standardize_setting_change(key, value):
# boolean NO_X settings are aliases for X
# (note that *non*-boolean setting values have special meanings,
# and we can't just flip them, so leave them as-is to be
# handled in a special way later)
if key.startswith('NO_') and value in ('0', '1'):
key = strip_prefix(key, 'NO_')
value = str(1 - int(value))
return key, value
for key, value in changes.items():
key, value = standardize_setting_change(key, value)
if key in settings.internal_settings:
exit_with_error('%s is an internal setting and cannot be set from command line', key)
# map legacy settings which have aliases to the new names
# but keep the original key so errors are correctly reported via the `setattr` below
user_key = key
if key in settings.legacy_settings and key in settings.alt_names:
key = settings.alt_names[key]
# In those settings fields that represent amount of memory, translate suffixes to multiples of 1024.
if key in MEM_SIZE_SETTINGS:
value = str(expand_byte_size_suffixes(value))
filename = None
if value and value[0] == '@':
filename = strip_prefix(value, '@')
if not os.path.exists(filename):
exit_with_error('%s: file not found parsing argument: %s=%s' % (filename, key, value))
value = read_file(filename).strip()
else:
value = value.replace('\\', '\\\\')
existing = getattr(settings, user_key, None)
expect_list = type(existing) == list
if filename and expect_list and value.strip()[0] != '[':
# Prefer the simpler one-line-per-value parser
value = parse_symbol_list_file(value)
else:
try:
value = parse_value(value, expect_list)
except Exception as e:
exit_with_error('a problem occurred in evaluating the content after a "-s", specifically "%s=%s": %s', key, value, str(e))
# Do some basic type checking by comparing to the existing settings.
# Sadly we can't do this generically in the SettingsManager since there are settings
# that do change types internally over time.
# We only currently worry about lists vs non-lists.
if expect_list != (type(value) == list):
exit_with_error('setting `%s` expects `%s` but got `%s`' % (user_key, type(existing), type(value)))
setattr(settings, user_key, value)
if key == 'EXPORTED_FUNCTIONS':
# used for warnings in emscripten.py
settings.USER_EXPORTED_FUNCTIONS = settings.EXPORTED_FUNCTIONS.copy()
# TODO(sbc): Remove this legacy way.
if key == 'WASM_OBJECT_FILES':
settings.LTO = 0 if value else 'full'
def is_ar_file_with_missing_index(archive_file):
# We parse the archive header ourselves because llvm-nm --print-armap is slower and less
# reliable.
# See: https://github.com/emscripten-core/emscripten/issues/10195
archive_header = b'!<arch>\n'
file_header_size = 60
with open(archive_file, 'rb') as f:
header = f.read(len(archive_header))
if header != archive_header:
# This is not even an ar file
return False
file_header = f.read(file_header_size)
if len(file_header) != file_header_size:
# We don't have any file entries at all, so we don't consider the index missing
return False
name = file_header[:16].strip()
# If '/' is the name of the first file we have an index
return name != b'/'
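# Illustrative sketch (not part of emcc): builds a minimal in-memory archive
# prefix to show the layout the check above relies on (the member contents are
# made up). A GNU ar index, when present, is a first member named '/'.
def _example_ar_index_layout():
  import io
  magic = b'!<arch>\n'
  # a 60-byte member header whose 16-byte name field is '/' marks the index
  index_member_header = b'/'.ljust(16) + b'0'.ljust(44)
  assert len(index_member_header) == 60
  return io.BytesIO(magic + index_member_header)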
def ensure_archive_index(archive_file):
# Fastcomp linking works without archive indexes.
if not settings.AUTO_ARCHIVE_INDEXES:
return
if is_ar_file_with_missing_index(archive_file):
diagnostics.warning('emcc', '%s: archive is missing an index; Use emar when creating libraries to ensure an index is created', archive_file)
diagnostics.warning('emcc', '%s: adding index', archive_file)
run_process([shared.LLVM_RANLIB, archive_file])
@ToolchainProfiler.profile_block('JS symbol generation')
def get_all_js_syms():
# Runs the js compiler to generate a list of all symbols available in the JS
# libraries. This must be done separately for each linker invocation since the
# list of symbols depends on what settings are used.
# TODO(sbc): Find a way to optimize this. Potentially we could add a super-set
# mode of the js compiler that would generate a list of all possible symbols
# that could be checked in.
old_full = settings.INCLUDE_FULL_LIBRARY
try:
# Temporarily define INCLUDE_FULL_LIBRARY since we want a full list
# of all available JS library functions.
settings.INCLUDE_FULL_LIBRARY = True
settings.ONLY_CALC_JS_SYMBOLS = True
emscripten.generate_struct_info()
glue, forwarded_data = emscripten.compile_settings()
forwarded_json = json.loads(forwarded_data)
library_syms = set()
for name in forwarded_json['libraryFunctions']:
if shared.is_c_symbol(name):
name = shared.demangle_c_symbol_name(name)
library_syms.add(name)
finally:
settings.ONLY_CALC_JS_SYMBOLS = False
settings.INCLUDE_FULL_LIBRARY = old_full
return library_syms
def filter_link_flags(flags, using_lld):
def is_supported(f):
if using_lld:
for flag, takes_arg in UNSUPPORTED_LLD_FLAGS.items():
# lld allows various flags to have either a single -foo or double --foo
if f.startswith(flag) or f.startswith('-' + flag):
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
# Skip the next argument if this linker flag takes an argument and that
# argument was not specified separately (i.e. it was specified as a
# single arg containing an `=` char).
skip_next = takes_arg and '=' not in f
return False, skip_next
return True, False
else:
if f in SUPPORTED_LINKER_FLAGS:
return True, False
# Silently ignore -l/-L flags when not using lld. If using lld allow
# them to pass through the linker
if f.startswith('-l') or f.startswith('-L'):
return False, False
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
return False, False
results = []
skip_next = False
for f in flags:
if skip_next:
skip_next = False
continue
keep, skip_next = is_supported(f[1])
if keep:
results.append(f)
return results
def fix_windows_newlines(text):
# Avoid duplicating \r\n to \r\r\n when writing out text.
if WINDOWS:
text = text.replace('\r\n', '\n')
return text
def cxx_to_c_compiler(cxx):
# Convert C++ compiler name into C compiler name
dirname, basename = os.path.split(cxx)
basename = basename.replace('clang++', 'clang').replace('g++', 'gcc').replace('em++', 'emcc')
return os.path.join(dirname, basename)
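# Illustrative sketch (not part of emcc): hypothetical paths showing the
# C++ -> C compiler name mapping performed above.
def _example_cxx_to_c_compiler():
  assert cxx_to_c_compiler(os.path.join('/opt/llvm/bin', 'clang++')) == os.path.join('/opt/llvm/bin', 'clang')
  assert cxx_to_c_compiler('em++') == 'emcc'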
def get_binaryen_passes():
# run the binaryen optimizer in -O2+. in -O0 we don't need it obviously, while
# in -O1 we don't run it as the LLVM optimizer has been run, and it does the
# great majority of the work; not running the binaryen optimizer in that case
# keeps -O1 mostly-optimized while compiling quickly and without rewriting
# DWARF etc.
run_binaryen_optimizer = settings.OPT_LEVEL >= 2
passes = []
# safe heap must run before post-emscripten, so post-emscripten can apply the sbrk ptr
if settings.SAFE_HEAP:
passes += ['--safe-heap']
if settings.MEMORY64 == 2:
passes += ['--memory64-lowering']
if run_binaryen_optimizer:
passes += ['--post-emscripten']
if not settings.EXIT_RUNTIME:
passes += ['--no-exit-runtime']
if run_binaryen_optimizer:
passes += [building.opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL)]
# when optimizing, use the fact that low memory is never used (1024 is a
# hardcoded value in the binaryen pass)
if run_binaryen_optimizer and settings.GLOBAL_BASE >= 1024:
passes += ['--low-memory-unused']
if settings.AUTODEBUG:
# adding '--flatten' here may make these even more effective
passes += ['--instrument-locals']
passes += ['--log-execution']
passes += ['--instrument-memory']
if settings.LEGALIZE_JS_FFI:
# legalize it again now, as the instrumentation may need it
passes += ['--legalize-js-interface']
if settings.EMULATE_FUNCTION_POINTER_CASTS:
# note that this pass must run before asyncify, as if it runs afterwards we only
# generate the byn$fpcast_emu functions after asyncify runs, and so we wouldn't
# be able to further process them.
passes += ['--fpcast-emu']
if settings.ASYNCIFY:
passes += ['--asyncify']
if settings.ASSERTIONS:
passes += ['--pass-arg=asyncify-asserts']
if settings.ASYNCIFY_ADVISE:
passes += ['--pass-arg=asyncify-verbose']
if settings.ASYNCIFY_IGNORE_INDIRECT:
passes += ['--pass-arg=asyncify-ignore-indirect']
passes += ['--pass-arg=asyncify-imports@%s' % ','.join(settings.ASYNCIFY_IMPORTS)]
# shell escaping can be confusing; try to emit useful warnings
def check_human_readable_list(items):
for item in items:
if item.count('(') != item.count(')'):
logger.warning('emcc: ASYNCIFY list contains an item without balanced parentheses ("(", ")"):')
logger.warning(' ' + item)
logger.warning('This may indicate improper escaping that led to splitting inside your names.')
logger.warning('Try using a response file (e.g. pass the list via @filename). The format is a simple')
logger.warning('text file, one line per function.')
break
if settings.ASYNCIFY_REMOVE:
check_human_readable_list(settings.ASYNCIFY_REMOVE)
passes += ['--pass-arg=asyncify-removelist@%s' % ','.join(settings.ASYNCIFY_REMOVE)]
if settings.ASYNCIFY_ADD:
check_human_readable_list(settings.ASYNCIFY_ADD)
passes += ['--pass-arg=asyncify-addlist@%s' % ','.join(settings.ASYNCIFY_ADD)]
if settings.ASYNCIFY_ONLY:
check_human_readable_list(settings.ASYNCIFY_ONLY)
passes += ['--pass-arg=asyncify-onlylist@%s' % ','.join(settings.ASYNCIFY_ONLY)]
if settings.BINARYEN_IGNORE_IMPLICIT_TRAPS:
passes += ['--ignore-implicit-traps']
# normally we can assume the memory, if imported, has not been modified
# beforehand (in fact, in most cases the memory is not even imported anyhow,
# but it is still safe to pass the flag), and is therefore filled with zeros.
# the one exception is dynamic linking of a side module: the main module is ok
# as it is loaded first, but the side module may be assigned memory that was
# previously used.
if run_binaryen_optimizer and not settings.SIDE_MODULE:
passes += ['--zero-filled-memory']
if settings.BINARYEN_EXTRA_PASSES:
# BINARYEN_EXTRA_PASSES is comma-separated, and we support both '-'-prefixed and
# unprefixed pass names
extras = settings.BINARYEN_EXTRA_PASSES.split(',')
passes += [('--' + p) if p[0] != '-' else p for p in extras if p]
return passes
def make_js_executable(script):
src = read_file(script)
cmd = shared.shlex_join(config.JS_ENGINE)
if not os.path.isabs(config.JS_ENGINE[0]):
# TODO: use whereis etc. And how about non-*NIX?
cmd = '/usr/bin/env -S ' + cmd
logger.debug('adding `#!` to JavaScript file: %s' % cmd)
# add shebang
with open(script, 'w') as f:
f.write('#!%s\n' % cmd)
f.write(src)
try:
os.chmod(script, stat.S_IMODE(os.stat(script).st_mode) | stat.S_IXUSR) # make executable
except OSError:
pass # can fail if e.g. writing the executable to /dev/null
def do_split_module(wasm_file):
os.rename(wasm_file, wasm_file + '.orig')
args = ['--instrument']
building.run_binaryen_command('wasm-split', wasm_file + '.orig', outfile=wasm_file, args=args)
def is_dash_s_for_emcc(args, i):
# -s OPT=VALUE or -s OPT or -sOPT are all interpreted as emscripten flags.
# -s by itself is a linker option (alias for --strip-all)
if args[i] == '-s':
if len(args) <= i + 1:
return False
arg = args[i + 1]
else:
arg = strip_prefix(args[i], '-s')
arg = arg.split('=')[0]
return arg.isidentifier() and arg.isupper()
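# Illustrative sketch (not part of emcc): examples of the -s disambiguation rule
# implemented above. The setting name used here is hypothetical.
def _example_is_dash_s_for_emcc():
  assert is_dash_s_for_emcc(['-s', 'MY_SETTING=1'], 0)   # emscripten setting
  assert is_dash_s_for_emcc(['-sMY_SETTING=1'], 0)       # joined -sOPT form
  assert not is_dash_s_for_emcc(['-s'], 0)               # bare -s is a linker flag
  assert not is_dash_s_for_emcc(['-s', 'foo.o'], 0)      # next arg is not a SETTING name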
def filter_out_dynamic_libs(options, inputs):
# Filters out "fake" dynamic libraries that are really just intermediate object files.
def check(input_file):
if get_file_suffix(input_file) in DYNAMICLIB_ENDINGS and not building.is_wasm_dylib(input_file):
if not options.ignore_dynamic_linking:
diagnostics.warning('emcc', 'ignoring dynamic library %s because not compiling to JS or HTML, remember to link it when compiling to JS or HTML at the end', os.path.basename(input_file))
return False
else:
return True
return [f for f in inputs if check(f)]
def filter_out_duplicate_dynamic_libs(inputs):
seen = set()
# Filter out duplicate "fake" shared libraries (intermediate object files).
# See test_core.py:test_redundant_link
def check(input_file):
if get_file_suffix(input_file) in DYNAMICLIB_ENDINGS and not building.is_wasm_dylib(input_file):
abspath = os.path.abspath(input_file)
if abspath in seen:
return False
seen.add(abspath)
return True
return [f for f in inputs if check(f)]
def process_dynamic_libs(dylibs, lib_dirs):
extras = []
seen = set()
to_process = dylibs.copy()
while to_process:
dylib = to_process.pop()
dylink = webassembly.parse_dylink_section(dylib)
for needed in dylink.needed:
if needed in seen:
continue
path = find_library(needed, lib_dirs)
if path:
extras.append(path)
seen.add(needed)
else:
exit_with_error(f'{os.path.normpath(dylib)}: shared library dependency not found: `{needed}`')
to_process.append(path)
dylibs += extras
for dylib in dylibs:
exports = webassembly.get_exports(dylib)
exports = set(e.name for e in exports)
settings.SIDE_MODULE_EXPORTS.extend(exports)
imports = webassembly.get_imports(dylib)
imports = [i.field for i in imports if i.kind in (webassembly.ExternType.FUNC, webassembly.ExternType.GLOBAL)]
# For now we ignore `invoke_` functions imported by side modules and rely
# on the dynamic linker to create them on the fly.
# TODO(sbc): Integrate with metadata['invokeFuncs'] that comes from the
# main module to avoid creating new invoke functions at runtime.
imports = set(i for i in imports if not i.startswith('invoke_'))
weak_imports = imports.intersection(exports)
strong_imports = imports.difference(exports)
logger.debug('Adding symbols requirements from `%s`: %s', dylib, imports)
mangled_imports = [shared.asmjs_mangle(e) for e in imports]
mangled_strong_imports = [shared.asmjs_mangle(e) for e in strong_imports]
settings.SIDE_MODULE_IMPORTS.extend(mangled_imports)
settings.EXPORTED_FUNCTIONS.extend(mangled_strong_imports)
settings.EXPORT_IF_DEFINED.extend(weak_imports)
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.extend(strong_imports)
building.user_requested_exports.update(mangled_strong_imports)
def unmangle_symbols_from_cmdline(symbols):
def unmangle(x):
return x.replace('.', ' ').replace('#', '&').replace('?', ',')
if type(symbols) is list:
return [unmangle(x) for x in symbols]
return unmangle(symbols)
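# Illustrative sketch (not part of emcc): unmangle_symbols_from_cmdline()
# reverses the '.'->' ', '#'->'&' and '?'->',' substitutions used to keep
# symbol lists shell-friendly. The inputs are hypothetical.
def _example_unmangle_symbols_from_cmdline():
  assert unmangle_symbols_from_cmdline('foo#bar?baz') == 'foo&bar,baz'
  assert unmangle_symbols_from_cmdline(['a.b']) == ['a b']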
def parse_s_args(args):
settings_changes = []
for i in range(len(args)):
if args[i].startswith('-s'):
if is_dash_s_for_emcc(args, i):
if args[i] == '-s':
key = args[i + 1]
args[i + 1] = ''
else:
key = strip_prefix(args[i], '-s')
args[i] = ''
# If no = is specified, default to 1
if '=' not in key:
key += '=1'
# Special handling of browser version targets. A version -1 means that the specific version
# is not supported at all. Replace those with INT32_MAX to make it possible to compare e.g.
# #if MIN_FIREFOX_VERSION < 68
if re.match(r'MIN_.*_VERSION(=.*)?', key):
try:
if int(key.split('=')[1]) < 0:
key = key.split('=')[0] + '=0x7FFFFFFF'
except Exception:
pass
settings_changes.append(key)
newargs = [a for a in args if a]
return (settings_changes, newargs)
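# Illustrative sketch (not part of emcc): how -s arguments are split out of the
# argument list by parse_s_args(). The setting names are hypothetical.
def _example_parse_s_args():
  args = ['-sFOO=2', '-s', 'BAR', 'main.c']
  changes, rest = parse_s_args(args)
  assert changes == ['FOO=2', 'BAR=1']  # a bare setting defaults to =1
  assert rest == ['main.c']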
def emsdk_ldflags(user_args):
if os.environ.get('EMMAKEN_NO_SDK'):
return []
library_paths = [
shared.Cache.get_lib_dir(absolute=True)
]
ldflags = ['-L' + l for l in library_paths]
if '-nostdlib' in user_args:
return ldflags
return ldflags
def emsdk_cflags(user_args):
cflags = ['--sysroot=' + shared.Cache.get_sysroot(absolute=True)]
def array_contains_any_of(hay, needles):
for n in needles:
if n in hay:
return True
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER) or array_contains_any_of(user_args, SIMD_NEON_FLAGS):
if '-msimd128' not in user_args:
exit_with_error('Passing any of ' + ', '.join(SIMD_INTEL_FEATURE_TOWER + SIMD_NEON_FLAGS) + ' flags also requires passing -msimd128!')
cflags += ['-D__SSE__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[1:]):
cflags += ['-D__SSE2__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[2:]):
cflags += ['-D__SSE3__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[3:]):
cflags += ['-D__SSSE3__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[4:]):
cflags += ['-D__SSE4_1__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[5:]):
cflags += ['-D__SSE4_2__=1']
if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[6:]):
cflags += ['-D__AVX__=1']
if array_contains_any_of(user_args, SIMD_NEON_FLAGS):
cflags += ['-D__ARM_NEON__=1']
return cflags + ['-Xclang', '-iwithsysroot' + os.path.join('/include', 'compat')]
def get_clang_flags():
return ['-target', get_llvm_target()]
def get_llvm_target():
if settings.MEMORY64:
return 'wasm64-unknown-emscripten'
else:
return 'wasm32-unknown-emscripten'
cflags = None
def get_cflags(options, user_args):
global cflags
if cflags:
return cflags
# Flags we pass to the compiler when building C/C++ code
# We add these to the user's flags (newargs), but not when building .s or .S assembly files
cflags = get_clang_flags()
if options.tracing:
cflags.append('-D__EMSCRIPTEN_TRACING__=1')
if settings.USE_PTHREADS:
cflags.append('-D__EMSCRIPTEN_PTHREADS__=1')
if not settings.STRICT:
# The preprocessor define EMSCRIPTEN is deprecated. Don't pass it to code
# in strict mode. Code should use the define __EMSCRIPTEN__ instead.
cflags.append('-DEMSCRIPTEN')
# if exception catching is disabled, we can prevent that code from being
# generated in the frontend
if settings.DISABLE_EXCEPTION_CATCHING and not settings.EXCEPTION_HANDLING:
cflags.append('-fignore-exceptions')
if settings.INLINING_LIMIT:
cflags.append('-fno-inline-functions')
if settings.RELOCATABLE:
cflags.append('-fPIC')
cflags.append('-fvisibility=default')
if settings.LTO:
if not any(a.startswith('-flto') for a in user_args):
cflags.append('-flto=' + settings.LTO)
else:
# In LTO mode these args get passed instead at link time when the backend runs.
for a in building.llvm_backend_args():
cflags += ['-mllvm', a]
# Set the LIBCPP ABI version to at least 2 so that we get nicely aligned string
# data and other nice fixes.
cflags += [# '-fno-threadsafe-statics', # disabled due to issue 1289
'-D__EMSCRIPTEN_major__=' + str(shared.EMSCRIPTEN_VERSION_MAJOR),
'-D__EMSCRIPTEN_minor__=' + str(shared.EMSCRIPTEN_VERSION_MINOR),
'-D__EMSCRIPTEN_tiny__=' + str(shared.EMSCRIPTEN_VERSION_TINY),
'-D_LIBCPP_ABI_VERSION=2']
# For compatibility with the fastcomp compiler that defined these
cflags += ['-Dunix',
'-D__unix',
'-D__unix__']
# Changes to default clang behavior
# Implicit functions can cause horribly confusing function pointer type errors, see #2175
# If your codebase really needs them - very unrecommended! - you can disable the error with
# -Wno-error=implicit-function-declaration
# or disable even a warning about it with
# -Wno-implicit-function-declaration
cflags += ['-Werror=implicit-function-declaration']
system_libs.add_ports_cflags(cflags, settings)
if os.environ.get('EMMAKEN_NO_SDK') or '-nostdinc' in user_args:
return cflags
cflags += emsdk_cflags(user_args)
return cflags
def get_file_suffix(filename):
"""Parses the essential suffix of a filename, discarding Unix-style version
numbers in the name. For example for 'libz.so.1.2.8' returns '.so'"""
if filename in SPECIAL_ENDINGLESS_FILENAMES:
return filename
while filename:
filename, suffix = os.path.splitext(filename)
if not suffix[1:].isdigit():
return suffix
return ''
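# Illustrative sketch (not part of emcc): suffix extraction for versioned
# library names, as described in the docstring above.
def _example_get_file_suffix():
  assert get_file_suffix('libz.so.1.2.8') == '.so'
  assert get_file_suffix('main.cpp') == '.cpp'
  assert get_file_suffix(os.devnull) == os.devnull  # special endingless filename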
def get_library_basename(filename):
"""Similar to get_file_suffix this strips off all numeric suffixes and then
then final non-numeric one. For example for 'libz.so.1.2.8' returns 'libz'"""
filename = os.path.basename(filename)
while filename:
filename, suffix = os.path.splitext(filename)
# Keep stripping suffixes until we strip a non-numeric one.
if not suffix[1:].isdigit():
return filename
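# Illustrative sketch (not part of emcc): companion examples for
# get_library_basename().
def _example_get_library_basename():
  assert get_library_basename('libz.so.1.2.8') == 'libz'
  assert get_library_basename('/usr/lib/libfoo.a') == 'libfoo'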
def get_secondary_target(target, ext):
# Depending on the output format emscripten creates zero or more secondary
# output files (e.g. the .wasm file when creating JS output, or the
# .js and the .wasm files when creating html output).
# This function names the secondary output files, while ensuring they
# never collide with the primary one.
base = unsuffixed(target)
if get_file_suffix(target) == ext:
base += '_'
return base + ext
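# Illustrative sketch (not part of emcc): how secondary output names are
# derived, including the collision-avoiding underscore.
def _example_get_secondary_target():
  assert get_secondary_target('hello.html', '.wasm') == 'hello.wasm'
  # if the primary target already uses the extension, '_' avoids a clash
  assert get_secondary_target('hello.wasm', '.wasm') == 'hello_.wasm'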
def in_temp(name):
temp_dir = shared.get_emscripten_temp_dir()
return os.path.join(temp_dir, os.path.basename(name))
def dedup_list(lst):
rtn = []
for item in lst:
if item not in rtn:
rtn.append(item)
return rtn
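# Illustrative sketch (not part of emcc): dedup_list() keeps the first
# occurrence of each item while preserving order.
def _example_dedup_list():
  assert dedup_list(['-la', '-lb', '-la', '-lc']) == ['-la', '-lb', '-lc']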
def move_file(src, dst):
logging.debug('move: %s -> %s', src, dst)
if os.path.isdir(dst):
exit_with_error(f'cannot write output file `{dst}`: Is a directory')
src = os.path.abspath(src)
dst = os.path.abspath(dst)
if src == dst:
return
if dst == os.devnull:
return
shutil.move(src, dst)
run_via_emxx = False
#
# Main run() function
#
def run(args):
# Additional compiler flags that we treat as if they were passed to us on the
# commandline
EMCC_CFLAGS = os.environ.get('EMCC_CFLAGS')
if DEBUG:
cmd = shared.shlex_join(args)
if EMCC_CFLAGS:
cmd += ' + ' + EMCC_CFLAGS
logger.warning(f'invocation: {cmd} (in {os.getcwd()})')
if EMCC_CFLAGS:
args.extend(shlex.split(EMCC_CFLAGS))
# Strip args[0] (program name)
args = args[1:]
misc_temp_files = shared.configuration.get_temp_files()
# Handle some global flags
# read response files very early on
try:
args = substitute_response_files(args)
except IOError as e:
exit_with_error(e)
if '--help' in args:
# Documentation for emcc and its options must be updated in:
# site/source/docs/tools_reference/emcc.rst
# This then gets built (via: `make -C site text`) to:
# site/build/text/docs/tools_reference/emcc.txt
# This then needs to be copied to its final home in docs/emcc.txt from where
# we read it here. We have CI rules that ensure it's always up-to-date.
with open(shared.path_from_root('docs', 'emcc.txt'), 'r') as f:
print(f.read())
print('''
------------------------------------------------------------------
emcc: supported targets: llvm bitcode, WebAssembly, NOT elf
(autoconf likes to see elf above to enable shared object support)
''')
return 0
if '--version' in args:
print(version_string())
print('''\
Copyright (C) 2014 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''')
return 0
if run_via_emxx:
clang = shared.CLANG_CXX
else:
clang = shared.CLANG_CC
if len(args) == 1 and args[0] == '-v': # -v with no inputs
# autoconf likes to see 'GNU' in the output to enable shared object support
print(version_string(), file=sys.stderr)
return shared.check_call([clang, '-v'] + get_clang_flags(), check=False).returncode
if '-dumpmachine' in args:
print(get_llvm_target())
return 0
if '-dumpversion' in args: # gcc's doc states "Print the compiler version [...] and don't do anything else."
print(shared.EMSCRIPTEN_VERSION)
return 0
if '--cflags' in args:
# fake running the command, to see the full args we pass to clang
args = [x for x in args if x != '--cflags']
with misc_temp_files.get_file(suffix='.o') as temp_target:
input_file = 'hello_world.c'
cmd = [shared.PYTHON, sys.argv[0], shared.path_from_root('tests', input_file), '-v', '-c', '-o', temp_target] + args
proc = run_process(cmd, stderr=PIPE, check=False)
if proc.returncode != 0:
print(proc.stderr)
exit_with_error('error getting cflags')
lines = [x for x in proc.stderr.splitlines() if clang in x and input_file in x]
parts = shlex.split(lines[0].replace('\\', '\\\\'))
parts = [x for x in parts if x not in ['-c', '-o', '-v', '-emit-llvm'] and input_file not in x and temp_target not in x]
print(shared.shlex_join(parts[1:]))
return 0
shared.check_sanity()
if '-print-search-dirs' in args:
return run_process([clang, '-print-search-dirs'], check=False).returncode
EMMAKEN_CFLAGS = os.environ.get('EMMAKEN_CFLAGS')
if EMMAKEN_CFLAGS:
args += shlex.split(EMMAKEN_CFLAGS)
if 'EMMAKEN_NO_SDK' in os.environ:
diagnostics.warning('deprecated', 'We hope to deprecate EMMAKEN_NO_SDK. See https://github.com/emscripten-core/emscripten/issues/14050 if you use this feature.')
## Process arguments and set up the compiler
state = EmccState(args)
options, newargs, settings_map = phase_parse_arguments(state)
# For internal consistency, ensure we don't attempt to read or write any link-time
# settings until we reach the linking phase.
settings.limit_settings(COMPILE_TIME_SETTINGS)
newargs, input_files = phase_setup(options, state, newargs, settings_map)
if state.mode == Mode.POST_LINK_ONLY:
settings.limit_settings(None)
target, wasm_target = phase_linker_setup(options, state, newargs, settings_map)
process_libraries(state, [])
if len(input_files) != 1:
exit_with_error('--post-link requires a single input file')
phase_post_link(options, state, input_files[0][1], wasm_target, target)
return 0
## Compile source code to object files
linker_inputs = phase_compile_inputs(options, state, newargs, input_files)
if state.mode != Mode.COMPILE_AND_LINK:
logger.debug('stopping after compile phase')
for flag in state.link_flags:
diagnostics.warning('unused-command-line-argument', "argument unused during compilation: '%s'" % flag[1])
for f in linker_inputs:
diagnostics.warning('unused-command-line-argument', "%s: linker input file unused because linking not done" % f[1])
return 0
# We have now passed the compile phase, allow reading/writing of all settings.
settings.limit_settings(None)
if options.output_file and options.output_file.startswith('-'):
exit_with_error(f'invalid output filename: `{options.output_file}`')
target, wasm_target = phase_linker_setup(options, state, newargs, settings_map)
# Link object files using wasm-ld or llvm-link (for bitcode linking)
linker_arguments = phase_calculate_linker_inputs(options, state, linker_inputs)
if options.oformat == OFormat.OBJECT:
logger.debug(f'link_to_object: {linker_arguments} -> {target}')
building.link_to_object(linker_arguments, target)
logger.debug('stopping after linking to object file')
return 0
phase_calculate_system_libraries(state, linker_arguments, linker_inputs, newargs)
phase_link(linker_arguments, wasm_target)
# Special handling for when the user passed '-Wl,--version'. In this case the linker
# does not create the output file, but just prints its version and exits with 0.
if '--version' in linker_arguments:
return 0
# TODO(sbc): In theory we should really run the whole pipeline even if the output is
# /dev/null, but that will take some refactoring
if target == os.devnull:
return 0
# Perform post-link steps (unless we are running bare mode)
if options.oformat != OFormat.BARE:
phase_post_link(options, state, wasm_target, wasm_target, target)
return 0
@ToolchainProfiler.profile_block('calculate linker inputs')
def phase_calculate_linker_inputs(options, state, linker_inputs):
using_lld = not (options.oformat == OFormat.OBJECT and settings.LTO)
state.link_flags = filter_link_flags(state.link_flags, using_lld)
# Decide what we will link
process_libraries(state, linker_inputs)
linker_args = [val for _, val in sorted(linker_inputs + state.link_flags)]
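# Sorting on the (possibly fractional) original command-line index above interleaves
# object files and link flags back into the order the user passed them in.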
# If we are linking to an intermediate object then ignore other
# "fake" dynamic libraries, since otherwise we will end up with
# multiple copies in the final executable.
if options.oformat == OFormat.OBJECT or options.ignore_dynamic_linking:
linker_args = filter_out_dynamic_libs(options, linker_args)
else:
linker_args = filter_out_duplicate_dynamic_libs(linker_args)
if settings.MAIN_MODULE:
dylibs = [a for a in linker_args if building.is_wasm_dylib(a)]
process_dynamic_libs(dylibs, state.lib_dirs)
return linker_args
@ToolchainProfiler.profile_block('parse arguments')
def phase_parse_arguments(state):
"""The first phase of the compiler. Parse command line argument and
populate settings.
"""
newargs = state.orig_args.copy()
# Scan and strip emscripten specific cmdline warning flags.
# This needs to run before other cmdline flags have been parsed, so that
# warnings are properly printed during arg parse.
newargs = diagnostics.capture_warnings(newargs)
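# Fuse separated '-l foo' / '-L dir' / '-I dir' pairs into single arguments below,
# e.g. ['-L', '/opt/lib'] becomes ['-L/opt/lib', '']; the empty placeholder entries
# are filtered out further down.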
for i in range(len(newargs)):
if newargs[i] in ('-l', '-L', '-I'):
# Scan for individual -l/-L/-I arguments and concatenate the next arg on
# if there is no suffix
newargs[i] += newargs[i + 1]
newargs[i + 1] = ''
options, settings_changes, user_js_defines, newargs = parse_args(newargs)
if options.post_link or options.oformat == OFormat.BARE:
diagnostics.warning('experimental', '--oformat=bare/--post-link are experimental and subject to change.')
explicit_settings_changes, newargs = parse_s_args(newargs)
settings_changes += explicit_settings_changes
user_settings = {}
for s in settings_changes:
key, value = s.split('=', 1)
user_settings[key] = value
# STRICT is used when applying other settings, so it needs to be applied first,
# before calling `apply_settings`.
strict_cmdline = user_settings.get('STRICT')
if strict_cmdline:
settings.STRICT = int(strict_cmdline)
# Apply user -jsD settings
for s in user_js_defines:
settings[s[0]] = s[1]
# Apply -s settings in newargs here (after optimization levels, so they can override them)
apply_settings(user_settings)
return options, newargs, user_settings
@ToolchainProfiler.profile_block('setup')
def phase_setup(options, state, newargs, settings_map):
"""Second phase: configure and setup the compiler based on the specified settings and arguments.
"""
if settings.RUNTIME_LINKED_LIBS:
diagnostics.warning('deprecated', 'RUNTIME_LINKED_LIBS is deprecated; you can simply list the libraries directly on the commandline now')
newargs += settings.RUNTIME_LINKED_LIBS
def default_setting(name, new_default):
if name not in settings_map:
setattr(settings, name, new_default)
if settings.STRICT:
default_setting('DEFAULT_TO_CXX', 0)
# Find input files
# Arguments of different types (input files, link flags) are stored separately
# for type-specific processing. In order to shuffle the arguments back together
# after processing, each entry is stored as a tuple (original_index, value).
# Note that the index part of the tuple can have a fractional part for input
# arguments that expand into multiple processed arguments, as in -Wl,-f1,-f2.
input_files = []
# find input files with a simple heuristic. we should really analyze
# based on a full understanding of gcc params, right now we just assume that
# what is left contains no more |-x OPT| things
skip = False
has_header_inputs = False
for i in range(len(newargs)):
if skip:
skip = False
continue
arg = newargs[i]
if arg in ('-MT', '-MF', '-MJ', '-MQ', '-D', '-U', '-o', '-x',
'-Xpreprocessor', '-include', '-imacros', '-idirafter',
'-iprefix', '-iwithprefix', '-iwithprefixbefore',
'-isysroot', '-imultilib', '-A', '-isystem', '-iquote',
'-install_name', '-compatibility_version',
'-current_version', '-I', '-L', '-include-pch',
'-Xlinker', '-Xclang'):
skip = True
if not arg.startswith('-'):
# we already removed -o <target>, so all these should be inputs
newargs[i] = ''
# os.devnull should always be reported as existing, but there is a bug in Windows
# Python before 3.8:
# https://bugs.python.org/issue1311
if not os.path.exists(arg) and arg != os.devnull:
exit_with_error('%s: No such file or directory ("%s" was expected to be an input file, based on the commandline arguments provided)', arg, arg)
file_suffix = get_file_suffix(arg)
if file_suffix in HEADER_ENDINGS:
has_header_inputs = True
if file_suffix in STATICLIB_ENDINGS and not building.is_ar(arg):
if building.is_bitcode(arg):
message = f'{arg}: File has a suffix of a static library {STATICLIB_ENDINGS}, but instead is an LLVM bitcode file! When linking LLVM bitcode files use .bc or .o.'
else:
message = arg + ': Unknown format, not a static library!'
exit_with_error(message)
if file_suffix in DYNAMICLIB_ENDINGS and not building.is_bitcode(arg) and not building.is_wasm(arg):
# For shared libraries that are neither bitcode nor wasm, assume it's a local native
# library and attempt to find a library by the same name in our own library path.
# TODO(sbc): Do we really need this feature? See test_other.py:test_local_link
libname = strip_prefix(get_library_basename(arg), 'lib')
flag = '-l' + libname
diagnostics.warning('map-unrecognized-libraries', f'unrecognized file type: `{arg}`. Mapping to `{flag}` and hoping for the best')
add_link_flag(state, i, flag)
else:
input_files.append((i, arg))
elif arg.startswith('-L'):
add_link_flag(state, i, arg)
newargs[i] = ''
elif arg.startswith('-l'):
add_link_flag(state, i, arg)
newargs[i] = ''
elif arg.startswith('-Wl,'):
# Multiple comma separated link flags can be specified. Create fake
# fractional indices for these: -Wl,a,b,c,d at index 4 becomes:
# (4, a), (4.25, b), (4.5, c), (4.75, d)
link_flags_to_add = arg.split(',')[1:]
for flag_index, flag in enumerate(link_flags_to_add):
add_link_flag(state, i + float(flag_index) / len(link_flags_to_add), flag)
newargs[i] = ''
elif arg == '-Xlinker':
add_link_flag(state, i + 1, newargs[i + 1])
newargs[i] = ''
newargs[i + 1] = ''
elif arg == '-s':
# -s and some other compiler flags are normally passed onto the linker
# TODO(sbc): Pass this and other flags through when using lld
# link_flags.append((i, arg))
newargs[i] = ''
elif arg == '-':
input_files.append((i, arg))
newargs[i] = ''
if not input_files and not state.link_flags:
exit_with_error('no input files')
newargs = [a for a in newargs if a]
# SSEx is implemented on top of SIMD128 instruction set, but do not pass SSE flags to LLVM
# so it won't think about generating native x86 SSE code.
newargs = [x for x in newargs if x not in SIMD_INTEL_FEATURE_TOWER and x not in SIMD_NEON_FLAGS]
state.has_dash_c = '-c' in newargs
state.has_dash_S = '-S' in newargs
state.has_dash_E = '-E' in newargs
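# Decide the overall mode of operation: post-link only, preprocess only,
# precompiled-header generation, compile only, or the default compile-and-link.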
if options.post_link:
state.mode = Mode.POST_LINK_ONLY
elif state.has_dash_E or '-M' in newargs or '-MM' in newargs or '-fsyntax-only' in newargs:
state.mode = Mode.PREPROCESS_ONLY
elif has_header_inputs:
state.mode = Mode.PCH
elif state.has_dash_c or state.has_dash_S:
state.mode = Mode.COMPILE_ONLY
if state.mode in (Mode.COMPILE_ONLY, Mode.PREPROCESS_ONLY):
for key in settings_map:
if key not in COMPILE_TIME_SETTINGS:
diagnostics.warning('unused-command-line-argument', "linker setting ignored during compilation: '%s'" % key)
if state.has_dash_c:
if '-emit-llvm' in newargs:
options.default_object_extension = '.bc'
elif state.has_dash_S:
if '-emit-llvm' in newargs:
options.default_object_extension = '.ll'
else:
options.default_object_extension = '.s'
elif '-M' in newargs or '-MM' in newargs:
options.default_object_extension = '.mout' # not bitcode, not js; just the dependency rules for the input file
if options.output_file and len(input_files) > 1:
exit_with_error('cannot specify -o with -c/-S/-E/-M and multiple source files')
if settings.MAIN_MODULE or settings.SIDE_MODULE:
settings.RELOCATABLE = 1
if settings.USE_PTHREADS and '-pthread' not in newargs:
newargs += ['-pthread']
if 'DISABLE_EXCEPTION_CATCHING' in settings_map and 'EXCEPTION_CATCHING_ALLOWED' in settings_map:
# If we get here then the user specified both DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED
# on the command line. This is no longer valid so report either an error or a warning (for
# backwards compat with the old `DISABLE_EXCEPTION_CATCHING=2`).
if settings_map['DISABLE_EXCEPTION_CATCHING'] in ('0', '2'):
diagnostics.warning('deprecated', 'DISABLE_EXCEPTION_CATCHING=X is no longer needed when specifying EXCEPTION_CATCHING_ALLOWED')
else:
exit_with_error('DISABLE_EXCEPTION_CATCHING and EXCEPTION_CATCHING_ALLOWED are mutually exclusive')
if settings.EXCEPTION_CATCHING_ALLOWED:
settings.DISABLE_EXCEPTION_CATCHING = 0
if settings.DISABLE_EXCEPTION_THROWING and not settings.DISABLE_EXCEPTION_CATCHING:
exit_with_error("DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0). If you don't want exceptions, set DISABLE_EXCEPTION_CATCHING to 1; if you do want exceptions, don't link with -fno-exceptions")
return (newargs, input_files)
@ToolchainProfiler.profile_block('linker_setup')
def phase_linker_setup(options, state, newargs, settings_map):
autoconf = os.environ.get('EMMAKEN_JUST_CONFIGURE') or 'conftest.c' in state.orig_args
if autoconf:
# configure tests want a more shell-like style, where we emit return codes on exit()
settings.EXIT_RUNTIME = 1
# use node.js raw filesystem access, to behave just like a native executable
settings.NODERAWFS = 1
# Add `#!` line to output JS and make it executable.
options.executable = True
ldflags = emsdk_ldflags(newargs)
for f in ldflags:
add_link_flag(state, sys.maxsize, f)
if options.emrun:
options.pre_js += read_file(shared.path_from_root('src', 'emrun_prejs.js')) + '\n'
options.post_js += read_file(shared.path_from_root('src', 'emrun_postjs.js')) + '\n'
# emrun mode waits on program exit
settings.EXIT_RUNTIME = 1
if options.cpu_profiler:
options.post_js += read_file(shared.path_from_root('src', 'cpuprofiler.js')) + '\n'
if options.memory_profiler:
settings.MEMORYPROFILER = 1
if options.thread_profiler:
options.post_js += read_file(shared.path_from_root('src', 'threadprofiler.js')) + '\n'
if options.memory_init_file is None:
options.memory_init_file = settings.OPT_LEVEL >= 2
# TODO: support source maps with js_transform
if options.js_transform and settings.GENERATE_SOURCE_MAP:
logger.warning('disabling source maps because a js transform is being done')
settings.GENERATE_SOURCE_MAP = 0
# options.output_file is the user-specified one, target is what we will generate
if options.output_file:
target = options.output_file
# check for the existence of the output directory now, to avoid having
# to do so repeatedly when each of the various output files (.mem, .wasm,
# etc) are written. This gives a more useful error message than the
# IOError and python backtrace that users would otherwise see.
dirname = os.path.dirname(target)
if dirname and not os.path.isdir(dirname):
exit_with_error("specified output file (%s) is in a directory that does not exist" % target)
elif autoconf:
# Autoconf expects the executable output file to be called `a.out`
target = 'a.out'
elif settings.SIDE_MODULE:
target = 'a.out.wasm'
else:
target = 'a.out.js'
final_suffix = get_file_suffix(target)
if settings.EXTRA_EXPORTED_RUNTIME_METHODS:
diagnostics.warning('deprecated', 'EXTRA_EXPORTED_RUNTIME_METHODS is deprecated, please use EXPORTED_RUNTIME_METHODS instead')
settings.EXPORTED_RUNTIME_METHODS += settings.EXTRA_EXPORTED_RUNTIME_METHODS
# If no output format was specified we try to infer the format based on
# the output filename extension.
if not options.oformat and (options.relocatable or (options.shared and not settings.SIDE_MODULE)):
# Until we have a better story for actually producing runtime shared libraries
# we support a compatibility mode where shared libraries are actually just
# object files linked with `wasm-ld --relocatable` or `llvm-link` in the case
# of LTO.
if final_suffix in EXECUTABLE_ENDINGS:
diagnostics.warning('emcc', '-shared/-r used with executable output suffix. This behaviour is deprecated. Please remove -shared/-r to build an executable or avoid the executable suffix (%s) when building object files.' % final_suffix)
else:
if options.shared:
diagnostics.warning('emcc', 'linking a library with `-shared` will emit a static object file. This is a form of emulation to support existing build systems. If you want to build a runtime shared library use the SIDE_MODULE setting.')
options.oformat = OFormat.OBJECT
if not options.oformat:
if settings.SIDE_MODULE or final_suffix == '.wasm':
options.oformat = OFormat.WASM
elif final_suffix == '.mjs':
options.oformat = OFormat.MJS
elif final_suffix == '.html':
options.oformat = OFormat.HTML
else:
options.oformat = OFormat.JS
if options.oformat == OFormat.MJS:
settings.EXPORT_ES6 = 1
settings.MODULARIZE = 1
if options.oformat in (OFormat.WASM, OFormat.BARE):
# If the user asks directly for a wasm file then this *is* the target
wasm_target = target
else:
# Otherwise the wasm file is produced alongside the final target.
wasm_target = get_secondary_target(target, '.wasm')
if settings.SAFE_HEAP not in [0, 1]:
exit_with_error('emcc: SAFE_HEAP must be 0 or 1')
if not settings.WASM:
# When the user requests non-wasm output, we enable wasm2js. that is,
# we still compile to wasm normally, but we compile the final output
# to js.
settings.WASM = 1
settings.WASM2JS = 1
if settings.WASM == 2:
# Requesting both Wasm and Wasm2JS support
settings.WASM2JS = 1
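# (With WASM=2 both the wasm build and the wasm2js fallback are emitted; the
# generated JS is then expected to pick between them at runtime depending on
# WebAssembly support.)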
if (options.oformat == OFormat.WASM or settings.PURE_WASI) and not settings.SIDE_MODULE:
# if the output is just a wasm file, it will normally be a standalone one,
# as there is no JS. An exception is side modules, as we can't tell at
# compile time whether JS will be involved or not - the main module may
# have JS, and the side module is expected to link against that.
# we also do not support standalone mode in fastcomp.
settings.STANDALONE_WASM = 1
if settings.LZ4:
settings.EXPORTED_RUNTIME_METHODS += ['LZ4']
if settings.WASM2C:
# wasm2c only makes sense with standalone wasm - there will be no JS,
# just wasm and then C
settings.STANDALONE_WASM = 1
# wasm2c doesn't need any special handling of i64, we have proper i64
# handling on the FFI boundary, which is exactly like the case of JS with
# BigInt support
settings.WASM_BIGINT = 1
if options.no_entry:
settings.EXPECT_MAIN = 0
elif settings.STANDALONE_WASM:
if '_main' in settings.EXPORTED_FUNCTIONS:
# TODO(sbc): Make this into a warning?
logger.debug('including `_main` in EXPORTED_FUNCTIONS is not necessary in standalone mode')
else:
# In normal non-standalone mode we have special handling of `_main` in EXPORTED_FUNCTIONS.
# 1. If the user specifies exports, but doesn't include `_main` we assume they want to build a
# reactor.
# 2. If the user doesn't export anything we default to exporting `_main` (unless `--no-entry`
# is specified, see above).
if 'EXPORTED_FUNCTIONS' in settings_map:
if '_main' not in settings.USER_EXPORTED_FUNCTIONS:
settings.EXPECT_MAIN = 0
else:
assert not settings.EXPORTED_FUNCTIONS
settings.EXPORTED_FUNCTIONS = ['_main']
if settings.STANDALONE_WASM:
# In STANDALONE_WASM mode we either build a command or a reactor.
# See https://github.com/WebAssembly/WASI/blob/main/design/application-abi.md
# For a command we always want EXIT_RUNTIME=1
# For a reactor we always want EXIT_RUNTIME=0
if 'EXIT_RUNTIME' in settings_map:
exit_with_error('Explicitly setting EXIT_RUNTIME is not compatible with STANDALONE_WASM. EXIT_RUNTIME will always be True for programs (with a main function) and False for reactors (no main function).')
settings.EXIT_RUNTIME = settings.EXPECT_MAIN
# Note the exports the user requested
building.user_requested_exports.update(settings.EXPORTED_FUNCTIONS)
def default_setting(name, new_default):
if name not in settings_map:
setattr(settings, name, new_default)
if settings.OPT_LEVEL >= 1:
default_setting('ASSERTIONS', 0)
if settings.SHRINK_LEVEL >= 2:
default_setting('EVAL_CTORS', 1)
# -s ASSERTIONS=1 implies basic stack overflow checks, and ASSERTIONS=2
# implies full stack overflow checks.
if settings.ASSERTIONS:
# However, we don't set this default in PURE_WASI, or when we are linking without standard
# libraries because STACK_OVERFLOW_CHECK depends on emscripten_stack_get_end which is defined
# in libcompiler-rt.
if not settings.PURE_WASI and '-nostdlib' not in newargs and '-nodefaultlibs' not in newargs:
default_setting('STACK_OVERFLOW_CHECK', max(settings.ASSERTIONS, settings.STACK_OVERFLOW_CHECK))
if settings.LLD_REPORT_UNDEFINED or settings.STANDALONE_WASM:
# Reporting undefined symbols at wasm-ld time requires us to know if we have a `main` function
# or not, as does standalone wasm mode.
# TODO(sbc): Remove this once this becomes the default
settings.IGNORE_MISSING_MAIN = 0
# For users that opt out of WARN_ON_UNDEFINED_SYMBOLS we assume they also
# want to opt out of ERROR_ON_UNDEFINED_SYMBOLS.
if settings_map.get('WARN_ON_UNDEFINED_SYMBOLS') == '0':
default_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
# It is unlikely that developers targeting "native web" APIs with MINIMAL_RUNTIME need
# errno support by default.
if settings.MINIMAL_RUNTIME:
default_setting('SUPPORT_ERRNO', 0)
# Require explicit -lfoo.js flags to link with JS libraries.
default_setting('AUTO_JS_LIBRARIES', 0)
if settings.STRICT:
default_setting('STRICT_JS', 1)
default_setting('AUTO_JS_LIBRARIES', 0)
default_setting('AUTO_NATIVE_LIBRARIES', 0)
default_setting('AUTO_ARCHIVE_INDEXES', 0)
default_setting('IGNORE_MISSING_MAIN', 0)
default_setting('ALLOW_UNIMPLEMENTED_SYSCALLS', 0)
if not settings.AUTO_JS_LIBRARIES:
default_setting('USE_SDL', 0)
# Default to TEXTDECODER=2 (always use TextDecoder to decode UTF-8 strings)
# in -Oz builds, since custom decoder for UTF-8 takes up space.
# In pthreads enabled builds, TEXTDECODER==2 may not work, see
# https://github.com/whatwg/encoding/issues/172
# When supporting shell environments, do not do this as TextDecoder is not
# widely supported there.
if settings.SHRINK_LEVEL >= 2 and not settings.USE_PTHREADS and \
not settings.ENVIRONMENT_MAY_BE_SHELL:
default_setting('TEXTDECODER', 2)
# If set to 1, we will run the autodebugger (the automatic debugging tool, see
# tools/autodebugger). Note that this will disable inclusion of libraries. This
# is useful because including dlmalloc makes it hard to compare native and js
# builds
if os.environ.get('EMCC_AUTODEBUG'):
settings.AUTODEBUG = 1
# Use settings
if settings.DEBUG_LEVEL > 1 and options.use_closure_compiler:
diagnostics.warning('emcc', 'disabling closure because debug info was requested')
options.use_closure_compiler = False
if settings.WASM == 2 and settings.SINGLE_FILE:
exit_with_error('cannot have both WASM=2 and SINGLE_FILE enabled at the same time')
if settings.SEPARATE_DWARF and settings.WASM2JS:
exit_with_error('cannot have both SEPARATE_DWARF and WASM2JS at the same time (as there is no wasm file)')
if settings.MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and settings.MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION:
exit_with_error('MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION are mutually exclusive!')
if options.emrun:
if settings.MINIMAL_RUNTIME:
exit_with_error('--emrun is not compatible with -s MINIMAL_RUNTIME=1')
settings.EXPORTED_RUNTIME_METHODS.append('addOnExit')
if options.use_closure_compiler:
settings.USE_CLOSURE_COMPILER = options.use_closure_compiler
if settings.CLOSURE_WARNINGS not in ['quiet', 'warn', 'error']:
exit_with_error('Invalid option -s CLOSURE_WARNINGS=%s specified! Allowed values are "quiet", "warn" or "error".' % settings.CLOSURE_WARNINGS)
# Include dynCall() function by default in DYNCALLS builds in classic runtime; in MINIMAL_RUNTIME, must add this explicitly.
if settings.DYNCALLS and not settings.MINIMAL_RUNTIME:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$dynCall']
if settings.MAIN_MODULE:
assert not settings.SIDE_MODULE
if settings.MAIN_MODULE == 1:
settings.INCLUDE_FULL_LIBRARY = 1
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$preloadDylibs']
elif settings.SIDE_MODULE:
assert not settings.MAIN_MODULE
# memory init file is not supported with side modules, must be executable synchronously (for dlopen)
options.memory_init_file = False
# If we are including the entire JS library then we know for sure we will, by definition,
# require all the reverse dependencies.
if settings.INCLUDE_FULL_LIBRARY:
default_setting('REVERSE_DEPS', 'all')
if settings.MAIN_MODULE == 1 or settings.SIDE_MODULE == 1:
settings.LINKABLE = 1
settings.EXPORT_ALL = 1
if settings.MAIN_MODULE:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$getDylinkMetadata', '$mergeLibSymbols']
if settings.RELOCATABLE:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$reportUndefinedSymbols',
'$relocateExports',
'$GOTHandler',
'__heap_base',
'__stack_pointer',
]
settings.EXPORTED_FUNCTIONS += [
# This needs to be exported on the Module object so that it's visible
# to side modules too.
'___heap_base',
# Unconditional dependency in library_dylink.js
'_setThrew',
]
if settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME is not compatible with relocatable output')
if settings.WASM2JS:
exit_with_error('WASM2JS is not compatible with relocatable output')
# shared modules need memory utilities to allocate their memory
settings.EXPORTED_RUNTIME_METHODS += ['allocate']
settings.ALLOW_TABLE_GROWTH = 1
# various settings require sbrk() access
if settings.DETERMINISTIC or \
settings.EMSCRIPTEN_TRACING or \
settings.SAFE_HEAP or \
settings.MEMORYPROFILER:
settings.EXPORTED_FUNCTIONS += ['_sbrk']
if settings.MEMORYPROFILER:
settings.EXPORTED_FUNCTIONS += ['___heap_base',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_get_current']
if settings.ASYNCIFY_LAZY_LOAD_CODE:
settings.ASYNCIFY = 1
if settings.ASYNCIFY:
# See: https://github.com/emscripten-core/emscripten/issues/12065
# See: https://github.com/emscripten-core/emscripten/issues/12066
settings.DYNCALLS = 1
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_set_limits']
settings.ASYNCIFY_ADD = unmangle_symbols_from_cmdline(settings.ASYNCIFY_ADD)
settings.ASYNCIFY_REMOVE = unmangle_symbols_from_cmdline(settings.ASYNCIFY_REMOVE)
settings.ASYNCIFY_ONLY = unmangle_symbols_from_cmdline(settings.ASYNCIFY_ONLY)
if state.mode == Mode.COMPILE_AND_LINK and final_suffix in ('.o', '.bc', '.so', '.dylib') and not settings.SIDE_MODULE:
diagnostics.warning('emcc', 'generating an executable with an object extension (%s). If you meant to build an object file please use `-c`, `-r`, or `-shared`' % final_suffix)
if settings.SUPPORT_BIG_ENDIAN:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$LE_HEAP_STORE_U16',
'$LE_HEAP_STORE_I16',
'$LE_HEAP_STORE_U32',
'$LE_HEAP_STORE_I32',
'$LE_HEAP_STORE_F32',
'$LE_HEAP_STORE_F64',
'$LE_HEAP_LOAD_U16',
'$LE_HEAP_LOAD_I16',
'$LE_HEAP_LOAD_U32',
'$LE_HEAP_LOAD_I32',
'$LE_HEAP_LOAD_F32',
'$LE_HEAP_LOAD_F64'
]
if settings.STACK_OVERFLOW_CHECK:
# The basic writeStackCookie/checkStackCookie mechanism just needs to know where the end
# of the stack is.
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_end', '_emscripten_stack_get_free']
if settings.STACK_OVERFLOW_CHECK == 2:
# The full checking done by binaryen's `StackCheck` pass also needs to know the base of the
# stack.
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base']
# We call one of these two functions during startup which caches the stack limits
# in wasm globals allowing get_base/get_free to be super fast.
# See compiler-rt/stack_limits.S.
if settings.RELOCATABLE:
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_set_limits']
else:
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_init']
if settings.MODULARIZE:
if settings.PROXY_TO_WORKER:
exit_with_error('-s MODULARIZE=1 is not compatible with --proxy-to-worker (if you want to run in a worker with -s MODULARIZE=1, you likely want to do the worker side setup manually)')
# in MINIMAL_RUNTIME we may not need to emit the Promise code, as the
# HTML output creates a singleton instance, and it does so without the
# Promise. However, in Pthreads mode the Promise is used for worker
# creation.
if settings.MINIMAL_RUNTIME and options.oformat == OFormat.HTML and not settings.USE_PTHREADS:
settings.EXPORT_READY_PROMISE = 0
if settings.LEGACY_VM_SUPPORT:
if settings.WASM2JS:
settings.POLYFILL_OLD_MATH_FUNCTIONS = 1
# Support all old browser versions
settings.MIN_FIREFOX_VERSION = 0
settings.MIN_SAFARI_VERSION = 0
settings.MIN_IE_VERSION = 0
settings.MIN_EDGE_VERSION = 0
settings.MIN_CHROME_VERSION = 0
if settings.MIN_CHROME_VERSION <= 37:
settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 1
setup_environment_settings()
# Silently drop any individual backwards compatibility emulation flags that are known never to occur on browsers that support WebAssembly.
if not settings.WASM2JS:
settings.POLYFILL_OLD_MATH_FUNCTIONS = 0
settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 0
if settings.STB_IMAGE and final_suffix in EXECUTABLE_ENDINGS:
state.forced_stdlibs.append('libstb_image')
settings.EXPORTED_FUNCTIONS += ['_stbi_load', '_stbi_load_from_memory', '_stbi_image_free']
if settings.USE_WEBGL2:
settings.MAX_WEBGL_VERSION = 2
# MIN_WEBGL_VERSION=2 implies MAX_WEBGL_VERSION=2
if settings.MIN_WEBGL_VERSION == 2:
default_setting('MAX_WEBGL_VERSION', 2)
if settings.MIN_WEBGL_VERSION > settings.MAX_WEBGL_VERSION:
exit_with_error('MIN_WEBGL_VERSION must be smaller or equal to MAX_WEBGL_VERSION!')
if not settings.GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS and settings.GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS:
exit_with_error('-s GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=0 only makes sense with -s GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0!')
if settings.ASMFS and final_suffix in EXECUTABLE_ENDINGS:
state.forced_stdlibs.append('libasmfs')
settings.FILESYSTEM = 0
settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
settings.FETCH = 1
settings.JS_LIBRARIES.append((0, 'library_asmfs.js'))
# Explicitly drop linking in a malloc implementation if program is not using any dynamic allocation calls.
if not settings.USES_DYNAMIC_ALLOC:
settings.MALLOC = 'none'
if settings.FETCH and final_suffix in EXECUTABLE_ENDINGS:
state.forced_stdlibs.append('libfetch')
settings.JS_LIBRARIES.append((0, 'library_fetch.js'))
if settings.USE_PTHREADS:
settings.FETCH_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.fetch.js'
if settings.DEMANGLE_SUPPORT:
settings.EXPORTED_FUNCTIONS += ['___cxa_demangle']
if settings.FULL_ES3:
settings.FULL_ES2 = 1
settings.MAX_WEBGL_VERSION = max(2, settings.MAX_WEBGL_VERSION)
if settings.EMBIND:
state.forced_stdlibs.append('libembind')
settings.EXPORTED_FUNCTIONS += ['_stackSave', '_stackRestore', '_stackAlloc']
if not settings.STANDALONE_WASM:
# in standalone mode, crt1 will call the constructors from inside the wasm
settings.EXPORTED_FUNCTIONS.append('___wasm_call_ctors')
if settings.RELOCATABLE and not settings.DYNAMIC_EXECUTION:
exit_with_error('cannot have both DYNAMIC_EXECUTION=0 and RELOCATABLE enabled at the same time, since RELOCATABLE needs to eval()')
if settings.SIDE_MODULE and settings.GLOBAL_BASE != -1:
exit_with_error('Cannot set GLOBAL_BASE when building SIDE_MODULE')
# When building a side module we currently have to assume that any undefined
# symbols that exist at link time will be satisfied by the main module or JS.
if settings.SIDE_MODULE:
default_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
default_setting('WARN_ON_UNDEFINED_SYMBOLS', 0)
else:
settings.EXPORT_IF_DEFINED.append('__start_em_asm')
settings.EXPORT_IF_DEFINED.append('__stop_em_asm')
if options.use_preload_plugins or len(options.preload_files) or len(options.embed_files):
if settings.NODERAWFS:
exit_with_error('--preload-file and --embed-file cannot be used with NODERAWFS which disables virtual filesystem')
# if we include any files, or intend to use preload plugins, then we definitely need filesystem support
settings.FORCE_FILESYSTEM = 1
if settings.PROXY_TO_WORKER or options.use_preload_plugins:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$Browser']
if not settings.MINIMAL_RUNTIME:
# In non-MINIMAL_RUNTIME, the core runtime depends on these functions to be present. (In MINIMAL_RUNTIME, they are
# no longer always bundled in)
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [
'$demangle',
'$demangleAll',
'$jsStackTrace',
'$stackTrace'
]
if settings.FILESYSTEM and not settings.BOOTSTRAPPING_STRUCT_INFO and not settings.STANDALONE_WASM:
# to flush streams on FS exit, we need to be able to call fflush
# we only include it if the runtime is exitable, or when ASSERTIONS
# (ASSERTIONS will check that streams do not need to be flushed,
# helping people see when they should have enabled EXIT_RUNTIME)
if settings.EXIT_RUNTIME or settings.ASSERTIONS:
settings.EXPORTED_FUNCTIONS += ['_fflush']
if settings.SUPPORT_ERRNO and not settings.BOOTSTRAPPING_STRUCT_INFO:
# so setErrNo JS library function can report errno back to C
settings.EXPORTED_FUNCTIONS += ['___errno_location']
if settings.SAFE_HEAP:
# SAFE_HEAP check includes calling emscripten_get_sbrk_ptr() from wasm
settings.EXPORTED_FUNCTIONS += ['_emscripten_get_sbrk_ptr', '_emscripten_stack_get_base']
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$unSign']
if not settings.DECLARE_ASM_MODULE_EXPORTS:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$exportAsmFunctions']
if settings.ALLOW_MEMORY_GROWTH:
# Setting ALLOW_MEMORY_GROWTH turns off ABORTING_MALLOC, as in that mode we default to
# the behavior of trying to grow and returning 0 from malloc on failure, like
# a standard system would. However, if the user sets the flag it
# overrides that.
default_setting('ABORTING_MALLOC', 0)
if settings.USE_PTHREADS:
if settings.USE_PTHREADS == 2:
exit_with_error('USE_PTHREADS=2 is no longer supported')
if settings.ALLOW_MEMORY_GROWTH:
diagnostics.warning('pthreads-mem-growth', 'USE_PTHREADS + ALLOW_MEMORY_GROWTH may run non-wasm code slowly, see https://github.com/WebAssembly/design/issues/1271')
settings.JS_LIBRARIES.append((0, 'library_pthread.js'))
settings.EXPORTED_FUNCTIONS += [
'___emscripten_pthread_data_constructor',
'__emscripten_call_on_thread',
'__emscripten_main_thread_futex',
'__emscripten_thread_init',
'__emscripten_thread_exit',
'_emscripten_current_thread_process_queued_calls',
'__emscripten_allow_main_runtime_queued_calls',
'_emscripten_futex_wake',
'_emscripten_get_global_libc',
'_emscripten_main_browser_thread_id',
'_emscripten_main_thread_process_queued_calls',
'_emscripten_run_in_main_runtime_thread_js',
'_emscripten_stack_set_limits',
'_emscripten_sync_run_in_main_thread_2',
'_emscripten_sync_run_in_main_thread_4',
'_emscripten_tls_init',
'_pthread_self',
'_pthread_testcancel',
]
# Some of these symbols are used by worker.js but are otherwise unreferenced.
# Because emitDCEGraph only considers the main js file, and not worker.js,
# we have to explicitly mark these symbols as user-exported so that they will
# be kept alive through DCE.
# TODO: Find a less hacky way to do this, perhaps by also scanning worker.js
# for roots.
building.user_requested_exports.add('_emscripten_tls_init')
building.user_requested_exports.add('_emscripten_current_thread_process_queued_calls')
# set location of worker.js
settings.PTHREAD_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.worker.js'
else:
settings.JS_LIBRARIES.append((0, 'library_pthread_stub.js'))
if settings.FORCE_FILESYSTEM and not settings.MINIMAL_RUNTIME:
# when the filesystem is forced, we export by default methods that filesystem usage
# may need, including filesystem usage from standalone file packager output (i.e.
# file packages not built together with emcc, but that are loaded at runtime
# separately, and they need emcc's output to contain the support they need)
if not settings.ASMFS:
settings.EXPORTED_RUNTIME_METHODS += [
'FS_createPath',
'FS_createDataFile',
'FS_createPreloadedFile',
'FS_createLazyFile',
'FS_createDevice',
'FS_unlink'
]
settings.EXPORTED_RUNTIME_METHODS += [
'addRunDependency',
'removeRunDependency',
]
if not settings.MINIMAL_RUNTIME or settings.EXIT_RUNTIME:
# MINIMAL_RUNTIME only needs callRuntimeCallbacks in certain cases, but the normal runtime
# always does.
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$callRuntimeCallbacks']
if settings.USE_PTHREADS:
# memalign is used to ensure allocated thread stacks are aligned.
settings.EXPORTED_FUNCTIONS += ['_memalign']
if settings.MINIMAL_RUNTIME:
building.user_requested_exports.add('exit')
if settings.PROXY_TO_PTHREAD:
settings.EXPORTED_FUNCTIONS += ['_emscripten_proxy_main']
# pthread stack setup and other necessary utilities
def include_and_export(name):
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$' + name]
settings.EXPORTED_FUNCTIONS += [name]
include_and_export('establishStackSpace')
include_and_export('invokeEntryPoint')
if not settings.MINIMAL_RUNTIME:
# keepRuntimeAlive does not apply to MINIMAL_RUNTIME.
settings.EXPORTED_RUNTIME_METHODS += ['keepRuntimeAlive']
if settings.MODULARIZE:
if not settings.EXPORT_ES6 and settings.EXPORT_NAME == 'Module':
exit_with_error('pthreads + MODULARIZE currently require you to set -s EXPORT_NAME=Something (see settings.js) to Something != Module, so that the .worker.js file can work')
# MODULARIZE+USE_PTHREADS mode requires extra exports out to Module so that worker.js
# can access them:
# general threading variables:
settings.EXPORTED_RUNTIME_METHODS += ['PThread']
# To keep code size to minimum, MINIMAL_RUNTIME does not utilize the global ExitStatus
# object, only regular runtime has it.
if not settings.MINIMAL_RUNTIME:
settings.EXPORTED_RUNTIME_METHODS += ['ExitStatus']
if settings.RELOCATABLE:
# pthreads + dynamic linking has certain limitations
if settings.SIDE_MODULE:
diagnostics.warning('experimental', '-s SIDE_MODULE + pthreads is experimental')
elif settings.MAIN_MODULE:
diagnostics.warning('experimental', '-s MAIN_MODULE + pthreads is experimental')
elif settings.LINKABLE:
diagnostics.warning('experimental', '-s LINKABLE + pthreads is experimental')
default_setting('SUPPORT_LONGJMP', 0)
if settings.SUPPORT_LONGJMP:
exit_with_error('SUPPORT_LONGJMP is not compatible with pthreads + dynamic linking')
if settings.PROXY_TO_WORKER:
exit_with_error('--proxy-to-worker is not supported with -s USE_PTHREADS>0! Use the option -s PROXY_TO_PTHREAD=1 if you want to run the main thread of a multithreaded application in a web worker.')
elif settings.PROXY_TO_PTHREAD:
exit_with_error('-s PROXY_TO_PTHREAD=1 requires -s USE_PTHREADS to work!')
def check_memory_setting(setting):
if settings[setting] % webassembly.WASM_PAGE_SIZE != 0:
exit_with_error(f'{setting} must be a multiple of WebAssembly page size (64KiB), was {settings[setting]}')
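# e.g. INITIAL_MEMORY=16777216 (16MiB, a multiple of the 64KiB wasm page size)
# passes this check, while 16777217 would be rejected.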
check_memory_setting('INITIAL_MEMORY')
check_memory_setting('MAXIMUM_MEMORY')
if settings.INITIAL_MEMORY >= 2 * 1024 * 1024 * 1024:
exit_with_error('INITIAL_MEMORY must be less than 2GB due to current spec limitations')
if settings.INITIAL_MEMORY < settings.TOTAL_STACK:
exit_with_error(f'INITIAL_MEMORY must be larger than TOTAL_STACK, was {settings.INITIAL_MEMORY} (TOTAL_STACK={settings.TOTAL_STACK})')
if settings.MEMORY_GROWTH_LINEAR_STEP != -1:
check_memory_setting('MEMORY_GROWTH_LINEAR_STEP')
if 'MAXIMUM_MEMORY' in settings_map and not settings.ALLOW_MEMORY_GROWTH:
diagnostics.warning('unused-command-line-argument', 'MAXIMUM_MEMORY is only meaningful with ALLOW_MEMORY_GROWTH')
if settings.EXPORT_ES6 and not settings.MODULARIZE:
# EXPORT_ES6 requires output to be a module
if 'MODULARIZE' in settings_map:
exit_with_error('EXPORT_ES6 requires MODULARIZE to be set')
settings.MODULARIZE = 1
if settings.MODULARIZE and not settings.DECLARE_ASM_MODULE_EXPORTS:
# When MODULARIZE option is used, currently requires declaring all module exports
# individually - TODO: this could be optimized
exit_with_error('DECLARE_ASM_MODULE_EXPORTS=0 is not compatible with MODULARIZE')
# When not declaring wasm module exports in outer scope one by one, disable minifying
# wasm module export names so that the names can be passed directly to the outer scope.
# Also, if using library_exports.js API, disable minification so that the feature can work.
if not settings.DECLARE_ASM_MODULE_EXPORTS or '-lexports.js' in [x for _, x in state.link_flags]:
settings.MINIFY_ASMJS_EXPORT_NAMES = 0
# Enable minification of wasm imports and exports when appropriate, if we
# are emitting an optimized JS+wasm combo (then the JS knows how to load the minified names).
# Things that process the JS after this operation would be done must disable this.
# For example, ASYNCIFY_LAZY_LOAD_CODE needs to identify import names.
if will_metadce() and \
settings.OPT_LEVEL >= 2 and \
settings.DEBUG_LEVEL <= 2 and \
options.oformat not in (OFormat.WASM, OFormat.BARE) and \
not settings.LINKABLE and \
not settings.STANDALONE_WASM and \
not settings.AUTODEBUG and \
not settings.ASSERTIONS and \
not settings.RELOCATABLE and \
not settings.ASYNCIFY_LAZY_LOAD_CODE and \
settings.MINIFY_ASMJS_EXPORT_NAMES:
settings.MINIFY_WASM_IMPORTS_AND_EXPORTS = 1
settings.MINIFY_WASM_IMPORTED_MODULES = 1
if settings.MINIMAL_RUNTIME:
# Minimal runtime uses a different default shell file
if options.shell_path == shared.path_from_root('src', 'shell.html'):
options.shell_path = shared.path_from_root('src', 'shell_minimal_runtime.html')
if settings.EXIT_RUNTIME:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['proc_exit']
if settings.ASSERTIONS:
# In ASSERTIONS-builds, functions UTF8ArrayToString() and stringToUTF8Array() (which are not JS library functions), both
# use warnOnce(), which in MINIMAL_RUNTIME is a JS library function, so explicitly have to mark dependency to warnOnce()
# in that case. If string functions are turned to library functions in the future, then JS dependency tracking can be
# used and this special directive can be dropped.
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$warnOnce']
if settings.MODULARIZE and not (settings.EXPORT_ES6 and not settings.SINGLE_FILE) and \
settings.EXPORT_NAME == 'Module' and options.oformat == OFormat.HTML and \
(options.shell_path == shared.path_from_root('src', 'shell.html') or options.shell_path == shared.path_from_root('src', 'shell_minimal.html')):
exit_with_error(f'Due to collision in variable name "Module", the shell file "{options.shell_path}" is not compatible with build options "-s MODULARIZE=1 -s EXPORT_NAME=Module". Either provide your own shell file or change the name of the export to something else to avoid the name collision. (see https://github.com/emscripten-core/emscripten/issues/7950 for details)')
if settings.STANDALONE_WASM:
if settings.USE_PTHREADS:
exit_with_error('STANDALONE_WASM does not support pthreads yet')
if settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME reduces JS size, and is incompatible with STANDALONE_WASM which focuses on ignoring JS anyhow and being 100% wasm')
# the wasm must be runnable without the JS, so there cannot be anything that
# requires JS legalization
settings.LEGALIZE_JS_FFI = 0
# TODO(sbc): Remove WASM2JS here once the size regression it would introduce has been fixed.
if settings.USE_PTHREADS or settings.RELOCATABLE or settings.ASYNCIFY_LAZY_LOAD_CODE or settings.WASM2JS:
settings.IMPORTED_MEMORY = 1
if settings.WASM_BIGINT:
settings.LEGALIZE_JS_FFI = 0
if settings.SINGLE_FILE:
settings.GENERATE_SOURCE_MAP = 0
if options.use_closure_compiler == 2 and not settings.WASM2JS:
exit_with_error('closure compiler mode 2 assumes the code is asm.js, so not meaningful for wasm')
if 'MEM_INIT_METHOD' in settings_map:
exit_with_error('MEM_INIT_METHOD is not supported in wasm. Memory will be embedded in the wasm binary if threads are not used, and included in a separate file if threads are used.')
if settings.WASM2JS:
settings.MAYBE_WASM2JS = 1
# when using wasm2js, if the memory segments are in the wasm then they
# end up converted by wasm2js into base64 encoded JS. alternatively, we
# can use a .mem file like asm.js used to.
# generally we follow what the options tell us to do (which is to use
# a .mem file in most cases, since it is binary & compact). however, for
# pthreads we must keep the memory segments in the wasm as they will be
# passive segments which the .mem format cannot handle.
settings.MEM_INIT_IN_WASM = not options.memory_init_file or settings.SINGLE_FILE or settings.USE_PTHREADS
else:
# wasm includes the mem init in the wasm binary. The exception is
# wasm2js, which behaves more like js.
options.memory_init_file = True
settings.MEM_INIT_IN_WASM = True
# wasm side modules have suffix .wasm
if settings.SIDE_MODULE and target.endswith('.js'):
diagnostics.warning('emcc', 'output suffix .js requested, but wasm side modules are just wasm files; emitting only a .wasm, no .js')
sanitize = set()
for arg in newargs:
if arg.startswith('-fsanitize='):
sanitize.update(arg.split('=', 1)[1].split(','))
elif arg.startswith('-fno-sanitize='):
sanitize.difference_update(arg.split('=', 1)[1].split(','))
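# e.g. '-fsanitize=address,undefined -fno-sanitize=undefined' leaves sanitize == {'address'};
# later flags override earlier ones, as with clang.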
if sanitize:
settings.USE_OFFSET_CONVERTER = 1
settings.EXPORTED_FUNCTIONS += [
'_memalign',
'_emscripten_builtin_memalign',
'_emscripten_builtin_malloc',
'_emscripten_builtin_free',
'___heap_base',
'___global_base'
]
if settings.USE_OFFSET_CONVERTER and settings.WASM2JS:
exit_with_error('wasm2js is not compatible with USE_OFFSET_CONVERTER (see #14630)')
if sanitize & UBSAN_SANITIZERS:
if '-fsanitize-minimal-runtime' in newargs:
settings.UBSAN_RUNTIME = 1
else:
settings.UBSAN_RUNTIME = 2
if 'leak' in sanitize:
settings.USE_LSAN = 1
settings.EXIT_RUNTIME = 1
if settings.LINKABLE:
exit_with_error('LSan does not support dynamic linking')
if 'address' in sanitize:
settings.USE_ASAN = 1
if not settings.UBSAN_RUNTIME:
settings.UBSAN_RUNTIME = 2
settings.EXPORTED_FUNCTIONS.append('_emscripten_builtin_memset')
# helper functions for JS to call into C to do memory operations. these
# let us sanitize memory access from the JS side, by calling into C where
# it has been instrumented.
ASAN_C_HELPERS = [
'asan_c_load_1', 'asan_c_load_1u',
'asan_c_load_2', 'asan_c_load_2u',
'asan_c_load_4', 'asan_c_load_4u',
'asan_c_load_f', 'asan_c_load_d',
'asan_c_store_1', 'asan_c_store_1u',
'asan_c_store_2', 'asan_c_store_2u',
'asan_c_store_4', 'asan_c_store_4u',
'asan_c_store_f', 'asan_c_store_d',
]
settings.EXPORTED_FUNCTIONS += ['_' + x for x in ASAN_C_HELPERS]
if settings.ASYNCIFY and not settings.ASYNCIFY_ONLY:
# we do not want asyncify to instrument these helpers - they just access
# memory as small getters/setters, so they cannot pause anyhow, and also
# we access them in the runtime as we prepare to rewind, which would hit
# an asyncify assertion, if asyncify instrumented them.
#
# note that if ASYNCIFY_ONLY was set by the user then we do not need to
# do anything (as the user's list won't contain these functions), and if
# we did add them, the pass would assert on incompatible lists, hence the
# condition in the above if.
settings.ASYNCIFY_REMOVE += ASAN_C_HELPERS
if settings.ASAN_SHADOW_SIZE != -1:
diagnostics.warning('emcc', 'ASAN_SHADOW_SIZE is ignored and will be removed in a future release')
if settings.GLOBAL_BASE != -1:
exit_with_error("ASan does not support custom GLOBAL_BASE")
max_mem = settings.INITIAL_MEMORY
if settings.ALLOW_MEMORY_GROWTH:
max_mem = settings.MAXIMUM_MEMORY
shadow_size = max_mem // 8
settings.GLOBAL_BASE = shadow_size
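# ASan reserves the low 1/8th of the address space as shadow memory, so application
# globals are moved above it by raising GLOBAL_BASE to the shadow region size.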
sanitizer_mem = (shadow_size + webassembly.WASM_PAGE_SIZE) & ~webassembly.WASM_PAGE_SIZE
# sanitizers do at least 9 page allocs of a single page during startup.
sanitizer_mem += webassembly.WASM_PAGE_SIZE * 9
# we also allocate at least 11 "regions". Each region is kRegionSize (2 << 20) but
# MmapAlignedOrDieOnFatalError adds another 2 << 20 for alignment.
sanitizer_mem += (1 << 21) * 11
# When running in the threaded mode asan needs to allocate an array of kMaxNumberOfThreads
# (1 << 22) pointers. See compiler-rt/lib/asan/asan_thread.cpp.
if settings.USE_PTHREADS:
sanitizer_mem += (1 << 22) * 4
# Increase the size of the initial memory according to how much memory
# we think the sanitizers will use.
settings.INITIAL_MEMORY += sanitizer_mem
if settings.SAFE_HEAP:
# SAFE_HEAP instruments ASan's shadow memory accesses.
# Since the shadow memory starts at 0, the act of accessing the shadow memory is detected
# by SAFE_HEAP as a null pointer dereference.
exit_with_error('ASan does not work with SAFE_HEAP')
if settings.LINKABLE:
exit_with_error('ASan does not support dynamic linking')
if sanitize and settings.GENERATE_SOURCE_MAP:
settings.LOAD_SOURCE_MAP = 1
if settings.GLOBAL_BASE == -1:
# default if nothing else sets it
# a higher global base is useful for optimizing load/store offsets, as it
# enables the --post-emscripten pass
settings.GLOBAL_BASE = 1024
# various settings require malloc/free support from JS
if settings.RELOCATABLE or \
settings.BUILD_AS_WORKER or \
settings.USE_WEBGPU or \
settings.USE_PTHREADS or \
settings.OFFSCREENCANVAS_SUPPORT or \
settings.LEGACY_GL_EMULATION or \
not settings.DISABLE_EXCEPTION_CATCHING or \
settings.ASYNCIFY or \
settings.ASMFS or \
settings.DEMANGLE_SUPPORT or \
settings.FORCE_FILESYSTEM or \
settings.STB_IMAGE or \
settings.EMBIND or \
settings.FETCH or \
settings.PROXY_POSIX_SOCKETS or \
options.memory_profiler or \
sanitize:
settings.EXPORTED_FUNCTIONS += ['_malloc', '_free']
if not settings.DISABLE_EXCEPTION_CATCHING:
settings.EXPORTED_FUNCTIONS += [
# For normal builds the entries in deps_info.py are enough to include
# these symbols whenever __cxa_find_matching_catch_* functions are
# found. However, under LTO these symbols don't exist prior to linking
# so we include them unconditionally when exceptions are enabled.
'___cxa_is_pointer_type',
'___cxa_can_catch',
# Emscripten exception handling can generate invoke calls, and they call
# setThrew(). We cannot handle this using deps_info as the invokes are not
# emitted because of library function usage, but by codegen itself.
'_setThrew',
]
if settings.ASYNCIFY:
if not settings.ASYNCIFY_IGNORE_INDIRECT:
# if we are not ignoring indirect calls, then we must treat invoke_* as if
# they are indirect calls, since that is what they do - we can't see their
# targets statically.
settings.ASYNCIFY_IMPORTS += ['invoke_*']
# with pthreads we may call main through the __call_main mechanism, which can
# therefore reach anything in the program, so mark it as possibly causing a
# sleep (the asyncify analysis doesn't look through JS, just wasm, so it can't
# see what it itself calls)
if settings.USE_PTHREADS:
settings.ASYNCIFY_IMPORTS += ['__call_main']
# add the default imports
settings.ASYNCIFY_IMPORTS += DEFAULT_ASYNCIFY_IMPORTS
# return the full import name, including module. The name may
# already have a module prefix; if not, we assume it is "env".
def get_full_import_name(name):
if '.' in name:
return name
return 'env.' + name
settings.ASYNCIFY_IMPORTS = [get_full_import_name(i) for i in settings.ASYNCIFY_IMPORTS]
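# e.g. 'invoke_*' becomes 'env.invoke_*', while an entry that already contains a
# module prefix (e.g. 'mod.func') is left untouched.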
if settings.WASM2JS and settings.GENERATE_SOURCE_MAP:
exit_with_error('wasm2js does not support source maps yet (debug in wasm for now)')
if settings.NODE_CODE_CACHING:
if settings.WASM_ASYNC_COMPILATION:
exit_with_error('NODE_CODE_CACHING requires sync compilation (WASM_ASYNC_COMPILATION=0)')
if not shared.target_environment_may_be('node'):
exit_with_error('NODE_CODE_CACHING only works in node, but target environments do not include it')
if settings.SINGLE_FILE:
exit_with_error('NODE_CODE_CACHING saves a file on the side and is not compatible with SINGLE_FILE')
if not shared.JS.isidentifier(settings.EXPORT_NAME):
exit_with_error(f'EXPORT_NAME is not a valid JS identifier: `{settings.EXPORT_NAME}`')
if options.tracing and settings.ALLOW_MEMORY_GROWTH:
settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['emscripten_trace_report_memory_layout']
settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_current',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end']
# Any "pointers" passed to JS will now be i64's, in both modes.
if settings.MEMORY64:
if settings_map.get('WASM_BIGINT') == '0':
exit_with_error('MEMORY64 is not compatible with WASM_BIGINT=0')
settings.WASM_BIGINT = 1
# check if we can address the 2GB mark and higher: either if we start at
# 2GB, or if we allow growth to either any amount or to 2GB or more.
if settings.INITIAL_MEMORY > 2 * 1024 * 1024 * 1024 or \
(settings.ALLOW_MEMORY_GROWTH and
(settings.MAXIMUM_MEMORY < 0 or
settings.MAXIMUM_MEMORY > 2 * 1024 * 1024 * 1024)):
settings.CAN_ADDRESS_2GB = 1
settings.EMSCRIPTEN_VERSION = shared.EMSCRIPTEN_VERSION
settings.PROFILING_FUNCS = options.profiling_funcs
settings.SOURCE_MAP_BASE = options.source_map_base or ''
return target, wasm_target
@ToolchainProfiler.profile_block('compile inputs')
def phase_compile_inputs(options, state, newargs, input_files):
def is_link_flag(flag):
if flag.startswith('-nostdlib'):
return True
return flag.startswith(('-l', '-L', '-Wl,'))
CXX = [shared.CLANG_CXX]
CC = [shared.CLANG_CC]
if config.COMPILER_WRAPPER:
logger.debug('using compiler wrapper: %s', config.COMPILER_WRAPPER)
CXX.insert(0, config.COMPILER_WRAPPER)
CC.insert(0, config.COMPILER_WRAPPER)
if 'EMMAKEN_COMPILER' in os.environ:
diagnostics.warning('deprecated', '`EMMAKEN_COMPILER` is deprecated.\n'
'To use an alternative LLVM build set `LLVM_ROOT` in the config file (or `EM_LLVM_ROOT` env var).\n'
'To wrap invocations of clang use the `COMPILER_WRAPPER` setting (or `EM_COMPILER_WRAPPER` env var).\n')
CXX = [os.environ['EMMAKEN_COMPILER']]
CC = [cxx_to_c_compiler(os.environ['EMMAKEN_COMPILER'])]
compile_args = [a for a in newargs if a and not is_link_flag(a)]
system_libs.ensure_sysroot()
def get_language_mode(args):
return_next = False
for item in args:
if return_next:
return item
if item == '-x':
return_next = True
continue
if item.startswith('-x'):
return strip_prefix(item, '-x')
return ''
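# e.g. ['-x', 'c++', 'foo.h'] and ['-xc++', 'foo.h'] both yield 'c++';
# with no -x flag an empty string is returned.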
language_mode = get_language_mode(newargs)
def use_cxx(src):
if 'c++' in language_mode or run_via_emxx:
return True
# Next consider the filename
if src.endswith(C_ENDINGS + OBJC_ENDINGS):
return False
if src.endswith(CXX_ENDINGS):
return True
# Finally fall back to the default
if settings.DEFAULT_TO_CXX:
# Default to using C++ even when run as `emcc`.
# This means that emcc will act as a C++ linker when no source files are
# specified.
# This differs from clang and gcc, where the default is always C unless run as
# clang++/g++.
return True
return False
def get_compiler(cxx):
if cxx:
return CXX
return CC
def get_clang_command(src_file):
return get_compiler(use_cxx(src_file)) + get_cflags(options, state.orig_args) + compile_args + [src_file]
def get_clang_command_asm(src_file):
return get_compiler(use_cxx(src_file)) + get_clang_flags() + compile_args + [src_file]
# preprocessor-only (-E) support
if state.mode == Mode.PREPROCESS_ONLY:
for input_file in [x[1] for x in input_files]:
cmd = get_clang_command(input_file)
if options.output_file:
cmd += ['-o', options.output_file]
# Do not compile, but just output the result from preprocessing stage or
# output the dependency rule. Warning: clang and gcc behave differently
# with -MF! (clang seems to not recognize it)
logger.debug(('just preprocessor ' if state.has_dash_E else 'just dependencies: ') + ' '.join(cmd))
shared.check_call(cmd)
return []
# Precompiled headers support
if state.mode == Mode.PCH:
headers = [header for _, header in input_files]
for header in headers:
if not header.endswith(HEADER_ENDINGS):
exit_with_error(f'cannot mix precompiled headers with non-header inputs: {headers} : {header}')
cmd = get_clang_command(header)
if options.output_file:
cmd += ['-o', options.output_file]
logger.debug(f"running (for precompiled headers): {cmd[0]} {' '.join(cmd[1:])}")
shared.check_call(cmd)
return []
linker_inputs = []
seen_names = {}
def uniquename(name):
if name not in seen_names:
seen_names[name] = str(len(seen_names))
return unsuffixed(name) + '_' + seen_names[name] + shared.suffix(name)
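# Sketch of the naming scheme (added): a first call uniquename('a/foo.c') yields
# 'a/foo_0.c', a later uniquename('b/foo.c') yields 'b/foo_1.c', and repeating an
# already-seen name reuses its previously assigned index, so temp object files never collide.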
def get_object_filename(input_file):
if state.mode == Mode.COMPILE_ONLY:
# In compile-only mode we don't use any temp file. The object files
# are written directly to their final output locations.
if options.output_file:
assert len(input_files) == 1
return options.output_file
else:
return unsuffixed_basename(input_file) + options.default_object_extension
else:
return in_temp(unsuffixed(uniquename(input_file)) + options.default_object_extension)
def compile_source_file(i, input_file):
logger.debug('compiling source file: ' + input_file)
output_file = get_object_filename(input_file)
if state.mode not in (Mode.COMPILE_ONLY, Mode.PREPROCESS_ONLY):
linker_inputs.append((i, output_file))
if get_file_suffix(input_file) in ASSEMBLY_ENDINGS:
cmd = get_clang_command_asm(input_file)
else:
cmd = get_clang_command(input_file)
if not state.has_dash_c:
cmd += ['-c']
cmd += ['-o', output_file]
if state.mode == Mode.COMPILE_AND_LINK and '-gsplit-dwarf' in newargs:
# When running in COMPILE_AND_LINK mode we compile to a temporary location,
# but we want the `.dwo` file to be generated in the current working directory,
# like it is under clang. We could avoid this hack if we used the clang driver
# to generate the temporary files, but that would also involve using the clang
# driver to perform linking, which would be a big change.
cmd += ['-Xclang', '-split-dwarf-file', '-Xclang', unsuffixed_basename(input_file) + '.dwo']
cmd += ['-Xclang', '-split-dwarf-output', '-Xclang', unsuffixed_basename(input_file) + '.dwo']
shared.check_call(cmd)
if output_file not in ('-', os.devnull):
assert os.path.exists(output_file)
# First, generate LLVM bitcode. For each input file, we get base.o with bitcode
for i, input_file in input_files:
file_suffix = get_file_suffix(input_file)
if file_suffix in SOURCE_ENDINGS + ASSEMBLY_ENDINGS or (state.has_dash_c and file_suffix == '.bc'):
compile_source_file(i, input_file)
elif file_suffix in DYNAMICLIB_ENDINGS:
logger.debug('using shared library: ' + input_file)
linker_inputs.append((i, input_file))
elif building.is_ar(input_file):
logger.debug('using static library: ' + input_file)
ensure_archive_index(input_file)
linker_inputs.append((i, input_file))
elif language_mode:
compile_source_file(i, input_file)
elif input_file == '-':
exit_with_error('-E or -x required when input is from standard input')
else:
# Default to assuming the inputs are object files and pass them to the linker
logger.debug('using object file: ' + input_file)
linker_inputs.append((i, input_file))
return linker_inputs
@ToolchainProfiler.profile_block('calculate system libraries')
def phase_calculate_system_libraries(state, linker_arguments, linker_inputs, newargs):
extra_files_to_link = []
# link in ports and system libraries, if necessary
if not settings.SIDE_MODULE:
# Ports are always linked into the main module, never the side module.
extra_files_to_link += system_libs.get_ports_libs(settings)
if '-nostdlib' not in newargs and '-nodefaultlibs' not in newargs:
settings.LINK_AS_CXX = run_via_emxx
# Traditionally we always link as C++. For compatibility we continue to do that,
# unless running in strict mode.
if not settings.STRICT and '-nostdlib++' not in newargs:
settings.LINK_AS_CXX = True
extra_files_to_link += system_libs.calculate([f for _, f in sorted(linker_inputs)] + extra_files_to_link, forced=state.forced_stdlibs)
linker_arguments.extend(extra_files_to_link)
@ToolchainProfiler.profile_block('link')
def phase_link(linker_arguments, wasm_target):
logger.debug(f'linking: {linker_arguments}')
# Make a final pass over settings.EXPORTED_FUNCTIONS to remove any
# duplication between functions added by the driver/libraries and function
# specified by the user
settings.EXPORTED_FUNCTIONS = dedup_list(settings.EXPORTED_FUNCTIONS)
# if EMCC_DEBUG=2 then we must link now, so the temp files are complete.
# if using the wasm backend, we might be using vanilla LLVM, which does not allow our
# fastcomp deferred linking opts.
# TODO: we could check if this is a fastcomp build, and still speed things up here
js_syms = None
if settings.LLD_REPORT_UNDEFINED and settings.ERROR_ON_UNDEFINED_SYMBOLS:
js_syms = get_all_js_syms()
building.link_lld(linker_arguments, wasm_target, external_symbols=js_syms)
@ToolchainProfiler.profile_block('post_link')
def phase_post_link(options, state, in_wasm, wasm_target, target):
global final_js
target_basename = unsuffixed_basename(target)
if options.oformat != OFormat.WASM:
final_js = in_temp(target_basename + '.js')
settings.TARGET_BASENAME = unsuffixed_basename(target)
if options.oformat in (OFormat.JS, OFormat.MJS):
state.js_target = target
else:
state.js_target = get_secondary_target(target, '.js')
settings.TARGET_JS_NAME = os.path.basename(state.js_target)
if settings.MEM_INIT_IN_WASM:
memfile = None
else:
memfile = shared.replace_or_append_suffix(target, '.mem')
phase_emscript(options, in_wasm, wasm_target, memfile)
phase_source_transforms(options, target)
if memfile and not settings.MINIMAL_RUNTIME:
# MINIMAL_RUNTIME doesn't use `var memoryInitializer` but instead expects Module['mem'] to
# be loaded before the module. See src/postamble_minimal.js.
phase_memory_initializer(memfile)
phase_binaryen(target, options, wasm_target)
# If we are not emitting any JS then we are all done now
if options.oformat != OFormat.WASM:
phase_final_emitting(options, state, target, wasm_target, memfile)
@ToolchainProfiler.profile_block('emscript')
def phase_emscript(options, in_wasm, wasm_target, memfile):
# Emscripten
logger.debug('emscript')
if options.memory_init_file:
settings.MEM_INIT_METHOD = 1
else:
assert settings.MEM_INIT_METHOD != 1
if embed_memfile():
settings.SUPPORT_BASE64_EMBEDDING = 1
emscripten.run(in_wasm, wasm_target, final_js, memfile)
save_intermediate('original')
@ToolchainProfiler.profile_block('source transforms')
def phase_source_transforms(options, target):
global final_js
# Embed and preload files
if len(options.preload_files) or len(options.embed_files):
logger.debug('setting up files')
file_args = ['--from-emcc', '--export-name=' + settings.EXPORT_NAME]
if len(options.preload_files):
file_args.append('--preload')
file_args += options.preload_files
if len(options.embed_files):
file_args.append('--embed')
file_args += options.embed_files
if len(options.exclude_files):
file_args.append('--exclude')
file_args += options.exclude_files
if options.use_preload_cache:
file_args.append('--use-preload-cache')
if settings.LZ4:
file_args.append('--lz4')
if options.use_preload_plugins:
file_args.append('--use-preload-plugins')
if not settings.ENVIRONMENT_MAY_BE_NODE:
file_args.append('--no-node')
file_code = shared.check_call([shared.FILE_PACKAGER, unsuffixed(target) + '.data'] + file_args, stdout=PIPE).stdout
options.pre_js = js_manipulation.add_files_pre_js(options.pre_js, file_code)
# Apply pre and postjs files
if final_js and (options.pre_js or options.post_js):
logger.debug('applying pre/postjses')
src = read_file(final_js)
final_js += '.pp.js'
with open(final_js, 'w') as f:
# pre-js code goes right after the Module integration code (so it
# can use Module), we have a marker for it
f.write(do_replace(src, '// {{PRE_JSES}}', fix_windows_newlines(options.pre_js)))
f.write(fix_windows_newlines(options.post_js))
options.pre_js = src = options.post_js = None
save_intermediate('pre-post')
# Apply a source code transformation, if requested
if options.js_transform:
safe_copy(final_js, final_js + '.tr.js')
final_js += '.tr.js'
posix = not shared.WINDOWS
logger.debug('applying transform: %s', options.js_transform)
shared.check_call(building.remove_quotes(shlex.split(options.js_transform, posix=posix) + [os.path.abspath(final_js)]))
save_intermediate('transformed')
@ToolchainProfiler.profile_block('memory initializer')
def phase_memory_initializer(memfile):
# For the wasm backend, we don't have any memory info in JS. All we need to do
# is set the memory initializer url.
global final_js
src = read_file(final_js)
src = do_replace(src, '// {{MEM_INITIALIZER}}', 'var memoryInitializer = "%s";' % os.path.basename(memfile))
write_file(final_js + '.mem.js', src)
final_js += '.mem.js'
@ToolchainProfiler.profile_block('final emitting')
def phase_final_emitting(options, state, target, wasm_target, memfile):
global final_js
# Remove some trivial whitespace
# TODO: do not run when compress has already been done on all parts of the code
# src = read_file(final_js)
# src = re.sub(r'\n+[ \n]*\n+', '\n', src)
# write_file(final_js, src)
if settings.USE_PTHREADS:
target_dir = os.path.dirname(os.path.abspath(target))
worker_output = os.path.join(target_dir, settings.PTHREAD_WORKER_FILE)
with open(worker_output, 'w') as f:
f.write(shared.read_and_preprocess(shared.path_from_root('src', 'worker.js'), expand_macros=True))
# Minify the worker.js file in optimized builds
if (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1) and not settings.DEBUG_LEVEL:
minified_worker = building.acorn_optimizer(worker_output, ['minifyWhitespace'], return_output=True)
write_file(worker_output, minified_worker)
# track files that will need native eols
generated_text_files_with_native_eols = []
if settings.MODULARIZE:
modularize()
module_export_name_substitution()
# Run a final regex pass to clean up items that were not possible to optimize by Closure, or unoptimalities that were left behind
# by processing steps that occurred after Closure.
if settings.MINIMAL_RUNTIME == 2 and settings.USE_CLOSURE_COMPILER and settings.DEBUG_LEVEL == 0 and not settings.SINGLE_FILE:
# Process .js runtime file. Note that we need to handle the license text
# here, so that it will not confuse the hacky script.
shared.JS.handle_license(final_js)
shared.run_process([shared.PYTHON, shared.path_from_root('tools', 'hacky_postprocess_around_closure_limitations.py'), final_js])
# Unmangle previously mangled `import.meta` references in both main code and libraries.
# See also: `preprocess` in parseTools.js.
if settings.EXPORT_ES6 and settings.USE_ES6_IMPORT_META:
src = read_file(final_js)
final_js += '.esmeta.js'
write_file(final_js, src.replace('EMSCRIPTEN$IMPORT$META', 'import.meta'))
save_intermediate('es6-import-meta')
# Apply pre and postjs files
if options.extern_pre_js or options.extern_post_js:
logger.debug('applying extern pre/postjses')
src = read_file(final_js)
final_js += '.epp.js'
with open(final_js, 'w') as f:
f.write(fix_windows_newlines(options.extern_pre_js))
f.write(src)
f.write(fix_windows_newlines(options.extern_post_js))
save_intermediate('extern-pre-post')
shared.JS.handle_license(final_js)
js_target = state.js_target
# The JS is now final. Move it to its final location
move_file(final_js, js_target)
if not settings.SINGLE_FILE:
generated_text_files_with_native_eols += [js_target]
target_basename = unsuffixed_basename(target)
# If we were asked to also generate HTML, do that
if options.oformat == OFormat.HTML:
generate_html(target, options, js_target, target_basename,
wasm_target, memfile)
elif settings.PROXY_TO_WORKER:
generate_worker_js(target, js_target, target_basename)
if embed_memfile() and memfile:
shared.try_delete(memfile)
if settings.SPLIT_MODULE:
diagnostics.warning('experimental', 'The SPLIT_MODULE setting is experimental and subject to change')
do_split_module(wasm_target)
for f in generated_text_files_with_native_eols:
tools.line_endings.convert_line_endings_in_file(f, os.linesep, options.output_eol)
if options.executable:
make_js_executable(js_target)
def version_string():
# if the emscripten folder is not a git repo, don't run git - that can
# look up and find the revision in a parent directory that is a git repo
revision_suffix = ''
if os.path.exists(shared.path_from_root('.git')):
git_rev = run_process(
['git', 'rev-parse', 'HEAD'],
stdout=PIPE, stderr=PIPE, cwd=shared.path_from_root()).stdout.strip()
revision_suffix = '-git (%s)' % git_rev
elif os.path.exists(shared.path_from_root('emscripten-revision.txt')):
with open(shared.path_from_root('emscripten-revision.txt')) as f:
git_rev = f.read().strip()
revision_suffix = ' (%s)' % git_rev
return f'emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) {shared.EMSCRIPTEN_VERSION}{revision_suffix}'
def parse_args(newargs):
options = EmccOptions()
settings_changes = []
user_js_defines = []
should_exit = False
eh_enabled = False
wasm_eh_enabled = False
skip = False
for i in range(len(newargs)):
if skip:
skip = False
continue
# On Windows Vista (and possibly others), excessive spaces in the command line
# leak into the items in this array, so trim e.g. 'foo.cpp ' -> 'foo.cpp'
newargs[i] = newargs[i].strip()
arg = newargs[i]
arg_value = None
def check_flag(value):
# Check for and consume a flag
if arg == value:
newargs[i] = ''
return True
return False
def check_arg(name):
nonlocal arg_value
if arg.startswith(name) and '=' in arg:
arg_value = arg.split('=', 1)[1]
newargs[i] = ''
return True
if arg == name:
if len(newargs) <= i + 1:
exit_with_error("option '%s' requires an argument" % arg)
arg_value = newargs[i + 1]
newargs[i] = ''
newargs[i + 1] = ''
return True
return False
def consume_arg():
nonlocal arg_value
assert arg_value is not None
rtn = arg_value
arg_value = None
return rtn
def consume_arg_file():
name = consume_arg()
if not os.path.isfile(name):
exit_with_error("'%s': file not found: '%s'" % (arg, name))
return name
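# Usage sketch for the helpers above (added): check_flag('--emrun') consumes an exact
# match, while check_arg('--pre-js') matches both '--pre-js=file.js' and the two-token
# form '--pre-js file.js', leaving the value to be fetched via consume_arg() /
# consume_arg_file() (the latter also verifies that the named file exists).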
if arg.startswith('-O'):
# Let -O default to -O2, which is what gcc does.
options.requested_level = strip_prefix(arg, '-O') or '2'
if options.requested_level == 's':
options.requested_level = 2
settings.SHRINK_LEVEL = 1
settings_changes.append('INLINING_LIMIT=1')
elif options.requested_level == 'z':
options.requested_level = 2
settings.SHRINK_LEVEL = 2
settings_changes.append('INLINING_LIMIT=1')
settings.OPT_LEVEL = validate_arg_level(options.requested_level, 3, 'Invalid optimization level: ' + arg, clamp=True)
elif check_arg('--js-opts'):
logger.warning('--js-opts ignored when using llvm backend')
consume_arg()
elif check_arg('--llvm-opts'):
diagnostics.warning('deprecated', '--llvm-opts is deprecated. All non-emcc args are passed through to clang.')
elif arg.startswith('-flto'):
if '=' in arg:
settings.LTO = arg.split('=')[1]
else:
settings.LTO = "full"
elif check_arg('--llvm-lto'):
logger.warning('--llvm-lto ignored when using llvm backend')
consume_arg()
elif check_arg('--closure-args'):
args = consume_arg()
options.closure_args += shlex.split(args)
elif check_arg('--closure'):
options.use_closure_compiler = int(consume_arg())
elif check_arg('--js-transform'):
options.js_transform = consume_arg()
elif check_arg('--pre-js'):
options.pre_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--post-js'):
options.post_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--extern-pre-js'):
options.extern_pre_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--extern-post-js'):
options.extern_post_js += read_file(consume_arg_file()) + '\n'
elif check_arg('--compiler-wrapper'):
config.COMPILER_WRAPPER = consume_arg()
elif check_flag('--post-link'):
options.post_link = True
elif check_arg('--oformat'):
formats = [f.lower() for f in OFormat.__members__]
fmt = consume_arg()
if fmt not in formats:
exit_with_error('invalid output format: `%s` (must be one of %s)' % (fmt, formats))
options.oformat = getattr(OFormat, fmt.upper())
elif check_arg('--minify'):
arg = consume_arg()
if arg != '0':
exit_with_error('0 is the only supported option for --minify; 1 has been deprecated')
settings.DEBUG_LEVEL = max(1, settings.DEBUG_LEVEL)
elif arg.startswith('-g'):
options.requested_debug = arg
requested_level = strip_prefix(arg, '-g') or '3'
if is_int(requested_level):
# the -gX value is the debug level (-g1, -g2, etc.)
settings.DEBUG_LEVEL = validate_arg_level(requested_level, 4, 'Invalid debug level: ' + arg)
# if we don't need to preserve LLVM debug info, do not keep this flag
# for clang
if settings.DEBUG_LEVEL < 3:
newargs[i] = ''
else:
# for 3+, report -g to clang as -g4 etc. are not accepted
newargs[i] = '-g'
if settings.DEBUG_LEVEL == 4:
settings.GENERATE_SOURCE_MAP = 1
diagnostics.warning('deprecated', 'please replace -g4 with -gsource-map')
else:
if requested_level.startswith('force_dwarf'):
exit_with_error('gforce_dwarf was a temporary option and is no longer necessary (use -g)')
elif requested_level.startswith('separate-dwarf'):
# emit full DWARF but also emit it in a file on the side
newargs[i] = '-g'
# if a file is provided, use that; otherwise use the default location
# (note that we do not know the default location until all args have
# been parsed, so just note True for now).
if requested_level != 'separate-dwarf':
if not requested_level.startswith('separate-dwarf=') or requested_level.count('=') != 1:
exit_with_error('invalid -gseparate-dwarf=FILENAME notation')
settings.SEPARATE_DWARF = requested_level.split('=')[1]
else:
settings.SEPARATE_DWARF = True
elif requested_level == 'source-map':
settings.GENERATE_SOURCE_MAP = 1
newargs[i] = '-g'
# a non-integer level can be something like -gline-tables-only. keep
# the flag for the clang frontend to emit the appropriate DWARF info.
# set the emscripten debug level to 3 so that we do not remove that
# debug info during link (during compile, this does not make a
# difference).
settings.DEBUG_LEVEL = 3
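# Summary of the '-g' handling above (added, illustrative): '-g'/'-g3' keep full DWARF
# and pass '-g' to clang; '-g1'/'-g2' only set DEBUG_LEVEL and drop the flag; '-g4'
# additionally enables source maps (deprecated in favour of '-gsource-map');
# '-gseparate-dwarf[=FILE]' emits DWARF to a side file; any other '-g<suffix>' such as
# '-gline-tables-only' is forwarded to clang with DEBUG_LEVEL set to 3.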
elif check_flag('-profiling') or check_flag('--profiling'):
settings.DEBUG_LEVEL = max(settings.DEBUG_LEVEL, 2)
elif check_flag('-profiling-funcs') or check_flag('--profiling-funcs'):
options.profiling_funcs = True
elif newargs[i] == '--tracing' or newargs[i] == '--memoryprofiler':
if newargs[i] == '--memoryprofiler':
options.memory_profiler = True
options.tracing = True
newargs[i] = ''
settings_changes.append("EMSCRIPTEN_TRACING=1")
settings.JS_LIBRARIES.append((0, 'library_trace.js'))
elif check_flag('--emit-symbol-map'):
options.emit_symbol_map = True
settings.EMIT_SYMBOL_MAP = 1
elif check_flag('--bind'):
settings.EMBIND = 1
settings.JS_LIBRARIES.append((0, os.path.join('embind', 'emval.js')))
settings.JS_LIBRARIES.append((0, os.path.join('embind', 'embind.js')))
elif check_arg('--embed-file'):
options.embed_files.append(consume_arg())
elif check_arg('--preload-file'):
options.preload_files.append(consume_arg())
elif check_arg('--exclude-file'):
options.exclude_files.append(consume_arg())
elif check_flag('--use-preload-cache'):
options.use_preload_cache = True
elif check_flag('--no-heap-copy'):
diagnostics.warning('legacy-settings', 'ignoring legacy flag --no-heap-copy (that is the only mode supported now)')
elif check_flag('--use-preload-plugins'):
options.use_preload_plugins = True
elif check_flag('--ignore-dynamic-linking'):
options.ignore_dynamic_linking = True
elif arg == '-v':
shared.PRINT_STAGES = True
elif check_arg('--shell-file'):
options.shell_path = consume_arg_file()
elif check_arg('--source-map-base'):
options.source_map_base = consume_arg()
elif check_flag('--no-entry'):
options.no_entry = True
elif check_arg('--js-library'):
settings.JS_LIBRARIES.append((i + 1, os.path.abspath(consume_arg_file())))
elif check_flag('--remove-duplicates'):
diagnostics.warning('legacy-settings', '--remove-duplicates is deprecated as it is no longer needed. If you cannot link without it, file a bug with a testcase')
elif check_flag('--jcache'):
logger.error('jcache is no longer supported')
elif check_arg('--cache'):
config.CACHE = os.path.normpath(consume_arg())
shared.reconfigure_cache()
elif check_flag('--clear-cache'):
logger.info('clearing cache as requested by --clear-cache: `%s`', shared.Cache.dirname)
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--clear-ports'):
logger.info('clearing ports and cache as requested by --clear-ports')
system_libs.Ports.erase()
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--check'):
print(version_string(), file=sys.stderr)
shared.check_sanity(force=True)
should_exit = True
elif check_flag('--show-ports'):
system_libs.show_ports()
should_exit = True
elif check_arg('--memory-init-file'):
options.memory_init_file = int(consume_arg())
elif check_flag('--proxy-to-worker'):
settings_changes.append('PROXY_TO_WORKER=1')
elif check_arg('--valid-abspath'):
options.valid_abspaths.append(consume_arg())
elif check_flag('--separate-asm'):
exit_with_error('cannot --separate-asm with the wasm backend, since not emitting asm.js')
elif arg.startswith(('-I', '-L')):
path_name = arg[2:]
if os.path.isabs(path_name) and not is_valid_abspath(options, path_name):
# Of course an absolute path to a non-system-specific library or header
# is fine, and you can ignore this warning. The danger is system headers
# that are e.g. x86 specific and non-portable. The emscripten bundled
# headers are modified to be portable, local system ones are generally not.
diagnostics.warning(
'absolute-paths', f'-I or -L of an absolute path "{arg}" '
'encountered. If this is to a local system header/library, it may '
'cause problems (local system files make sense for compiling natively '
'on your system, but not necessarily to JavaScript).')
elif check_flag('--emrun'):
options.emrun = True
elif check_flag('--cpuprofiler'):
options.cpu_profiler = True
elif check_flag('--threadprofiler'):
options.thread_profiler = True
settings_changes.append('PTHREADS_PROFILING=1')
elif arg == '-fno-exceptions':
settings.DISABLE_EXCEPTION_CATCHING = 1
settings.DISABLE_EXCEPTION_THROWING = 1
settings.EXCEPTION_HANDLING = 0
elif arg == '-fexceptions':
eh_enabled = True
elif arg == '-fwasm-exceptions':
wasm_eh_enabled = True
elif arg == '-fignore-exceptions':
settings.DISABLE_EXCEPTION_CATCHING = 1
elif check_arg('--default-obj-ext'):
options.default_object_extension = consume_arg()
if not options.default_object_extension.startswith('.'):
options.default_object_extension = '.' + options.default_object_extension
elif arg == '-fsanitize=cfi':
options.cfi = True
elif check_arg('--output_eol'):
style = consume_arg()
if style.lower() == 'windows':
options.output_eol = '\r\n'
elif style.lower() == 'linux':
options.output_eol = '\n'
else:
exit_with_error(f'Invalid value "{style}" to --output_eol!')
elif check_arg('--generate-config'):
optarg = consume_arg()
path = os.path.expanduser(optarg)
if os.path.exists(path):
exit_with_error(f'File {optarg} passed to --generate-config already exists!')
else:
config.generate_config(optarg)
should_exit = True
# Record USE_PTHREADS setting because it controls whether --shared-memory is passed to lld
elif arg == '-pthread':
settings_changes.append('USE_PTHREADS=1')
elif arg in ('-fno-diagnostics-color', '-fdiagnostics-color=never'):
colored_logger.disable()
diagnostics.color_enabled = False
elif arg == '-fno-rtti':
settings.USE_RTTI = 0
elif arg == '-frtti':
settings.USE_RTTI = 1
elif arg.startswith('-jsD'):
key = strip_prefix(arg, '-jsD')
if '=' in key:
key, value = key.split('=')
else:
value = '1'
if key in settings.keys():
exit_with_error(f'{arg}: cannot change built-in settings values with a -jsD directive. Pass -s {key}={value} instead!')
user_js_defines += [(key, value)]
newargs[i] = ''
elif check_flag('-shared'):
options.shared = True
elif check_flag('-r'):
options.relocatable = True
elif check_arg('-o'):
options.output_file = consume_arg()
elif arg.startswith('-o'):
options.output_file = strip_prefix(arg, '-o')
newargs[i] = ''
elif arg == '-mllvm':
# Ignore the next argument rather than trying to parse it. This is needed
# because llvm args could, for example, start with `-o` and we don't want
# to confuse that with a normal `-o` flag.
skip = True
if should_exit:
sys.exit(0)
# TODO Currently -fexceptions only means Emscripten EH. Switch to wasm
# exception handling by default when -fexceptions is given when wasm
# exception handling becomes stable.
if wasm_eh_enabled:
settings.EXCEPTION_HANDLING = 1
settings.DISABLE_EXCEPTION_THROWING = 1
settings.DISABLE_EXCEPTION_CATCHING = 1
elif eh_enabled:
settings.EXCEPTION_HANDLING = 0
settings.DISABLE_EXCEPTION_THROWING = 0
settings.DISABLE_EXCEPTION_CATCHING = 0
newargs = [a for a in newargs if a]
return options, settings_changes, user_js_defines, newargs
@ToolchainProfiler.profile_block('binaryen')
def phase_binaryen(target, options, wasm_target):
global final_js
logger.debug('using binaryen')
if settings.GENERATE_SOURCE_MAP and not settings.SOURCE_MAP_BASE:
logger.warning("Wasm source map won't be usable in a browser without --source-map-base")
# whether we need to emit -g (function name debug info) in the final wasm
debug_info = settings.DEBUG_LEVEL >= 2 or options.profiling_funcs
# whether we need to emit -g in the intermediate binaryen invocations (but not
# necessarily at the very end). this is necessary if we depend on debug info
# during compilation, even if we do not emit it at the end.
# we track the number of causes for needing intermediate debug info so
# that we can stop emitting it when possible - in particular, that is
# important so that we stop emitting it before the end, and it is not in the
# final binary (if it shouldn't be)
intermediate_debug_info = 0
if debug_info:
intermediate_debug_info += 1
if options.emit_symbol_map:
intermediate_debug_info += 1
if settings.ASYNCIFY:
intermediate_debug_info += 1
# note that wasm-ld can strip DWARF info for us too (--strip-debug), but it
# also strips the Names section. so to emit just the Names section we don't
# tell wasm-ld to strip anything, and we do it here.
strip_debug = settings.DEBUG_LEVEL < 3
strip_producers = not settings.EMIT_PRODUCERS_SECTION
# run wasm-opt if we have work for it: either passes, or if we are using
# source maps (which requires some extra processing to keep the source map
# but remove DWARF)
passes = get_binaryen_passes()
if passes or settings.GENERATE_SOURCE_MAP:
# if we need to strip certain sections, and we have wasm-opt passes
# to run anyhow, do it with them.
if strip_debug:
passes += ['--strip-debug']
if strip_producers:
passes += ['--strip-producers']
building.save_intermediate(wasm_target, 'pre-byn.wasm')
# if asyncify is used, we will use it in the next stage, and so if it is
# the only reason we need intermediate debug info, we can stop keeping it
if settings.ASYNCIFY:
intermediate_debug_info -= 1
building.run_wasm_opt(wasm_target,
wasm_target,
args=passes,
debug=intermediate_debug_info)
elif strip_debug or strip_producers:
# we are not running wasm-opt. if we need to strip certain sections
# then do so using llvm-objcopy which is fast and does not rewrite the
# code (which is better for debug info)
building.save_intermediate(wasm_target, 'pre-strip.wasm')
building.strip(wasm_target, wasm_target, debug=strip_debug, producers=strip_producers)
if settings.EVAL_CTORS:
building.save_intermediate(wasm_target, 'pre-ctors.wasm')
building.eval_ctors(final_js, wasm_target, debug_info=intermediate_debug_info)
# after generating the wasm, do some final operations
if settings.EMIT_EMSCRIPTEN_METADATA:
diagnostics.warning('deprecated', 'We hope to remove support for EMIT_EMSCRIPTEN_METADATA. See https://github.com/emscripten-core/emscripten/issues/12231')
webassembly.add_emscripten_metadata(wasm_target)
if final_js:
if settings.SUPPORT_BIG_ENDIAN:
final_js = building.little_endian_heap(final_js)
# >=2GB heap support requires pointers in JS to be unsigned. rather than
# require all pointers to be unsigned by default, which increases code size
# a little, keep them signed, and just unsign them here if we need that.
if settings.CAN_ADDRESS_2GB:
final_js = building.use_unsigned_pointers_in_js(final_js)
# pthreads memory growth requires some additional JS fixups.
# note that we must do this after handling of unsigned pointers. unsigning
# adds some >>> 0 things, while growth will replace a HEAP8 with a call to
# a method to get the heap, and that call would not be recognized by the
# unsigning pass
if settings.USE_PTHREADS and settings.ALLOW_MEMORY_GROWTH:
final_js = building.apply_wasm_memory_growth(final_js)
if settings.USE_ASAN:
final_js = building.instrument_js_for_asan(final_js)
if settings.SAFE_HEAP:
final_js = building.instrument_js_for_safe_heap(final_js)
if settings.OPT_LEVEL >= 2 and settings.DEBUG_LEVEL <= 2:
# minify the JS. Do not minify whitespace if Closure is used, so that
# Closure can print out readable error messages (Closure will then
# minify whitespace afterwards)
save_intermediate_with_wasm('preclean', wasm_target)
final_js = building.minify_wasm_js(js_file=final_js,
wasm_file=wasm_target,
expensive_optimizations=will_metadce(),
minify_whitespace=minify_whitespace() and not options.use_closure_compiler,
debug_info=intermediate_debug_info)
save_intermediate_with_wasm('postclean', wasm_target)
if settings.ASYNCIFY_LAZY_LOAD_CODE:
building.asyncify_lazy_load_code(wasm_target, debug=intermediate_debug_info)
def preprocess_wasm2js_script():
return read_and_preprocess(shared.path_from_root('src', 'wasm2js.js'), expand_macros=True)
def run_closure_compiler():
global final_js
final_js = building.closure_compiler(final_js, pretty=not minify_whitespace(),
extra_closure_args=options.closure_args)
save_intermediate_with_wasm('closure', wasm_target)
if final_js and options.use_closure_compiler:
run_closure_compiler()
symbols_file = None
if options.emit_symbol_map:
symbols_file = shared.replace_or_append_suffix(target, '.symbols')
if settings.WASM2JS:
symbols_file_js = None
if settings.WASM == 2:
wasm2js_template = wasm_target + '.js'
with open(wasm2js_template, 'w') as f:
f.write(preprocess_wasm2js_script())
# generate secondary file for JS symbols
if options.emit_symbol_map:
symbols_file_js = shared.replace_or_append_suffix(wasm2js_template, '.symbols')
else:
wasm2js_template = final_js
if options.emit_symbol_map:
symbols_file_js = shared.replace_or_append_suffix(target, '.symbols')
wasm2js = building.wasm2js(wasm2js_template,
wasm_target,
opt_level=settings.OPT_LEVEL,
minify_whitespace=minify_whitespace(),
use_closure_compiler=options.use_closure_compiler,
debug_info=debug_info,
symbols_file=symbols_file,
symbols_file_js=symbols_file_js)
shared.configuration.get_temp_files().note(wasm2js)
if settings.WASM == 2:
safe_copy(wasm2js, wasm2js_template)
if settings.WASM != 2:
final_js = wasm2js
# if we only target JS, we don't need the wasm any more
shared.try_delete(wasm_target)
save_intermediate('wasm2js')
# emit the final symbols, either in the binary or in a symbol map.
# this will also remove debug info if we only kept it around in the intermediate invocations.
# note that if we aren't emitting a binary (like in wasm2js) then we don't
# have anything to do here.
if options.emit_symbol_map:
intermediate_debug_info -= 1
if os.path.exists(wasm_target):
building.handle_final_wasm_symbols(wasm_file=wasm_target, symbols_file=symbols_file, debug_info=intermediate_debug_info)
save_intermediate_with_wasm('symbolmap', wasm_target)
if settings.DEBUG_LEVEL >= 3 and settings.SEPARATE_DWARF and os.path.exists(wasm_target):
building.emit_debug_on_side(wasm_target, settings.SEPARATE_DWARF)
if settings.WASM2C:
wasm2c.do_wasm2c(wasm_target)
# we have finished emitting the wasm, so intermediate debug info is
# definitely no longer needed; stop tracking it.
if debug_info:
intermediate_debug_info -= 1
assert intermediate_debug_info == 0
# strip debug info if it was not already stripped by the last command
if not debug_info and building.binaryen_kept_debug_info and \
building.os.path.exists(wasm_target):
building.run_wasm_opt(wasm_target, wasm_target)
# replace placeholder strings with correct subresource locations
if final_js and settings.SINGLE_FILE and not settings.WASM2JS:
js = read_file(final_js)
if settings.MINIMAL_RUNTIME:
js = do_replace(js, '<<< WASM_BINARY_DATA >>>', base64_encode(read_binary(wasm_target)))
else:
js = do_replace(js, '<<< WASM_BINARY_FILE >>>', shared.JS.get_subresource_location(wasm_target))
shared.try_delete(wasm_target)
with open(final_js, 'w') as f:
f.write(js)
def modularize():
global final_js
logger.debug(f'Modularizing, assigning to var {settings.EXPORT_NAME}')
src = read_file(final_js)
return_value = settings.EXPORT_NAME
if settings.WASM_ASYNC_COMPILATION:
return_value += '.ready'
if not settings.EXPORT_READY_PROMISE:
return_value = '{}'
src = '''
function(%(EXPORT_NAME)s) {
%(EXPORT_NAME)s = %(EXPORT_NAME)s || {};
%(src)s
return %(return_value)s
}
''' % {
'EXPORT_NAME': settings.EXPORT_NAME,
'src': src,
'return_value': return_value
}
if settings.MINIMAL_RUNTIME and not settings.USE_PTHREADS:
# Single threaded MINIMAL_RUNTIME programs do not need access to
# document.currentScript, so a simple export declaration is enough.
src = 'var %s=%s' % (settings.EXPORT_NAME, src)
else:
script_url_node = ""
# When MODULARIZE is set, this JS may be executed later,
# after document.currentScript is gone, so we save it.
# In EXPORT_ES6 + USE_PTHREADS the 'thread' is actually an ES6 module webworker running in strict mode,
# so doesn't have access to 'document'. In this case use 'import.meta' instead.
if settings.EXPORT_ES6 and settings.USE_ES6_IMPORT_META:
script_url = "import.meta.url"
else:
script_url = "typeof document !== 'undefined' && document.currentScript ? document.currentScript.src : undefined"
if shared.target_environment_may_be('node'):
script_url_node = "if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;"
src = '''
var %(EXPORT_NAME)s = (function() {
var _scriptDir = %(script_url)s;
%(script_url_node)s
return (%(src)s);
})();
''' % {
'EXPORT_NAME': settings.EXPORT_NAME,
'script_url': script_url,
'script_url_node': script_url_node,
'src': src
}
final_js += '.modular.js'
with open(final_js, 'w') as f:
f.write(src)
# Export using a UMD style export, or ES6 exports if selected
if settings.EXPORT_ES6:
f.write('export default %s;' % settings.EXPORT_NAME)
elif not settings.MINIMAL_RUNTIME:
f.write('''\
if (typeof exports === 'object' && typeof module === 'object')
module.exports = %(EXPORT_NAME)s;
else if (typeof define === 'function' && define['amd'])
define([], function() { return %(EXPORT_NAME)s; });
else if (typeof exports === 'object')
exports["%(EXPORT_NAME)s"] = %(EXPORT_NAME)s;
''' % {'EXPORT_NAME': settings.EXPORT_NAME})
shared.configuration.get_temp_files().note(final_js)
save_intermediate('modularized')
def module_export_name_substitution():
global final_js
logger.debug(f'Private module export name substitution with {settings.EXPORT_NAME}')
with open(final_js) as f:
src = f.read()
final_js += '.module_export_name_substitution.js'
if settings.MINIMAL_RUNTIME:
# In MINIMAL_RUNTIME the Module object is always present to provide the .asm.js/.wasm content
replacement = settings.EXPORT_NAME
else:
replacement = "typeof %(EXPORT_NAME)s !== 'undefined' ? %(EXPORT_NAME)s : {}" % {"EXPORT_NAME": settings.EXPORT_NAME}
src = re.sub(r'{\s*[\'"]?__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__[\'"]?:\s*1\s*}', replacement, src)
# For Node.js and other shell environments, create an unminified Module object so that
# loading an external .asm.js file that assigns to Module['asm'] works even when Closure is used.
if settings.MINIMAL_RUNTIME and (shared.target_environment_may_be('node') or shared.target_environment_may_be('shell')):
src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
with open(final_js, 'w') as f:
f.write(src)
shared.configuration.get_temp_files().note(final_js)
save_intermediate('module_export_name_substitution')
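# Illustrative effect (added): the generated JS contains a placeholder object like
# {"__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__": 1}; the regex above rewrites
# it to the EXPORT_NAME itself under MINIMAL_RUNTIME, or to
# "typeof Module !== 'undefined' ? Module : {}" for the default EXPORT_NAME of 'Module'.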
def generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile):
script = ScriptSource()
shell = read_and_preprocess(options.shell_path)
assert '{{{ SCRIPT }}}' in shell, 'HTML shell must contain {{{ SCRIPT }}} , see src/shell.html for an example'
base_js_target = os.path.basename(js_target)
if settings.PROXY_TO_WORKER:
proxy_worker_filename = (settings.PROXY_TO_WORKER_FILENAME or target_basename) + '.js'
worker_js = worker_js_script(proxy_worker_filename)
script.inline = ('''
var filename = '%s';
if ((',' + window.location.search.substr(1) + ',').indexOf(',noProxy,') < 0) {
console.log('running code in a web worker');
''' % shared.JS.get_subresource_location(proxy_worker_filename)) + worker_js + '''
} else {
console.log('running code on the main thread');
var fileBytes = tryParseAsDataURI(filename);
var script = document.createElement('script');
if (fileBytes) {
script.innerHTML = intArrayToString(fileBytes);
} else {
script.src = filename;
}
document.body.appendChild(script);
}
'''
else:
# Normal code generation path
script.src = base_js_target
if not settings.SINGLE_FILE:
if memfile and not settings.MINIMAL_RUNTIME:
# start to load the memory init file in the HTML, in parallel with the JS
script.un_src()
script.inline = ('''
var memoryInitializer = '%s';
memoryInitializer = Module['locateFile'] ? Module['locateFile'](memoryInitializer, '') : memoryInitializer;
Module['memoryInitializerRequestURL'] = memoryInitializer;
var meminitXHR = Module['memoryInitializerRequest'] = new XMLHttpRequest();
meminitXHR.open('GET', memoryInitializer, true);
meminitXHR.responseType = 'arraybuffer';
meminitXHR.send(null);
''' % shared.JS.get_subresource_location(memfile)) + script.inline
if not settings.WASM_ASYNC_COMPILATION:
# We need to load the wasm file before anything else, it has to be synchronously ready TODO: optimize
script.un_src()
script.inline = '''
var wasmURL = '%s';
var wasmXHR = new XMLHttpRequest();
wasmXHR.open('GET', wasmURL, true);
wasmXHR.responseType = 'arraybuffer';
wasmXHR.onload = function() {
if (wasmXHR.status === 200 || wasmXHR.status === 0) {
Module.wasmBinary = wasmXHR.response;
} else {
var wasmURLBytes = tryParseAsDataURI(wasmURL);
if (wasmURLBytes) {
Module.wasmBinary = wasmURLBytes.buffer;
}
}
%s
};
wasmXHR.send(null);
''' % (shared.JS.get_subresource_location(wasm_target), script.inline)
if settings.WASM == 2:
# If target browser does not support WebAssembly, we need to load the .wasm.js file before the main .js file.
script.un_src()
script.inline = '''
function loadMainJs() {
%s
}
if (!window.WebAssembly || location.search.indexOf('_rwasm=0') > 0) {
// Current browser does not support WebAssembly, load the .wasm.js JavaScript fallback
// before the main JS runtime.
var wasm2js = document.createElement('script');
wasm2js.src = '%s';
wasm2js.onload = loadMainJs;
document.body.appendChild(wasm2js);
} else {
// Current browser supports Wasm, proceed with loading the main JS runtime.
loadMainJs();
}
''' % (script.inline, shared.JS.get_subresource_location(wasm_target) + '.js')
# when script.inline isn't empty, add required helper functions such as tryParseAsDataURI
if script.inline:
for filename in ('arrayUtils.js', 'base64Utils.js', 'URIUtils.js'):
content = read_and_preprocess(shared.path_from_root('src', filename))
script.inline = content + script.inline
script.inline = 'var ASSERTIONS = %s;\n%s' % (settings.ASSERTIONS, script.inline)
# inline script for SINGLE_FILE output
if settings.SINGLE_FILE:
js_contents = script.inline or ''
if script.src:
js_contents += read_file(js_target)
shared.try_delete(js_target)
script.src = None
script.inline = js_contents
html_contents = do_replace(shell, '{{{ SCRIPT }}}', script.replacement())
html_contents = tools.line_endings.convert_line_endings(html_contents, '\n', options.output_eol)
try:
with open(target, 'wb') as f:
# Force UTF-8 output for consistency across platforms and with the web.
f.write(html_contents.encode('utf-8'))
except OSError as e:
exit_with_error(f'cannot write output file: {e}')
def minify_html(filename):
if settings.DEBUG_LEVEL >= 2:
return
opts = []
# -g1 and greater retain whitespace and comments in source
if settings.DEBUG_LEVEL == 0:
opts += ['--collapse-whitespace',
'--collapse-inline-tag-whitespace',
'--remove-comments',
'--remove-tag-whitespace',
'--sort-attributes',
'--sort-class-name']
# -g2 and greater do not minify HTML at all
if settings.DEBUG_LEVEL <= 1:
opts += ['--decode-entities',
'--collapse-boolean-attributes',
'--remove-attribute-quotes',
'--remove-redundant-attributes',
'--remove-script-type-attributes',
'--remove-style-link-type-attributes',
'--use-short-doctype',
'--minify-css', 'true',
'--minify-js', 'true']
# html-minifier also has the following options, but they look unsafe for use:
# '--remove-optional-tags': removes e.g. <head></head> and <body></body> tags from the page.
# (Breaks at least browser.test_sdl2glshader)
# '--remove-empty-attributes': removes all attributes with whitespace-only values.
# (Breaks at least browser.test_asmfs_hello_file)
# '--remove-empty-elements': removes all elements with empty contents.
# (Breaks at least browser.test_asm_swapping)
logger.debug(f'minifying HTML file {filename}')
size_before = os.path.getsize(filename)
start_time = time.time()
shared.check_call(shared.get_npm_cmd('html-minifier-terser') + [filename, '-o', filename] + opts, env=shared.env_with_node_in_path())
elapsed_time = time.time() - start_time
size_after = os.path.getsize(filename)
delta = size_after - size_before
logger.debug(f'HTML minification took {elapsed_time:.2f} seconds, and shrunk size of {filename} from {size_before} to {size_after} bytes, delta={delta} ({delta * 100.0 / size_before:+.2f}%)')
def generate_html(target, options, js_target, target_basename,
wasm_target, memfile):
logger.debug('generating HTML')
if settings.EXPORT_NAME != 'Module' and \
not settings.MINIMAL_RUNTIME and \
options.shell_path == shared.path_from_root('src', 'shell.html'):
# the minimal runtime shell HTML is designed to support changing the export
# name, but the normal one does not support that currently
exit_with_error('Customizing EXPORT_NAME requires that the HTML be customized to use that name (see https://github.com/emscripten-core/emscripten/issues/10086)')
if settings.MINIMAL_RUNTIME:
generate_minimal_runtime_html(target, options, js_target, target_basename)
else:
generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile)
if settings.MINIFY_HTML and (settings.OPT_LEVEL >= 1 or settings.SHRINK_LEVEL >= 1):
minify_html(target)
def generate_worker_js(target, js_target, target_basename):
# compiler output is embedded as base64
if settings.SINGLE_FILE:
proxy_worker_filename = shared.JS.get_subresource_location(js_target)
# compiler output goes in .worker.js file
else:
move_file(js_target, unsuffixed(js_target) + '.worker.js')
worker_target_basename = target_basename + '.worker'
proxy_worker_filename = (settings.PROXY_TO_WORKER_FILENAME or worker_target_basename) + '.js'
target_contents = worker_js_script(proxy_worker_filename)
write_file(target, target_contents)
def worker_js_script(proxy_worker_filename):
web_gl_client_src = read_file(shared.path_from_root('src', 'webGLClient.js'))
idb_store_src = read_file(shared.path_from_root('src', 'IDBStore.js'))
proxy_client_src = read_file(shared.path_from_root('src', 'proxyClient.js'))
proxy_client_src = do_replace(proxy_client_src, '{{{ filename }}}', proxy_worker_filename)
proxy_client_src = do_replace(proxy_client_src, '{{{ IDBStore.js }}}', idb_store_src)
return web_gl_client_src + '\n' + proxy_client_src
def find_library(lib, lib_dirs):
for lib_dir in lib_dirs:
path = os.path.join(lib_dir, lib)
if os.path.isfile(path):
logger.debug('found library "%s" at %s', lib, path)
return path
return None
def process_libraries(state, linker_inputs):
new_flags = []
libraries = []
suffixes = STATICLIB_ENDINGS + DYNAMICLIB_ENDINGS
system_libs_map = system_libs.Library.get_usable_variations()
# Find library files
for i, flag in state.link_flags:
if not flag.startswith('-l'):
new_flags.append((i, flag))
continue
lib = strip_prefix(flag, '-l')
logger.debug('looking for library "%s"', lib)
js_libs, native_lib = building.map_to_js_libs(lib)
if js_libs is not None:
libraries += [(i, js_lib) for js_lib in js_libs]
# If native_lib is returned then include it in the link
# via forced_stdlibs.
if native_lib:
state.forced_stdlibs.append(native_lib)
continue
# We don't need to resolve system libraries to absolute paths here, we can just
# let wasm-ld handle that. However, we do want to map to the correct variant.
# For example we map `-lc` to `-lc-mt` if we are building with threading support.
if 'lib' + lib in system_libs_map:
lib = system_libs_map['lib' + lib]
new_flags.append((i, '-l' + strip_prefix(lib.get_base_name(), 'lib')))
continue
if building.map_and_apply_to_settings(lib):
continue
path = None
for suff in suffixes:
name = 'lib' + lib + suff
path = find_library(name, state.lib_dirs)
if path:
break
if path:
linker_inputs.append((i, path))
continue
new_flags.append((i, flag))
settings.JS_LIBRARIES += libraries
# At this point processing JS_LIBRARIES is finished, no more items will be added to it.
# Sort the input list from (order, lib_name) pairs to a flat array in the right order.
settings.JS_LIBRARIES.sort(key=lambda lib: lib[0])
settings.JS_LIBRARIES = [lib[1] for lib in settings.JS_LIBRARIES]
state.link_flags = new_flags
class ScriptSource:
def __init__(self):
self.src = None # if set, we have a script to load with a src attribute
self.inline = None # if set, we have the contents of a script to write inline in a script
def un_src(self):
"""Use this if you want to modify the script and need it to be inline."""
if self.src is None:
return
quoted_src = quote(self.src)
if settings.EXPORT_ES6:
self.inline = f'''
import("./{quoted_src}").then(exports => exports.default(Module))
'''
else:
self.inline = f'''
var script = document.createElement('script');
script.src = "{quoted_src}";
document.body.appendChild(script);
'''
self.src = None
def replacement(self):
"""Returns the script tag to replace the {{{ SCRIPT }}} tag in the target"""
assert (self.src or self.inline) and not (self.src and self.inline)
if self.src:
quoted_src = quote(self.src)
if settings.EXPORT_ES6:
return f'''
<script type="module">
import initModule from "./{quoted_src}";
initModule(Module);
</script>
'''
else:
return f'<script async type="text/javascript" src="{quoted_src}"></script>'
else:
return '<script>\n%s\n</script>' % self.inline
def is_valid_abspath(options, path_name):
# Any path that is underneath the emscripten repository root must be ok.
if shared.path_from_root().replace('\\', '/') in path_name.replace('\\', '/'):
return True
def in_directory(root, child):
# make both path absolute
root = os.path.realpath(root)
child = os.path.realpath(child)
# return True if the common prefix of both paths equals the root directory,
# e.g. for /a/b/c/d.rst with root /a/b, the common prefix is /a/b
return os.path.commonprefix([root, child]) == root
for valid_abspath in options.valid_abspaths:
if in_directory(valid_abspath, path_name):
return True
return False
def parse_symbol_list_file(contents):
"""Parse contents of one-symbol-per-line response file. This format can by used
with, for example, -sEXPORTED_FUNCTIONS=@filename and avoids the need for any
kind of quoting or escaping.
"""
values = contents.splitlines()
return [v.strip() for v in values]
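# Example (added): a response file passed as -sEXPORTED_FUNCTIONS=@syms.txt containing
#   _main
#   _malloc
# parses to ['_main', '_malloc'], with surrounding whitespace stripped from each line.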
def parse_value(text, expect_list):
# Note that using response files can introduce whitespace, if the file
# has a newline at the end. For that reason, we rstrip() in relevant
# places here.
def parse_string_value(text):
first = text[0]
if first == "'" or first == '"':
text = text.rstrip()
assert text[-1] == text[0] and len(text) > 1, 'unclosed quoted string. expected final character to be "%s" and length to be greater than 1 in "%s"' % (text[0], text)
return text[1:-1]
return text
def parse_string_list_members(text):
sep = ','
values = text.split(sep)
result = []
index = 0
while True:
current = values[index].lstrip() # Cannot safely rstrip for cases like: "HERE-> ,"
if not len(current):
exit_with_error('string array should not contain an empty value')
first = current[0]
if not(first == "'" or first == '"'):
result.append(current.rstrip())
else:
start = index
while True: # Continue until closing quote found
if index >= len(values):
exit_with_error("unclosed quoted string. expected final character to be '%s' in '%s'" % (first, values[start]))
new = values[index].rstrip()
if new and new[-1] == first:
if start == index:
result.append(current.rstrip()[1:-1])
else:
result.append((current + sep + new)[1:-1])
break
else:
current += sep + values[index]
index += 1
index += 1
if index >= len(values):
break
return result
def parse_string_list(text):
text = text.rstrip()
if text and text[0] == '[':
if text[-1] != ']':
exit_with_error('unclosed string list. expected final character to be "]" in "%s"' % (text))
text = text[1:-1]
if text.strip() == "":
return []
return parse_string_list_members(text)
if expect_list or (text and text[0] == '['):
# if json parsing fails, we fall back to our own parser, which can handle a few
# simpler syntaxes
try:
return json.loads(text)
except ValueError:
return parse_string_list(text)
try:
return int(text)
except ValueError:
return parse_string_value(text)
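# Illustrative behaviour of parse_value (added): parse_value('3', False) -> 3,
# parse_value('"foo"', False) -> 'foo', parse_value('["a", "b"]', True) -> ['a', 'b']
# via json.loads, and parse_value("['a', 'b']", True) falls back to the hand-rolled
# list parser above and also yields ['a', 'b'].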
def validate_arg_level(level_string, max_level, err_msg, clamp=False):
try:
level = int(level_string)
except ValueError:
raise Exception(err_msg)
if clamp:
if level > max_level:
logger.warning("optimization level '-O" + level_string + "' is not supported; using '-O" + str(max_level) + "' instead")
level = max_level
if not 0 <= level <= max_level:
raise Exception(err_msg)
return level
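# Example (added): validate_arg_level('2', 3, 'bad level') returns 2;
# validate_arg_level('4', 3, 'bad level', clamp=True) warns and returns 3;
# without clamp=True the same call would raise with the given error message.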
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def main(args):
start_time = time.time()
ret = run(args)
logger.debug('total time: %.2f seconds', (time.time() - start_time))
return ret
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
#!/usr/bin/env python
# coding: utf-8
import json
import os
import pathlib
from dotenv import load_dotenv
from requests_oauthlib import OAuth1Session
class FailedToGetResponse(Exception):
"""TwitterAPIからのレスポンスが正常ではないことを知らせる例外クラス"""
pass
class TwitterUtils():
def __init__(self) -> None:
root_path = pathlib.Path(__file__).parents[2]
dotenv_path = root_path / '.env'
if not dotenv_path.is_file():
print(dotenv_path)
raise FileNotFoundError(".env file not found")
load_dotenv(dotenv_path)
self.CONSUMER_KEY = os.getenv('CONSUMER_KEY')
self.CONSUMER_SECRET = os.getenv('CONSUMER_SECRET')
self.ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
self.ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET')
self.KEY = (
self.CONSUMER_KEY,
self.CONSUMER_SECRET,
self.ACCESS_TOKEN,
self.ACCESS_TOKEN_SECRET
)
if any(i is None for i in self.KEY):
raise ValueError(self.KEY)
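# Added note (assumed layout): the .env file at the repository root is expected to
# provide the four credentials read above, e.g.
#   CONSUMER_KEY=...
#   CONSUMER_SECRET=...
#   ACCESS_TOKEN=...
#   ACCESS_TOKEN_SECRET=...
# A missing value surfaces here as a ValueError listing the partially-populated tuple.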
def get_timeline(self) -> dict:
twitter = OAuth1Session(*self.KEY)
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
# scpjp_announce
params = {'screen_name': 'scpjp_announce', 'count': 5}
res = twitter.get(url, params=params)
result_dict = {}
if res.status_code == 200: # the request succeeded
timelines = json.loads(res.text) # get the timeline list from the response
for line in timelines: # loop over the timeline entries
# print(f"{root_url}{line['id']}")
result_dict[line['id']] = {'created_at': line['created_at'],
'name': line['user']['name'],
'avator_url': line['user']['profile_image_url'],
'screen_name': line['user']['screen_name']}
else: # the request failed
print("Failed: %d" % res.status_code)
raise FailedToGetResponse
return result_dict
|
__all__ = ["GoogleAPI", "Resource", "Method"]
import re
import os
import warnings
from urllib.parse import urlencode, quote
from functools import wraps
from typing import List, Generic, TypeVar
from .excs import ValidationError
from .utils import _safe_getitem
from .models import MediaDownload, MediaUpload, ResumableUpload, Request
from .validate import validate as validate_
T = TypeVar("T") # Generic type var
# These are the hard-coded kwargs in Method.__call__
# They're used to check whether these names collide with any of the url parameters provided by the discovery docs.
# A collision would mean that the user couldn't pass a url parameter that shares a name with one of the RESERVED_KEYWORDS.
RESERVED_KEYWORDS = [
"validate",
"data",
"json",
"upload_file",
"download_file",
"timeout",
]
# From: https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/discovery.py
# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = frozenset(["trace", "pp", "strict"])
STACK_QUERY_PARAMETER_DEFAULT_VALUE = {"type": "string", "location": "query"}
MEDIA_SIZE_BIT_SHIFTS = {"KB": 10, "MB": 20, "GB": 30, "TB": 40}
# TODO: etagRequired: {
# type: "boolean", # noqa: F821 (weird error)
# description: "Whether this method requires an ETag to be specified. The ETag is sent as an HTTP If-Match or If-None-Match header."
# }
# NOTE: etagRequired is only mentioned once in all of the discovery documents available from Google. (In discovery_service-v1. So, it isn't actually being used)
def _temporarily_add_back_dashes_to_param_definitions(f):
"""
When instantiating a Method, Method's constructor will remove all
dashes from the names of its URI params and global params in order
to make it possible to pass uri params through function calls
e.g. this is viable get_videos(my_videos=True)
this will fail     get_videos(my-videos=True)
This function temporarily adds '-' back to the url parameters and the passed uri_params
so they can be processed and validated correctly and comply with the discovery doc.
It reverts back to '_' after the wrapped function is done.
"""
@wraps(f)
def wrapper(
self,
validate=None,
data=None,
json=None,
upload_file=None,
download_file=None,
timeout=None,
**uri_params,
):
# unfix urls
uri_params = self._replace_dashes_with_underscores_in_user_provided_params(
uri_params, self.parameters
)
# unfix params
self._method_specs["parameters"] = self._restore_dashes_to_params(
self._method_specs.get("parameters")
)
self._global_parameters = self._restore_dashes_to_params(
self._global_parameters
)
# Run function
results = f(
self,
validate,
data,
json,
upload_file,
download_file,
timeout,
**uri_params,
)
# fix params again
self._method_specs[
"parameters"
] = self._replace_dashes_with_underscores_in_params(
self._method_specs.get("parameters")
)
self._global_parameters = self._replace_dashes_with_underscores_in_params(
self._global_parameters
)
return results
return wrapper
class Method:
def __init__(
self,
name,
method_specs,
global_parameters,
schemas,
root_url,
service_path,
batch_path,
validate,
):
# Replaces '-'s with '_'s and preserve old names to revert back to them after this method is called
global_parameters = self._replace_dashes_with_underscores_in_params(
global_parameters
)
method_specs["parameters"] = self._replace_dashes_with_underscores_in_params(
method_specs.get("parameters")
)
self.name = name
self._method_specs = method_specs
self._global_parameters = global_parameters
self._schemas = schemas
self._root_url = root_url
self._service_path = service_path
self._batch_path = batch_path
if (
self["useMediaDownloadService"] is True
and self["supportsMediaDownload"] is True
):
self._download_base_url = self._root_url + "download/" + self._service_path
else:
self._download_base_url = None
self._base_url = self._root_url + self._service_path
self._batch_url = self._root_url + self._batch_path
self._should_validate = validate
# ---- Changes URL parameters with a "-" to "_" -----#
# Depends on how you view it, but this section also changes robots with small mouths to robots with big mouths
@staticmethod
def _replace_dashes_with_underscores_in_params(param_set: Generic[T]) -> T:
if param_set:
for name, schema in list(param_set.items()):
if "-" in name:
new_name = name.replace("-", "_") # See?!
schema["orig_name"] = name
param_set[new_name] = schema
del param_set[name]
return param_set
@staticmethod
def _restore_dashes_to_params(param_set: Generic[T]) -> T:
if param_set:
for name, schema in list(param_set.items()):
if "orig_name" in schema:
param_set[schema["orig_name"]] = schema
del param_set[name]
return param_set
@staticmethod
def _replace_dashes_with_underscores_in_user_provided_params(
uri_params: Generic[T], parameters
) -> T:
for k, v in list(uri_params.items()):
if "_" in k:
if k in parameters:
if "orig_name" in parameters[k]:
uri_params[parameters[k]["orig_name"]] = v
del uri_params[k]
return uri_params
@staticmethod
def _del_none_params(uri_params: Generic[T]) -> T:
for k, v in list(uri_params.items()):
if v is None:
del uri_params[k]
return uri_params
# ---- / Changes URL parameters with a "-" to "_" -----#
@property
def request(self) -> dict:
""" Returns expected request body """
body = self["request"]
if body.get("$ref"):
return self._schemas.get(body["$ref"])
else:
return body
@property
def response(self) -> dict:
""" Retruns expected response body """
body = self["response"]
if body.get("$ref"):
return self._schemas.get(body["$ref"])
else:
return body
@property
def parameters(self) -> dict:
"""
Parameters property
Returns:
dict: All parameters that this method can take as described in the discovery document
"""
if not self._global_parameters and not self["parameters"]:
return {}
elif not self._global_parameters:
return self["parameters"]
elif not self["parameters"]:
return self._global_parameters
else:
return {**self["parameters"], **self._global_parameters}
@property
def optional_parameters(self) -> List[str]:
"""
Optional Parameters
Returns:
list: List of the names of optional parameters this method takes
"""
return (
[
parameter_name
for parameter_name, parameter_info in self.parameters.items()
if parameter_info.get("required") is not True
]
if self.parameters
else []
)
@property
def required_parameters(self) -> List[str]:
"""
Required Parameters
Returns:
list: List of the names of required parameters this method takes
"""
return (
[
parameter_name
for parameter_name, parameter_info in self.parameters.items()
if parameter_info.get("required") is True
]
if self.parameters
else []
)
@property
def path_parameters(self) -> List[str]:
"""
Path Parameters
Returns:
list: List of the names of path parameters this method takes
"""
return (
[
param_name
for param_name, param_info in self.parameters.items()
if param_info.get("location") == "path"
]
if self.parameters
else []
)
@property
def query_parameters(self) -> List[str]:
"""
Query Parameters
Returns:
list: List of the names of Query parameters this method takes
"""
return (
[
param_name
for param_name, param_info in self.parameters.items()
if param_info.get("location") == "query"
]
if self.parameters
else []
)
@property
def required_query_parameters(self) -> List[str]:
"""
Required Query Parameters
Returns:
list: List of the names of required query parameters this method takes
"""
return (
[
param_name
for param_name in self.query_parameters
if param_name in self.required_parameters
]
if self.query_parameters
else []
)
@property
def optional_query_parameters(self) -> List[str]:
"""
Optional Query Parameters
Returns:
list: List of the names of optional query parameters this method takes
"""
return (
[
param_name
for param_name in self.query_parameters
if param_name in self.optional_parameters
]
if self.query_parameters
else []
)
def __getitem__(self, key):
"""
Examples:
::
>>> self['description']
"method description"
>>> self['scopes']
['returns', 'scopes', 'required', 'by', 'this', 'method', 'in', 'a', 'list']
>>> self['supportsMediaDownload']
False
>>> self['supportsMediaUpload']
True
>>> self['httpMethod']
'GET'
Hint:
Using this method with ``scopes`` as an argument can be useful for incremental authorization. (Requesting scopes when needed. As opposed to requesting them at once)
for more: https://developers.google.com/identity/protocols/OAuth2WebServer#incrementalAuth
"""
return self._method_specs.get(key)
def _validate(self, instance, schema, schema_name=None):
return validate_(instance, schema, self._schemas, schema_name)
@_temporarily_add_back_dashes_to_param_definitions
def __call__(
self,
validate=None,
data=None,
json=None,
upload_file=None,
download_file=None,
timeout=None,
**uri_params,
) -> Request:
"""
Builds a request from this method
Note:
            * When passing ``datetime.datetime`` or ``datetime.date`` objects, pass them in JSON (ISO) format.
            * Aiogoogle won't do that for you, as it would be a big hassle to iterate over every item in ``**uri_params``, ``json`` and ``data`` to check for datetime objects.
            * Fortunately, Python makes that easy to do yourself.
            * Instead of passing, say, ``datetime.datetime.utcnow()``, pass ``datetime.datetime.utcnow().isoformat()``
        Note:
            * All ``None`` values are omitted before sending to Google APIs. If you want to explicitly pass a JSON null, pass it as ``"null"``, not ``None``
        Arguments:
            validate (bool): Overrides :param: aiogoogle.Aiogoogle.validate if not None
            json (dict): JSON body
            data (any): Data body (bytes, text, www-url-form-encoded and others)
            upload_file (str): full path of the file to upload
            download_file (str): full path of the file to download to
            timeout (int): total timeout for this request
**uri_params (dict): path and query, required and optional parameters
Returns:
aiogoogle.models.Request: An unsent request object
"""
# If collisions are found between the 'key' of **uri_params and explicit kwargs e.g. data, json etc., then
# priority will be given to explicit kwargs. With that said, it's not likely there will be any.
# If you want to double check if there are any collisions,
# you can append the API name and version you're using to tests.globals.ALL_APIS (only if they don't exist, otherwise, you shouldn't worry about collisions)
# Then, run the unit tests and monitor: tests.test_discovery_document.test_parameters_not_colliding_with_google_api__call__ for failure
#
# NOTE: Use '_' instead of '-' when passing uri parameters that have a '-' in their names
#
# Remove params that are None
uri_params = self._del_none_params(uri_params)
# Assert timeout is int
if timeout is not None:
if not isinstance(timeout, int) or type(timeout) == bool:
raise TypeError("Timeouts can only be ints or None")
# Resolve validation status
if not isinstance(validate, bool):
validate = self._should_validate
base_url = self._base_url
# Build full url minus query & fragment
url = self._build_url(
base_url=base_url, uri_params=uri_params.copy(), validate=validate
)
# Filter out query parameters from all uri_params that were passed to this method
passed_query_params = {
param_name: param_info
for param_name, param_info in uri_params.items()
if param_name in self.query_parameters
}
        # Ensure all required query parameters were passed
for param in self.required_query_parameters:
if param not in passed_query_params:
raise ValidationError(f'Missing query parameter: "{param}"')
# Validate url query params
if validate is True:
if passed_query_params:
for param_name, passed_param in passed_query_params.items():
schema = self.parameters[param_name]
if schema.get('repeated') and isinstance(passed_param, (list, set, tuple)):
for param in passed_param:
self._validate(
param,
schema,
schema_name=param_name
)
else:
self._validate(
passed_param,
schema,
schema_name=param_name,
)
# Join query params
if passed_query_params:
uri = url + "?" + urlencode(passed_query_params)
else:
uri = url
# Pop query uri_params consumed
for param_name, _ in passed_query_params.items():
del uri_params[param_name]
# Pop uri_params consumed
# for param_name in self["parameterOrder"]:
for param_name in self.path_parameters:
if param_name in uri_params:
del uri_params[param_name]
# Warn if not all uri_params were consumed/popped
if uri_params: # should be empty by now
            # If there's room for additionalProperties, validate and add them to the URI
if validate:
if self.parameters.get("additionalProperties"):
for _, v in uri_params.items():
self._validate(
v,
self.parameters["additionalProperties"],
schema_name="Additional Url Parameters",
)
else:
raise ValidationError(
f"Invalid (extra) parameters: {uri_params} were passed"
)
else:
if not self.parameters.get("additionalProperties"):
warnings.warn(
"Parameters {} were found and they're probably of no use."
" Check if they're valid parameters".format(str(uri_params))
)
if "?" not in uri:
uri = uri + "?" + urlencode(uri_params)
else:
uri = uri + "&" + urlencode(uri_params)
# Ensure only one param for http req body.
if json and data:
raise TypeError(
"Pass either json or data for the body of the request, not both."
'\nThis is similar to the "body" argument in google-python-client'
"\nThis will validate agains the $request key in this method's "
"specs"
) # This raises a TypeError instead of a ValidationError because
# it will probably make your session raise an error if it passes.
# Better raise it early on
# Validate body
if validate is True:
body = (
json if json is not None else data if data is not None else None
) # json or data or None
if body is not None:
self._validate_body(body)
# Process download_file
if download_file:
if validate is True:
if self.__getitem__("supportsMediaDownload") is not True:
raise ValidationError(
"download_file was provided while method doesn't support media download"
)
media_download = MediaDownload(download_file)
else:
media_download = None
# Process upload_file
if upload_file:
media_upload = self._build_upload_media(
upload_file, uri, validate, fallback_url=url
)
else:
media_upload = None
return Request(
method=self["httpMethod"],
url=uri,
batch_url=self._batch_url,
data=data,
json=json,
timeout=timeout,
media_download=media_download,
media_upload=media_upload,
callback=lambda res: res, # TODO: get rid of this sorcery.
)
def _build_url(self, base_url, uri_params, validate):
if self.path_parameters:
            # sort path params as specified in method_specs.parameterOrder
sorted_required_path_params = (
{}
) # Dict order is guaranteed (by insertion) as of Python 3.6
for param_name in self["parameterOrder"]:
try:
sorted_required_path_params[param_name] = uri_params.pop(param_name)
except KeyError:
raise ValidationError(f"Missing URL path parameter: {param_name}")
# Validate path params
if validate is True:
self._validate_url(sorted_required_path_params)
for k, v in sorted_required_path_params.items():
sorted_required_path_params[k] = quote(str(v), safe='')
# Build full path
# replace named placeholders with empty ones. e.g. {param} --> {}
            # Why? Because some endpoints have different names in their url path placeholders than in their parameter definitions
# e.g. path: {"v1/{+resourceName}/connections"}. e.g. param name: resourceName NOT +resourceName
self._method_specs["path"] = re.sub(
r"\{(.*?)\}", r"{}", self._method_specs["path"]
)
return base_url + self["path"].format(
*list(sorted_required_path_params.values())
)
else:
return base_url + self["path"]
def _build_upload_media(self, upload_file, qualified_url, validate, fallback_url):
if not self["supportsMediaUpload"]:
if validate:
raise ValidationError(
"upload_file was provided while method doesn't support media upload"
)
# This will probably not work, but will return a mediaupload object anyway
return MediaUpload(upload_file, upload_path=fallback_url)
# If resumable, create resumable object
resumable = (
self._build_resumeable_media(qualified_url)
if _safe_getitem(
self._method_specs, "mediaUpload", "protocols", "resumable"
)
else None
)
# Create MediaUpload object and pass it the resumable object we just created
media_upload_url_base = self._root_url + "upload/" + self._service_path
media_upload_url = qualified_url.replace(self._base_url, media_upload_url_base)
max_size = self._convert_str_size_to_int(
_safe_getitem(self._method_specs, "mediaUpload", "maxSize")
)
mime_range = _safe_getitem(self._method_specs, "mediaUpload", "accept")
multipart = self["mediaUpload"]["protocols"]["simple"].get("multipart", True)
return MediaUpload(
upload_file,
upload_path=media_upload_url,
max_size=max_size,
mime_range=mime_range,
multipart=multipart,
resumable=resumable,
validate=validate,
)
def _build_resumeable_media(self, qualified_url):
resumable_url_base = self._root_url + "resumable/upload/" + self._service_path
resumable_url = qualified_url.replace(self._base_url, resumable_url_base)
multipart = self["mediaUpload"]["protocols"]["resumable"].get("multipart", True)
return ResumableUpload(multipart=multipart, upload_path=resumable_url)
@staticmethod
def _convert_str_size_to_int(size):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
size: (str): e.g. such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if size is None:
return None
if len(size) < 2:
return 0
units = size[-2:].upper()
bit_shift = MEDIA_SIZE_BIT_SHIFTS.get(units)
if bit_shift is not None:
return int(size[:-2]) << bit_shift
else:
return int(size)
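    # Example conversions (a quick sketch; the values follow directly from
    # MEDIA_SIZE_BIT_SHIFTS defined at module level):
    #   "10MB" -> 10 << 20 == 10485760
    #   "2GB"  -> 2 << 30  == 2147483648
    #   "1024" -> 1024  (no recognised unit suffix, parsed as a plain int)
    #   None   -> None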
def _validate_url(self, sorted_required_path_params):
for path_param_name, path_param_info in sorted_required_path_params.items():
self._validate(
instance=path_param_info,
schema=self.parameters[path_param_name],
schema_name=path_param_name,
)
def _validate_body(self, req):
request_schema = self._method_specs.get("request")
if request_schema is not None:
schema_name = "Request Body"
if "$ref" in request_schema:
schema_name = request_schema["$ref"]
request_schema = self._schemas[schema_name]
self._validate(req, request_schema, schema_name=schema_name)
else:
raise ValidationError(
"Request body should've been validated, but wasn't because the method doesn't accept a JSON body"
)
def __contains__(self, item):
return item in self.parameters
def __str__(self):
return self["id"] + " method @ " + self._base_url
def __repr__(self):
return self.__str__()
def __len__(self):
return len(self.required_parameters) if self.required_parameters else 0
class Resource:
def __init__(
self,
name,
resource_specs,
global_parameters,
schemas,
root_url,
service_path,
batch_path,
validate,
):
self.name = name
self._resource_specs = resource_specs
self._global_parameters = global_parameters
self._schemas = schemas
self._root_url = root_url
self._service_path = service_path
self._batch_path = batch_path
self._validate = validate
@property
def methods_available(self) -> List[str]:
"""
Returns the names of the methods that this resource provides
"""
return [k for k, v in self["methods"].items()] if self["methods"] else []
@property
def resources_available(self) -> List[str]:
"""
Returns the names of the nested resources in this resource
"""
return [k for k, v in self["resources"].items()] if self["resources"] else []
def _get_resource(self, resource_name):
return Resource(
name=resource_name,
resource_specs=self["resources"][resource_name],
global_parameters=self._global_parameters,
schemas=self._schemas,
root_url=self._root_url,
service_path=self._service_path,
batch_path=self._batch_path,
validate=self._validate,
)
def _get_method(self, method_name):
return Method(
name=method_name,
method_specs=self["methods"][method_name],
global_parameters=self._global_parameters,
schemas=self._schemas,
root_url=self._root_url,
service_path=self._service_path,
batch_path=self._batch_path,
validate=self._validate,
)
def __str__(self):
return self.name + " resource @ " + self._root_url + self._service_path
def __repr__(self):
return self.__str__()
def __call__(self):
raise TypeError(
"Only methods are callables, not resources."
" e.g. api.resource.list() NOT api.resource().list()"
)
def __len__(self):
return len(self.methods_available)
def __contains__(self, item):
return (item in self.methods_available) or (item in self.resources_available)
def __getitem__(self, k):
return self._resource_specs.get(k)
def __getattr__(self, method_or_resource):
"""
Returns either a method or a nested resource
Arguments:
method_or_resource: Name of the method or resource desired.
Returns:
aiogoogle.resource.Resource, aiogoogle.resource.Methods: A Resource or a Method
Note:
This method will first check in nested resources then will check in methods.
Raises:
AttributeError:
"""
# 1. Search in nested resources
if method_or_resource in self.resources_available:
return self._get_resource(method_or_resource)
# 2. Search in methods
elif method_or_resource in self.methods_available:
return self._get_method(method_or_resource)
else:
raise AttributeError(
f"""Resource/Method {method_or_resource} doesn't exist.
Check: https://developers.google.com/ for more info.
\nAvailable resources are:
{self.resources_available}\n
Available methods are {self.methods_available}"""
)
class GoogleAPI:
"""
    Creates a representation of a Google API given a discovery document
Arguments:
discovery_document (dict): A discovery document
        validate (bool): Whether or not to validate user input against the schema defined in the discovery document
"""
def __init__(self, discovery_document, validate=True):
self.discovery_document = self._add_extra_query_param_definitions(
discovery_document
)
self._validate = validate
def _add_extra_query_param_definitions(self, discovery_document):
""" Adds extra parameters that aren't explicitly defined in discovery docuemnts
i.e. "trace", "pp", "strict"
"""
extra_params = {
param: STACK_QUERY_PARAMETER_DEFAULT_VALUE
for param in STACK_QUERY_PARAMETERS
}
if discovery_document.get("parameters"):
discovery_document["parameters"] = {
**discovery_document["parameters"],
**extra_params,
}
else:
discovery_document["parameters"] = extra_params
return discovery_document
@property
def methods_available(self) -> List[str]:
"""
Returns names of the methods provided by this resource
"""
return [k for k, v in self["methods"].items()] if self["methods"] else []
@property
def resources_available(self) -> List[str]:
"""
Returns names of the resources in a given API if any
"""
return [k for k, v in self["resources"].items()] if self["resources"] else []
def _get_resource(self, resource_name):
return Resource(
name=resource_name,
resource_specs=self["resources"][resource_name],
global_parameters=self["parameters"],
schemas=self["schemas"]
            or {},  # jsonschema validator will fail if schemas isn't a dict
root_url=self["rootUrl"],
service_path=self["servicePath"],
batch_path=self["batchPath"],
validate=self._validate,
)
def _get_method(self, method_name):
return Method(
name=method_name,
method_specs=self["methods"][method_name],
global_parameters=self["parameters"],
schemas=self["schemas"]
            or {},  # jsonschema validator will fail if schemas isn't a dict
root_url=self["rootUrl"],
service_path=self["servicePath"],
batch_path=self["batchPath"],
validate=self._validate,
)
def __getattr__(self, method_or_resource) -> Resource:
"""
Returns resources from an API
Note:
This method will first check in resources then will check in methods.
Arguments:
method_or_resource (str): name of the top level method or resource
Example:
::
>>> google_service = GoogleAPI(google_service_discovery_doc)
>>> google_service.user
user resource @ google_service.com/api/v1/
Returns:
aiogoogle.resource.Resource, aiogoogle.resource.Method: A Resource or a Method
Raises:
AttributeError:
"""
if method_or_resource in self.resources_available:
return self._get_resource(method_or_resource)
elif method_or_resource in self.methods_available:
return self._get_method(method_or_resource)
else:
documentation_link = (
self.discovery_document.get("documentationLink")
or "https://developers.google.com/"
)
raise AttributeError(
f"""Resource/Method {method_or_resource} doesn't exist.
Check: {documentation_link} for more info.
\nAvailable resources are:
{self.resources_available}\n
Available methods are {self.methods_available}"""
)
def __getitem__(self, k):
"""
Returns items from the discovery document
Example:
::
>>> google_service = GoogleAPI(google_books_discovery_doc)
>>> google_service['name']
'books'
>>> google_service['id']
'books:v1'
>>> google_service['version']
'v1'
>>> google_service['documentationLink']
'https://developers.google.com/books/docs/v1/getting_started'
>>> google_service['oauth2']['scopes']
https://www.googleapis.com/auth/books: {
description: "Manage your books"
}
Returns:
dict: Discovery Document Item
"""
return self.discovery_document.get(k)
def __contains__(self, name):
return (name in self.resources_available) or (name in self.methods_available)
def __repr__(self):
        labels = f'\nLabels:\n{self["labels"]}' if self["labels"] is not None else ""
return (
self.discovery_document["name"]
+ "-"
+ self.discovery_document["version"]
+ " API @ "
+ self["rootUrl"]
+ self["servicePath"]
+ labels
)
def __str__(self):
return self.__repr__()
def __len__(self):
return len(self.resources_available) + len(self.methods_available)
def __call__(self):
raise TypeError(
"Only methods are callables, not resources."
" e.g. client.resources.user.list() NOT client.resources().user.list()"
)
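# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the resource name "videos" and the "part"
# parameter below are hypothetical and depend entirely on the discovery
# document that is loaded):
#
#   api = GoogleAPI(discovery_document)        # discovery doc fetched elsewhere
#   request = api.videos.list(part="snippet")
#
# GoogleAPI.__getattr__ returns a Resource, Resource.__getattr__ returns a
# Method, and calling the Method builds an unsent aiogoogle.models.Request
# that a session object is expected to send.
# ---------------------------------------------------------------------------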
|
__all__ = ["GoogleAPI", "Resource", "Method"]
import re
import os
import warnings
from urllib.parse import urlencode, quote
from functools import wraps
from typing import List, Generic, TypeVar
from .excs import ValidationError
from .utils import _safe_getitem
from .models import MediaDownload, MediaUpload, ResumableUpload, Request
from .validate import validate as validate_
T = TypeVar("T") # Generic type var
# These are the hard-coded kwargs in Method.__call__
# They're used for testing whether those names will collide with any of the url parameters that are provided by any of the discovery docs.
# If collisions were to be found, that would mean that the user won't be able to pass a url_parameter that shares the same name with any of the RESERVED_KEYWORDS.
RESERVED_KEYWORDS = [
"validate",
"data",
"json",
"upload_file",
"download_file",
"timeout",
]
# From: https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/discovery.py
# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = frozenset(["trace", "pp", "strict"])
STACK_QUERY_PARAMETER_DEFAULT_VALUE = {"type": "string", "location": "query"}
MEDIA_SIZE_BIT_SHIFTS = {"KB": 10, "MB": 20, "GB": 30, "TB": 40}
# TODO: etagRequired: {
# type: "boolean", # noqa: F821 (weird error)
# description: "Whether this method requires an ETag to be specified. The ETag is sent as an HTTP If-Match or If-None-Match header."
# }
# NOTE: etagRequired is only mentioned once in all of the discovery documents available from Google. (In discovery_service-v1. So, it isn't actually being used)
def _temporarily_add_back_dashes_to_param_definitions(f):
"""
When instantiating a Method, Method's constructor will remove all
dashes from the names of its URI params and global params in order
to make it possible to pass uri params through function calls
    e.g. this is viable:   get_videos(my_videos=True)
    while this will fail:  get_videos(my-videos=True)
    This function momentarily adds the '-' back to URL parameters and passed uri_params
    so that they are processed and validated correctly and comply with the discovery doc,
    then reverts to '_' after the wrapped function is done
"""
@wraps(f)
def wrapper(
self,
validate=None,
data=None,
json=None,
upload_file=None,
download_file=None,
timeout=None,
**uri_params,
):
# unfix urls
uri_params = self._replace_dashes_with_underscores_in_user_provided_params(
uri_params, self.parameters
)
# unfix params
self._method_specs["parameters"] = self._restore_dashes_to_params(
self._method_specs.get("parameters")
)
self._global_parameters = self._restore_dashes_to_params(
self._global_parameters
)
# Run function
results = f(
self,
validate,
data,
json,
upload_file,
download_file,
timeout,
**uri_params,
)
# fix params again
self._method_specs[
"parameters"
] = self._replace_dashes_with_underscores_in_params(
self._method_specs.get("parameters")
)
self._global_parameters = self._replace_dashes_with_underscores_in_params(
self._global_parameters
)
return results
return wrapper
class Method:
def __init__(
self,
name,
method_specs,
global_parameters,
schemas,
root_url,
service_path,
batch_path,
validate,
):
# Replaces '-'s with '_'s and preserve old names to revert back to them after this method is called
global_parameters = self._replace_dashes_with_underscores_in_params(
global_parameters
)
method_specs["parameters"] = self._replace_dashes_with_underscores_in_params(
method_specs.get("parameters")
)
self.name = name
self._method_specs = method_specs
self._global_parameters = global_parameters
self._schemas = schemas
self._root_url = root_url
self._service_path = service_path
self._batch_path = batch_path
if (
self["useMediaDownloadService"] is True
and self["supportsMediaDownload"] is True
):
self._download_base_url = self._root_url + "download/" + self._service_path
else:
self._download_base_url = None
self._base_url = self._root_url + self._service_path
self._batch_url = self._root_url + self._batch_path
self._should_validate = validate
# ---- Changes URL parameters with a "-" to "_" -----#
# Depends on how you view it, but this section also changes robots with small mouths to robots with big mouths
@staticmethod
def _replace_dashes_with_underscores_in_params(param_set: Generic[T]) -> T:
if param_set:
for name, schema in list(param_set.items()):
if "-" in name:
new_name = name.replace("-", "_") # See?!
schema["orig_name"] = name
param_set[new_name] = schema
del param_set[name]
return param_set
@staticmethod
def _restore_dashes_to_params(param_set: Generic[T]) -> T:
if param_set:
for name, schema in list(param_set.items()):
if "orig_name" in schema:
param_set[schema["orig_name"]] = schema
del param_set[name]
return param_set
@staticmethod
def _replace_dashes_with_underscores_in_user_provided_params(
uri_params: Generic[T], parameters
) -> T:
for k, v in list(uri_params.items()):
if "_" in k:
if k in parameters:
if "orig_name" in parameters[k]:
uri_params[parameters[k]["orig_name"]] = v
del uri_params[k]
return uri_params
@staticmethod
def _del_none_params(uri_params: Generic[T]) -> T:
for k, v in list(uri_params.items()):
if v is None:
del uri_params[k]
return uri_params
# ---- / Changes URL parameters with a "-" to "_" -----#
@property
def request(self) -> dict:
""" Returns expected request body """
body = self["request"]
if body.get("$ref"):
return self._schemas.get(body["$ref"])
else:
return body
@property
def response(self) -> dict:
""" Retruns expected response body """
body = self["response"]
if body.get("$ref"):
return self._schemas.get(body["$ref"])
else:
return body
@property
def parameters(self) -> dict:
"""
Parameters property
Returns:
dict: All parameters that this method can take as described in the discovery document
"""
if not self._global_parameters and not self["parameters"]:
return {}
elif not self._global_parameters:
return self["parameters"]
elif not self["parameters"]:
return self._global_parameters
else:
return {**self["parameters"], **self._global_parameters}
@property
def optional_parameters(self) -> List[str]:
"""
Optional Parameters
Returns:
list: List of the names of optional parameters this method takes
"""
return (
[
parameter_name
for parameter_name, parameter_info in self.parameters.items()
if parameter_info.get("required") is not True
]
if self.parameters
else []
)
@property
def required_parameters(self) -> List[str]:
"""
Required Parameters
Returns:
list: List of the names of required parameters this method takes
"""
return (
[
parameter_name
for parameter_name, parameter_info in self.parameters.items()
if parameter_info.get("required") is True
]
if self.parameters
else []
)
@property
def path_parameters(self) -> List[str]:
"""
Path Parameters
Returns:
list: List of the names of path parameters this method takes
"""
return (
[
param_name
for param_name, param_info in self.parameters.items()
if param_info.get("location") == "path"
]
if self.parameters
else []
)
@property
def query_parameters(self) -> List[str]:
"""
Query Parameters
Returns:
list: List of the names of Query parameters this method takes
"""
return (
[
param_name
for param_name, param_info in self.parameters.items()
if param_info.get("location") == "query"
]
if self.parameters
else []
)
@property
def required_query_parameters(self) -> List[str]:
"""
Required Query Parameters
Returns:
list: List of the names of required query parameters this method takes
"""
return (
[
param_name
for param_name in self.query_parameters
if param_name in self.required_parameters
]
if self.query_parameters
else []
)
@property
def optional_query_parameters(self) -> List[str]:
"""
Optional Query Parameters
Returns:
list: List of the names of optional query parameters this method takes
"""
return (
[
param_name
for param_name in self.query_parameters
if param_name in self.optional_parameters
]
if self.query_parameters
else []
)
def __getitem__(self, key):
"""
Examples:
::
>>> self['description']
"method description"
>>> self['scopes']
['returns', 'scopes', 'required', 'by', 'this', 'method', 'in', 'a', 'list']
>>> self['supportsMediaDownload']
False
>>> self['supportsMediaUpload']
True
>>> self['httpMethod']
'GET'
Hint:
Using this method with ``scopes`` as an argument can be useful for incremental authorization. (Requesting scopes when needed. As opposed to requesting them at once)
for more: https://developers.google.com/identity/protocols/OAuth2WebServer#incrementalAuth
"""
return self._method_specs.get(key)
def _validate(self, instance, schema, schema_name=None):
return validate_(instance, schema, self._schemas, schema_name)
@_temporarily_add_back_dashes_to_param_definitions
def __call__(
self,
validate=None,
data=None,
json=None,
upload_file=None,
download_file=None,
timeout=None,
**uri_params,
) -> Request:
"""
Builds a request from this method
Note:
            * When passing ``datetime.datetime`` or ``datetime.date`` objects, pass them in JSON (ISO) format.
            * Aiogoogle won't do that for you, as it would be a big hassle to iterate over every item in ``**uri_params``, ``json`` and ``data`` to check for datetime objects.
            * Fortunately, Python makes that easy to do yourself.
            * Instead of passing, say, ``datetime.datetime.utcnow()``, pass ``datetime.datetime.utcnow().isoformat()``
        Note:
            * All ``None`` values are omitted before sending to Google APIs. If you want to explicitly pass a JSON null, pass it as ``"null"``, not ``None``
        Arguments:
            validate (bool): Overrides :param: aiogoogle.Aiogoogle.validate if not None
            json (dict): JSON body
            data (any): Data body (bytes, text, www-url-form-encoded and others)
            upload_file (str): full path of the file to upload
            download_file (str): full path of the file to download to
            timeout (int): total timeout for this request
**uri_params (dict): path and query, required and optional parameters
Returns:
aiogoogle.models.Request: An unsent request object
"""
# If collisions are found between the 'key' of **uri_params and explicit kwargs e.g. data, json etc., then
# priority will be given to explicit kwargs. With that said, it's not likely there will be any.
# If you want to double check if there are any collisions,
# you can append the API name and version you're using to tests.globals.ALL_APIS (only if they don't exist, otherwise, you shouldn't worry about collisions)
# Then, run the unit tests and monitor: tests.test_discovery_document.test_parameters_not_colliding_with_google_api__call__ for failure
#
# NOTE: Use '_' instead of '-' when passing uri parameters that have a '-' in their names
#
# Remove params that are None
uri_params = self._del_none_params(uri_params)
# Assert timeout is int
if timeout is not None:
if not isinstance(timeout, int) or type(timeout) == bool:
raise TypeError("Timeouts can only be ints or None")
# Resolve validation status
if not isinstance(validate, bool):
validate = self._should_validate
base_url = self._base_url
# Build full url minus query & fragment
url = self._build_url(
base_url=base_url, uri_params=uri_params.copy(), validate=validate
)
# Filter out query parameters from all uri_params that were passed to this method
passed_query_params = {
param_name: param_info
for param_name, param_info in uri_params.items()
if param_name in self.query_parameters
}
        # Ensure all required query parameters were passed
for param in self.required_query_parameters:
if param not in passed_query_params:
raise ValidationError(f'Missing query parameter: "{param}"')
# Validate url query params
if validate is True:
if passed_query_params:
for param_name, passed_param in passed_query_params.items():
schema = self.parameters[param_name]
if schema.get('repeated') and isinstance(passed_param, (list, set, tuple)):
for param in passed_param:
self._validate(
param,
schema,
schema_name=param_name
)
else:
self._validate(
passed_param,
schema,
schema_name=param_name,
)
# Join query params
if passed_query_params:
uri = url + "?" + urlencode(passed_query_params)
else:
uri = url
# Pop query uri_params consumed
for param_name, _ in passed_query_params.items():
del uri_params[param_name]
# Pop uri_params consumed
# for param_name in self["parameterOrder"]:
for param_name in self.path_parameters:
if param_name in uri_params:
del uri_params[param_name]
# Warn if not all uri_params were consumed/popped
if uri_params: # should be empty by now
            # If there's room for additionalProperties, validate and add them to the URI
if validate:
if self.parameters.get("additionalProperties"):
for _, v in uri_params.items():
self._validate(
v,
self.parameters["additionalProperties"],
schema_name="Additional Url Parameters",
)
else:
raise ValidationError(
f"Invalid (extra) parameters: {uri_params} were passed"
)
else:
if not self.parameters.get("additionalProperties"):
warnings.warn(
"Parameters {} were found and they're probably of no use."
" Check if they're valid parameters".format(str(uri_params))
)
if "?" not in uri:
uri = uri + "?" + urlencode(uri_params)
else:
uri = uri + "&" + urlencode(uri_params)
# Ensure only one param for http req body.
if json and data:
raise TypeError(
"Pass either json or data for the body of the request, not both."
'\nThis is similar to the "body" argument in google-python-client'
"\nThis will validate agains the $request key in this method's "
"specs"
) # This raises a TypeError instead of a ValidationError because
# it will probably make your session raise an error if it passes.
# Better raise it early on
# Validate body
if validate is True:
body = (
json if json is not None else data if data is not None else None
) # json or data or None
if body is not None:
self._validate_body(body)
# Process download_file
if download_file:
if validate is True:
if self.__getitem__("supportsMediaDownload") is not True:
raise ValidationError(
"download_file was provided while method doesn't support media download"
)
media_download = MediaDownload(download_file)
else:
media_download = None
# Process upload_file
if upload_file:
media_upload = self._build_upload_media(
upload_file, uri, validate, fallback_url=url
)
else:
media_upload = None
return Request(
method=self["httpMethod"],
url=uri,
batch_url=self._batch_url,
data=data,
json=json,
timeout=timeout,
media_download=media_download,
media_upload=media_upload,
callback=lambda res: res, # TODO: get rid of this sorcery.
)
def _build_url(self, base_url, uri_params, validate):
if self.path_parameters:
            # sort path params as specified in method_specs.parameterOrder
sorted_required_path_params = (
{}
) # Dict order is guaranteed (by insertion) as of Python 3.6
for param_name in self["parameterOrder"]:
try:
sorted_required_path_params[param_name] = uri_params.pop(param_name)
except KeyError:
raise ValidationError(f"Missing URL path parameter: {param_name}")
# Validate path params
if validate is True:
self._validate_url(sorted_required_path_params)
for k, v in sorted_required_path_params.items():
sorted_required_path_params[k] = quote(str(v), safe='')
# Build full path
# replace named placeholders with empty ones. e.g. {param} --> {}
            # Why? Because some endpoints have different names in their url path placeholders than in their parameter definitions
# e.g. path: {"v1/{+resourceName}/connections"}. e.g. param name: resourceName NOT +resourceName
self._method_specs["path"] = re.sub(
r"\{(.*?)\}", r"{}", self._method_specs["path"]
)
return base_url + self["path"].format(
*list(sorted_required_path_params.values())
)
else:
return base_url + self["path"]
def _build_upload_media(self, upload_file, qualified_url, validate, fallback_url):
if not self["supportsMediaUpload"]:
if validate:
raise ValidationError(
"upload_file was provided while method doesn't support media upload"
)
# This will probably not work, but will return a mediaupload object anyway
return MediaUpload(upload_file, upload_path=fallback_url)
# If resumable, create resumable object
resumable = (
self._build_resumeable_media(qualified_url)
if _safe_getitem(
self._method_specs, "mediaUpload", "protocols", "resumable"
)
else None
)
# Create MediaUpload object and pass it the resumable object we just created
media_upload_url_base = self._root_url + "upload/" + self._service_path
media_upload_url = qualified_url.replace(self._base_url, media_upload_url_base)
max_size = self._convert_str_size_to_int(
_safe_getitem(self._method_specs, "mediaUpload", "maxSize")
)
mime_range = _safe_getitem(self._method_specs, "mediaUpload", "accept")
multipart = self["mediaUpload"]["protocols"]["simple"].get("multipart", True)
return MediaUpload(
upload_file,
upload_path=media_upload_url,
max_size=max_size,
mime_range=mime_range,
multipart=multipart,
resumable=resumable,
validate=validate,
)
def _build_resumeable_media(self, qualified_url):
resumable_url_base = self._root_url + "resumable/upload/" + self._service_path
resumable_url = qualified_url.replace(self._base_url, resumable_url_base)
multipart = self["mediaUpload"]["protocols"]["resumable"].get("multipart", True)
return ResumableUpload(multipart=multipart, upload_path=resumable_url)
@staticmethod
def _convert_str_size_to_int(size):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
size: (str): e.g. such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if size is None:
return None
if len(size) < 2:
return 0
units = size[-2:].upper()
bit_shift = MEDIA_SIZE_BIT_SHIFTS.get(units)
if bit_shift is not None:
return int(size[:-2]) << bit_shift
else:
return int(size)
def _validate_url(self, sorted_required_path_params):
for path_param_name, path_param_info in sorted_required_path_params.items():
self._validate(
instance=path_param_info,
schema=self.parameters[path_param_name],
schema_name=path_param_name,
)
def _validate_body(self, req):
request_schema = self._method_specs.get("request")
if request_schema is not None:
schema_name = "Request Body"
if "$ref" in request_schema:
schema_name = request_schema["$ref"]
request_schema = self._schemas[schema_name]
self._validate(req, request_schema, schema_name=schema_name)
else:
raise ValidationError(
"Request body should've been validated, but wasn't because the method doesn't accept a JSON body"
)
def __contains__(self, item):
return item in self.parameters
def __str__(self):
return self["id"] + " method @ " + self._base_url
def __repr__(self):
return self.__str__()
def __len__(self):
return len(self.required_parameters) if self.required_parameters else 0
class Resource:
def __init__(
self,
name,
resource_specs,
global_parameters,
schemas,
root_url,
service_path,
batch_path,
validate,
):
self.name = name
self._resource_specs = resource_specs
self._global_parameters = global_parameters
self._schemas = schemas
self._root_url = root_url
self._service_path = service_path
self._batch_path = batch_path
self._validate = validate
@property
def methods_available(self) -> List[str]:
"""
Returns the names of the methods that this resource provides
"""
return [k for k, v in self["methods"].items()] if self["methods"] else []
@property
def resources_available(self) -> List[str]:
"""
Returns the names of the nested resources in this resource
"""
return [k for k, v in self["resources"].items()] if self["resources"] else []
def _get_resource(self, resource_name):
return Resource(
name=resource_name,
resource_specs=self["resources"][resource_name],
global_parameters=self._global_parameters,
schemas=self._schemas,
root_url=self._root_url,
service_path=self._service_path,
batch_path=self._batch_path,
validate=self._validate,
)
def _get_method(self, method_name):
return Method(
name=method_name,
method_specs=self["methods"][method_name],
global_parameters=self._global_parameters,
schemas=self._schemas,
root_url=self._root_url,
service_path=self._service_path,
batch_path=self._batch_path,
validate=self._validate,
)
def __str__(self):
return self.name + " resource @ " + self._root_url + self._service_path
def __repr__(self):
return self.__str__()
def __call__(self):
raise TypeError(
"Only methods are callables, not resources."
" e.g. api.resource.list() NOT api.resource().list()"
)
def __len__(self):
return len(self.methods_available)
def __contains__(self, item):
return (item in self.methods_available) or (item in self.resources_available)
def __getitem__(self, k):
return self._resource_specs.get(k)
def __getattr__(self, method_or_resource):
"""
Returns either a method or a nested resource
Arguments:
method_or_resource: Name of the method or resource desired.
Returns:
aiogoogle.resource.Resource, aiogoogle.resource.Methods: A Resource or a Method
Note:
This method will first check in nested resources then will check in methods.
Raises:
AttributeError:
"""
# 1. Search in nested resources
if method_or_resource in self.resources_available:
return self._get_resource(method_or_resource)
# 2. Search in methods
elif method_or_resource in self.methods_available:
return self._get_method(method_or_resource)
else:
raise AttributeError(
f"""Resource/Method {method_or_resource} doesn't exist.
Check: https://developers.google.com/ for more info.
\nAvailable resources are:
{self.resources_available}\n
Available methods are {self.methods_available}"""
)
class GoogleAPI:
"""
    Creates a representation of a Google API given a discovery document
Arguments:
discovery_document (dict): A discovery document
        validate (bool): Whether or not to validate user input against the schema defined in the discovery document
"""
def __init__(self, discovery_document, validate=True):
self.discovery_document = self._add_extra_query_param_definitions(
discovery_document
)
self._validate = validate
def _add_extra_query_param_definitions(self, discovery_document):
""" Adds extra parameters that aren't explicitly defined in discovery docuemnts
i.e. "trace", "pp", "strict"
"""
extra_params = {
param: STACK_QUERY_PARAMETER_DEFAULT_VALUE
for param in STACK_QUERY_PARAMETERS
}
if discovery_document.get("parameters"):
discovery_document["parameters"] = {
**discovery_document["parameters"],
**extra_params,
}
else:
discovery_document["parameters"] = extra_params
return discovery_document
@property
def methods_available(self) -> List[str]:
"""
Returns names of the methods provided by this resource
"""
return [k for k, v in self["methods"].items()] if self["methods"] else []
@property
def resources_available(self) -> List[str]:
"""
Returns names of the resources in a given API if any
"""
return [k for k, v in self["resources"].items()] if self["resources"] else []
def _get_resource(self, resource_name):
return Resource(
name=resource_name,
resource_specs=self["resources"][resource_name],
global_parameters=self["parameters"],
schemas=self["schemas"]
            or {},  # jsonschema validator will fail if schemas isn't a dict
root_url=self["rootUrl"],
service_path=self["servicePath"],
batch_path=self["batchPath"],
validate=self._validate,
)
def _get_method(self, method_name):
return Method(
name=method_name,
method_specs=self["methods"][method_name],
global_parameters=self["parameters"],
schemas=self["schemas"]
            or {},  # jsonschema validator will fail if schemas isn't a dict
root_url=self["rootUrl"],
service_path=self["servicePath"],
batch_path=self["batchPath"],
validate=self._validate,
)
def __getattr__(self, method_or_resource) -> Resource:
"""
Returns resources from an API
Note:
This method will first check in resources then will check in methods.
Arguments:
method_or_resource (str): name of the top level method or resource
Example:
::
>>> google_service = GoogleAPI(google_service_discovery_doc)
>>> google_service.user
user resource @ google_service.com/api/v1/
Returns:
aiogoogle.resource.Resource, aiogoogle.resource.Method: A Resource or a Method
Raises:
AttributeError:
"""
if method_or_resource in self.resources_available:
return self._get_resource(method_or_resource)
elif method_or_resource in self.methods_available:
return self._get_method(method_or_resource)
else:
documentation_link = (
self.discovery_document.get("documentationLink")
or "https://developers.google.com/"
)
raise AttributeError(
f"""Resource/Method {method_or_resource} doesn't exist.
Check: {documentation_link} for more info.
\nAvailable resources are:
{self.resources_available}\n
Available methods are {self.methods_available}"""
)
def __getitem__(self, k):
"""
Returns items from the discovery document
Example:
::
>>> google_service = GoogleAPI(google_books_discovery_doc)
>>> google_service['name']
'books'
>>> google_service['id']
'books:v1'
>>> google_service['version']
'v1'
>>> google_service['documentationLink']
'https://developers.google.com/books/docs/v1/getting_started'
>>> google_service['oauth2']['scopes']
https://www.googleapis.com/auth/books: {
description: "Manage your books"
}
Returns:
dict: Discovery Document Item
"""
return self.discovery_document.get(k)
def __contains__(self, name):
return (name in self.resources_available) or (name in self.methods_available)
def __repr__(self):
labels = f'\nLabels:\n{self["labels"]}' if self["labels"] is not None else ""
return (
self.discovery_document["name"]
+ "-"
+ self.discovery_document["version"]
+ " API @ "
+ self["rootUrl"]
+ self["servicePath"]
+ labels
)
def __str__(self):
return self.__repr__()
def __len__(self):
return len(self.resources_available) + len(self.methods_available)
def __call__(self):
raise TypeError(
"Only methods are callables, not resources."
" e.g. client.resources.user.list() NOT client.resources().user.list()"
)
|
import copy
import re
import typing
import uritemplate
from django.core import exceptions as django_exceptions
from django.core import validators
from django.db import models
from django.utils.translation import gettext_lazy as _
from rest_framework import permissions, renderers, serializers
from rest_framework.fields import _UnvalidatedField, empty
from rest_framework.generics import CreateAPIView, GenericAPIView, ListCreateAPIView
from rest_framework.mixins import ListModelMixin
from rest_framework.schemas.inspectors import ViewInspector
from rest_framework.schemas.utils import get_pk_description # type: ignore
from rest_framework.settings import api_settings
from rest_framework.utils.model_meta import get_field_info
from rest_framework.views import APIView
from drf_spectacular.authentication import OpenApiAuthenticationExtension
from drf_spectacular.contrib import * # noqa: F403, F401
from drf_spectacular.drainage import add_trace_message, get_override, has_override
from drf_spectacular.extensions import (
OpenApiFilterExtension, OpenApiSerializerExtension, OpenApiSerializerFieldExtension,
)
from drf_spectacular.plumbing import (
ComponentRegistry, ResolvedComponent, UnableToProceedError, append_meta, build_array_type,
build_basic_type, build_choice_field, build_examples_list, build_media_type_object,
build_object_type, build_parameter_type, error, follow_field_source, force_instance, get_doc,
get_type_hints, get_view_model, is_basic_type, is_field, is_list_serializer,
is_patched_serializer, is_serializer, is_trivial_string_variation, resolve_regex_path_parameter,
resolve_type_hint, safe_ref, warn,
)
from drf_spectacular.settings import spectacular_settings
from drf_spectacular.types import OpenApiTypes, build_generic_type
from drf_spectacular.utils import OpenApiParameter, OpenApiResponse
class AutoSchema(ViewInspector):
method_mapping = {
'get': 'retrieve',
'post': 'create',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy',
}
def get_operation(self, path, path_regex, path_prefix, method, registry: ComponentRegistry):
self.registry = registry
self.path = path
self.path_regex = path_regex
self.path_prefix = path_prefix
self.method = method
operation = {}
operation['operationId'] = self.get_operation_id()
operation['description'] = self.get_description()
summary = self.get_summary()
if summary:
operation['summary'] = summary
parameters = self._get_parameters()
if parameters:
operation['parameters'] = parameters
tags = self.get_tags()
if tags:
operation['tags'] = tags
request_body = self._get_request_body()
if request_body:
operation['requestBody'] = request_body
auth = self.get_auth()
if auth:
operation['security'] = auth
deprecated = self.is_deprecated()
if deprecated:
operation['deprecated'] = deprecated
operation['responses'] = self._get_response_bodies()
return operation
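    # Rough shape of the dict built by get_operation() above (a sketch; the
    # concrete values are placeholders, not output from a real view, and the
    # optional keys only appear when the corresponding helper returns something
    # truthy):
    #   {
    #       'operationId': 'users_list',
    #       'description': '...',
    #       'summary': '...',           # optional
    #       'parameters': [...],        # optional
    #       'tags': ['users'],          # optional
    #       'requestBody': {...},       # optional
    #       'security': [...],          # optional
    #       'deprecated': True,         # optional
    #       'responses': {...},
    #   }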
def _is_list_view(self, serializer=None):
"""
partially heuristic approach to determine if a view yields an object or a
list of objects. used for operationId naming, array building and pagination.
defaults to False if all introspection fail.
"""
if serializer is None:
serializer = self.get_response_serializers()
if isinstance(serializer, dict) and serializer:
# extract likely main serializer from @extend_schema override
serializer = {str(code): s for code, s in serializer.items()}
serializer = serializer[min(serializer)]
if is_list_serializer(serializer):
return True
if is_basic_type(serializer):
return False
if hasattr(self.view, 'action'):
return self.view.action == 'list'
# list responses are "usually" only returned by GET
if self.method.lower() != 'get':
return False
if isinstance(self.view, ListModelMixin):
return True
# primary key/lookup variable in path is a strong indicator for retrieve
if isinstance(self.view, GenericAPIView):
lookup_url_kwarg = self.view.lookup_url_kwarg or self.view.lookup_field
if lookup_url_kwarg in uritemplate.variables(self.path):
return False
return False
def _is_create_operation(self):
if self.method != 'POST':
return False
if getattr(self.view, 'action', None) == 'create':
return True
if isinstance(self.view, (ListCreateAPIView, CreateAPIView)):
return True
return False
def get_override_parameters(self):
""" override this for custom behaviour """
return []
def _process_override_parameters(self):
result = {}
for parameter in self.get_override_parameters():
if isinstance(parameter, OpenApiParameter):
if parameter.response:
continue
if is_basic_type(parameter.type):
schema = build_basic_type(parameter.type)
elif is_serializer(parameter.type):
schema = self.resolve_serializer(parameter.type, 'request').ref
else:
schema = parameter.type
if parameter.exclude:
result[parameter.name, parameter.location] = None
else:
result[parameter.name, parameter.location] = build_parameter_type(
name=parameter.name,
schema=schema,
location=parameter.location,
required=parameter.required,
description=parameter.description,
enum=parameter.enum,
deprecated=parameter.deprecated,
style=parameter.style,
explode=parameter.explode,
default=parameter.default,
examples=build_examples_list(parameter.examples),
)
elif is_serializer(parameter):
# explode serializer into separate parameters. defaults to QUERY location
mapped = self._map_serializer(parameter, 'request')
for property_name, property_schema in mapped['properties'].items():
result[property_name, OpenApiParameter.QUERY] = build_parameter_type(
name=property_name,
schema=property_schema,
description=property_schema.pop('description', None),
location=OpenApiParameter.QUERY,
required=property_name in mapped.get('required', []),
)
else:
warn(f'could not resolve parameter annotation {parameter}. Skipping.')
return result
def _get_format_parameters(self):
parameters = []
formats = self.map_renderers('format')
if api_settings.URL_FORMAT_OVERRIDE and len(formats) > 1:
parameters.append(build_parameter_type(
name=api_settings.URL_FORMAT_OVERRIDE,
schema=build_basic_type(OpenApiTypes.STR),
location=OpenApiParameter.QUERY,
enum=formats
))
return parameters
def _get_parameters(self):
def dict_helper(parameters):
return {(p['name'], p['in']): p for p in parameters}
override_parameters = self._process_override_parameters()
# remove overridden path parameters beforehand so that there are no irrelevant warnings.
path_variables = [
v for v in uritemplate.variables(self.path) if (v, 'path') not in override_parameters
]
parameters = {
**dict_helper(self._resolve_path_parameters(path_variables)),
**dict_helper(self._get_filter_parameters()),
**dict_helper(self._get_pagination_parameters()),
**dict_helper(self._get_format_parameters()),
}
# override/add/remove @extend_schema parameters
for key, parameter in override_parameters.items():
if parameter is None:
# either omit or explicitly remove parameter
if key in parameters:
del parameters[key]
else:
parameters[key] = parameter
if callable(spectacular_settings.SORT_OPERATION_PARAMETERS):
return sorted(parameters.values(), key=spectacular_settings.SORT_OPERATION_PARAMETERS)
elif spectacular_settings.SORT_OPERATION_PARAMETERS:
return sorted(parameters.values(), key=lambda p: p['name'])
else:
return list(parameters.values())
def get_description(self):
""" override this for custom behaviour """
action_or_method = getattr(self.view, getattr(self.view, 'action', self.method.lower()), None)
view_doc = get_doc(self.view.__class__)
action_doc = get_doc(action_or_method)
return action_doc or view_doc
def get_summary(self):
""" override this for custom behaviour """
return None
def get_auth(self):
"""
Obtains authentication classes and permissions from view. If authentication
is known, resolve security requirement for endpoint and security definition for
the component section.
For custom authentication subclass ``OpenApiAuthenticationExtension``.
"""
auths = []
for authenticator in self.view.get_authenticators():
if (
spectacular_settings.AUTHENTICATION_WHITELIST
and authenticator.__class__ not in spectacular_settings.AUTHENTICATION_WHITELIST
):
continue
scheme = OpenApiAuthenticationExtension.get_match(authenticator)
if not scheme:
warn(
f'could not resolve authenticator {authenticator.__class__}. There '
f'was no OpenApiAuthenticationExtension registered for that class. '
f'Try creating one by subclassing it. Ignoring for now.'
)
continue
security_requirements = scheme.get_security_requirement(self)
if security_requirements is not None:
auths.append(security_requirements)
component = ResolvedComponent(
name=scheme.name,
type=ResolvedComponent.SECURITY_SCHEMA,
object=authenticator.__class__,
schema=scheme.get_security_definition(self)
)
self.registry.register_on_missing(component)
if spectacular_settings.SECURITY:
auths.extend(spectacular_settings.SECURITY)
perms = [p.__class__ for p in self.view.get_permissions()]
if permissions.AllowAny in perms:
auths.append({})
elif permissions.IsAuthenticatedOrReadOnly in perms and self.method in permissions.SAFE_METHODS:
auths.append({})
return auths
def get_request_serializer(self) -> typing.Any:
""" override this for custom behaviour """
return self._get_serializer()
def get_response_serializers(self) -> typing.Any:
""" override this for custom behaviour """
return self._get_serializer()
def get_tags(self) -> typing.List[str]:
""" override this for custom behaviour """
tokenized_path = self._tokenize_path()
# use first non-parameter path part as tag
return tokenized_path[:1]
def get_operation_id(self):
""" override this for custom behaviour """
tokenized_path = self._tokenize_path()
# replace dashes as they can be problematic later in code generation
tokenized_path = [t.replace('-', '_') for t in tokenized_path]
if self.method.lower() == 'get' and self._is_list_view():
action = 'list'
else:
action = self.method_mapping[self.method.lower()]
if not tokenized_path:
tokenized_path.append('root')
if re.search(r'<drf_format_suffix\w*:\w+>', self.path_regex):
tokenized_path.append('formatted')
return '_'.join(tokenized_path + [action])
def is_deprecated(self):
""" override this for custom behaviour """
return False
def _tokenize_path(self):
# remove path prefix
path = re.sub(
pattern=self.path_prefix,
repl='',
string=self.path,
flags=re.IGNORECASE
)
# remove path variables
path = re.sub(pattern=r'\{[\w\-]+\}', repl='', string=path)
# cleanup and tokenize remaining parts.
path = path.rstrip('/').lstrip('/').split('/')
return [t for t in path if t]
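    # Worked example for the two methods above: given path '/api/users/{id}/' and path prefix
    # '/api', _tokenize_path() yields ['users']. A GET on that single-object endpoint is not a
    # list view, so the action resolves to 'retrieve' and get_operation_id() returns
    # 'users_retrieve'.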
def _resolve_path_parameters(self, variables):
parameters = []
for variable in variables:
schema = build_basic_type(OpenApiTypes.STR)
description = ''
resolved_parameter = resolve_regex_path_parameter(
self.path_regex, variable, self.map_renderers('format'),
)
if resolved_parameter:
schema = resolved_parameter['schema']
elif get_view_model(self.view) is None:
warn(
                    f'could not derive type of path parameter "{variable}" because it '
f'is untyped and obtaining queryset from the viewset failed. '
f'Consider adding a type to the path (e.g. <int:{variable}>) or annotating '
f'the parameter type with @extend_schema. Defaulting to "string".'
)
else:
try:
model = get_view_model(self.view)
model_field = model._meta.get_field(variable)
schema = self._map_model_field(model_field, direction=None)
if 'description' not in schema and model_field.primary_key:
description = get_pk_description(model, model_field)
except django_exceptions.FieldDoesNotExist:
warn(
f'could not derive type of path parameter "{variable}" because '
f'model "{model}" did contain no such field. Consider annotating '
f'parameter with @extend_schema. Defaulting to "string".'
)
parameters.append(build_parameter_type(
name=variable,
location=OpenApiParameter.PATH,
description=description,
schema=schema
))
return parameters
def _get_filter_parameters(self):
if not self._is_list_view():
return []
if getattr(self.view, 'filter_backends', None) is None:
return []
parameters = []
for filter_backend in self.view.filter_backends:
filter_extension = OpenApiFilterExtension.get_match(filter_backend())
if filter_extension:
parameters += filter_extension.get_schema_operation_parameters(self)
else:
parameters += filter_backend().get_schema_operation_parameters(self.view)
return parameters
def _get_pagination_parameters(self):
if not self._is_list_view():
return []
paginator = self._get_paginator()
if not paginator:
return []
filter_extension = OpenApiFilterExtension.get_match(paginator)
if filter_extension:
return filter_extension.get_schema_operation_parameters(self)
else:
return paginator.get_schema_operation_parameters(self.view)
def _map_model_field(self, model_field, direction):
assert isinstance(model_field, models.Field)
# to get a fully initialized serializer field we use DRF's own init logic
try:
field_cls, field_kwargs = serializers.ModelSerializer().build_field(
field_name=model_field.name,
info=get_field_info(model_field.model),
model_class=model_field.model,
nested_depth=0,
)
field = field_cls(**field_kwargs)
field.field_name = model_field.name
except: # noqa
field = None
# For some cases, the DRF init logic either breaks (custom field with internal type) or
# the resulting field is underspecified with regards to the schema (ReadOnlyField).
if field and isinstance(field, serializers.PrimaryKeyRelatedField):
# special case handling only for _resolve_path_parameters() where neither queryset nor
# parent is set by build_field. patch in queryset as _map_serializer_field requires it
if not field.queryset:
field.queryset = model_field.related_model.objects.none()
return self._map_serializer_field(field, direction)
elif isinstance(field, serializers.ManyRelatedField):
# special case handling similar to the case above. "parent.parent" on child_relation
# is None and there is no queryset. patch in as _map_serializer_field requires one.
if not field.child_relation.queryset:
field.child_relation.queryset = model_field.related_model.objects.none()
return self._map_serializer_field(field, direction)
elif field and not isinstance(field, (serializers.ReadOnlyField, serializers.ModelField)):
return self._map_serializer_field(field, direction)
elif isinstance(model_field, models.ForeignKey):
return self._map_model_field(model_field.target_field, direction)
elif hasattr(models, 'JSONField') and isinstance(model_field, models.JSONField):
# fix for DRF==3.11 with django>=3.1 as it is not yet represented in the field_mapping
return build_basic_type(OpenApiTypes.OBJECT)
elif hasattr(models, model_field.get_internal_type()):
# be graceful when the model field is not explicitly mapped to a serializer
internal_type = getattr(models, model_field.get_internal_type())
field_cls = serializers.ModelSerializer.serializer_field_mapping.get(internal_type)
if not field_cls:
warn(
f'model field "{model_field.get_internal_type()}" has no mapping in '
f'ModelSerializer. It may be a deprecated field. Defaulting to "string"'
)
return build_basic_type(OpenApiTypes.STR)
return self._map_serializer_field(field_cls(), direction)
else:
error(
f'could not resolve model field "{model_field}". Failed to resolve through '
f'serializer_field_mapping, get_internal_type(), or any override mechanism. '
f'Defaulting to "string"'
)
return build_basic_type(OpenApiTypes.STR)
def _map_serializer_field(self, field, direction, bypass_extensions=False):
meta = self._get_serializer_field_meta(field)
if has_override(field, 'field'):
override = get_override(field, 'field')
if is_basic_type(override):
schema = build_basic_type(override)
if schema is None:
return None
elif isinstance(override, dict):
schema = override
else:
schema = self._map_serializer_field(force_instance(override), direction)
field_component_name = get_override(field, 'field_component_name')
if field_component_name:
component = ResolvedComponent(
name=field_component_name,
type=ResolvedComponent.SCHEMA,
schema=schema,
object=field,
)
self.registry.register_on_missing(component)
return append_meta(component.ref, meta)
else:
return append_meta(schema, meta)
serializer_field_extension = OpenApiSerializerFieldExtension.get_match(field)
if serializer_field_extension and not bypass_extensions:
schema = serializer_field_extension.map_serializer_field(self, direction)
if serializer_field_extension.get_name():
component = ResolvedComponent(
name=serializer_field_extension.get_name(),
type=ResolvedComponent.SCHEMA,
schema=schema,
object=field,
)
self.registry.register_on_missing(component)
return append_meta(component.ref, meta)
else:
return append_meta(schema, meta)
# nested serializer with many=True gets automatically replaced with ListSerializer
if is_list_serializer(field):
if is_serializer(field.child):
component = self.resolve_serializer(field.child, direction)
return append_meta(build_array_type(component.ref), meta) if component else None
else:
schema = self._map_serializer_field(field.child, direction)
return append_meta(build_array_type(schema), meta)
# nested serializer
if is_serializer(field):
component = self.resolve_serializer(field, direction)
return append_meta(component.ref, meta) if component else None
# Related fields.
if isinstance(field, serializers.ManyRelatedField):
schema = self._map_serializer_field(field.child_relation, direction)
# remove hand-over initkwargs applying only to outer scope
schema.pop('description', None)
schema.pop('readOnly', None)
return append_meta(build_array_type(schema), meta)
if isinstance(field, serializers.PrimaryKeyRelatedField):
# read_only fields do not have a Manager by design. go around and get field
            # from parent. also avoid calling Manager.__bool__ as it might be customized
# to hit the database.
if getattr(field, 'queryset', None) is not None:
model_field = field.queryset.model._meta.pk
else:
if isinstance(field.parent, serializers.ManyRelatedField):
model = field.parent.parent.Meta.model
source = field.parent.source.split('.')
else:
model = field.parent.Meta.model
source = field.source.split('.')
                # estimates the relating model field and jumps to its target model PK field.
# also differentiate as source can be direct (pk) or relation field (model).
model_field = follow_field_source(model, source)
if callable(model_field):
# follow_field_source bailed with a warning. be graceful and default to str.
model_field = models.TextField()
# primary keys are usually non-editable (readOnly=True) and map_model_field correctly
# signals that attribute. however this does not apply in the context of relations.
schema = self._map_model_field(model_field, direction)
schema.pop('readOnly', None)
return append_meta(schema, meta)
if isinstance(field, serializers.StringRelatedField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.SlugRelatedField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.HyperlinkedIdentityField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.HyperlinkedRelatedField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.MultipleChoiceField):
return append_meta(build_array_type(build_choice_field(field)), meta)
if isinstance(field, serializers.ChoiceField):
return append_meta(build_choice_field(field), meta)
if isinstance(field, serializers.ListField):
if isinstance(field.child, _UnvalidatedField):
return append_meta(build_array_type(build_basic_type(OpenApiTypes.ANY)), meta)
elif is_serializer(field.child):
component = self.resolve_serializer(field.child, direction)
return append_meta(build_array_type(component.ref), meta) if component else None
else:
schema = self._map_serializer_field(field.child, direction)
# remove automatically attached but redundant title
if is_trivial_string_variation(field.field_name, schema.get('title')):
schema.pop('title', None)
return append_meta(build_array_type(schema), meta)
# DateField and DateTimeField type is string
if isinstance(field, serializers.DateField):
return append_meta(build_basic_type(OpenApiTypes.DATE), meta)
if isinstance(field, serializers.DateTimeField):
return append_meta(build_basic_type(OpenApiTypes.DATETIME), meta)
if isinstance(field, serializers.TimeField):
return append_meta(build_basic_type(OpenApiTypes.TIME), meta)
if isinstance(field, serializers.EmailField):
return append_meta(build_basic_type(OpenApiTypes.EMAIL), meta)
if isinstance(field, serializers.URLField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.UUIDField):
return append_meta(build_basic_type(OpenApiTypes.UUID), meta)
if isinstance(field, serializers.DurationField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.IPAddressField):
# TODO this might be a DRF bug. protocol is not propagated to serializer although it
# should have been. results in always 'both' (thus no format)
if 'ipv4' == field.protocol.lower():
schema = build_basic_type(OpenApiTypes.IP4)
elif 'ipv6' == field.protocol.lower():
schema = build_basic_type(OpenApiTypes.IP6)
else:
schema = build_basic_type(OpenApiTypes.STR)
return append_meta(schema, meta)
        # DecimalField maps to a decimal-formatted string or a bounded number, depending on coercion
if isinstance(field, serializers.DecimalField):
if getattr(field, 'coerce_to_string', api_settings.COERCE_DECIMAL_TO_STRING):
content = {**build_basic_type(OpenApiTypes.STR), 'format': 'decimal'}
if field.max_whole_digits:
content['pattern'] = (
f'^\\d{{0,{field.max_whole_digits}}}'
f'(\\.\\d{{0,{field.decimal_places}}})?$'
)
else:
content = build_basic_type(OpenApiTypes.DECIMAL)
if field.max_whole_digits:
content['maximum'] = int(field.max_whole_digits * '9') + 1
content['minimum'] = -content['maximum']
self._map_min_max(field, content)
return append_meta(content, meta)
if isinstance(field, serializers.FloatField):
content = build_basic_type(OpenApiTypes.FLOAT)
self._map_min_max(field, content)
return append_meta(content, meta)
if isinstance(field, serializers.IntegerField):
content = build_basic_type(OpenApiTypes.INT)
self._map_min_max(field, content)
# 2147483647 is max for int32_size, so we use int64 for format
            if int(content.get('maximum', 0)) > 2147483647 or int(content.get('minimum', 0)) < -2147483648:
content['format'] = 'int64'
return append_meta(content, meta)
if isinstance(field, serializers.FileField):
if spectacular_settings.COMPONENT_SPLIT_REQUEST and direction == 'request':
content = build_basic_type(OpenApiTypes.BINARY)
else:
use_url = getattr(field, 'use_url', api_settings.UPLOADED_FILES_USE_URL)
content = build_basic_type(OpenApiTypes.URI if use_url else OpenApiTypes.STR)
return append_meta(content, meta)
if isinstance(field, serializers.SerializerMethodField):
method = getattr(field.parent, field.method_name)
return append_meta(self._map_response_type_hint(method), meta)
if isinstance(field, (serializers.BooleanField, serializers.NullBooleanField)):
return append_meta(build_basic_type(OpenApiTypes.BOOL), meta)
if isinstance(field, serializers.JSONField):
return append_meta(build_basic_type(OpenApiTypes.OBJECT), meta)
if isinstance(field, (serializers.DictField, serializers.HStoreField)):
content = build_basic_type(OpenApiTypes.OBJECT)
if not isinstance(field.child, _UnvalidatedField):
content['additionalProperties'] = self._map_serializer_field(field.child, direction)
return append_meta(content, meta)
if isinstance(field, serializers.CharField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.ReadOnlyField):
# when field is nested inside a ListSerializer, the Meta class is 2 steps removed
if is_list_serializer(field.parent):
model = getattr(getattr(field.parent.parent, 'Meta', None), 'model', None)
source = field.parent.source_attrs
else:
model = getattr(getattr(field.parent, 'Meta', None), 'model', None)
source = field.source_attrs
if model is None:
warn(
f'Could not derive type for ReadOnlyField "{field.field_name}" because the '
f'serializer class has no associated model (Meta class). Consider using some '
                    f'other field like CharField(read_only=True) instead. Defaulting to "string".'
)
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
target = follow_field_source(model, source)
if callable(target):
schema = self._map_response_type_hint(target)
elif isinstance(target, models.Field):
schema = self._map_model_field(target, direction)
else:
assert False, f'ReadOnlyField target "{field}" must be property or model field'
return append_meta(schema, meta)
# DRF was not able to match the model field to an explicit SerializerField and therefore
# used its generic fallback serializer field that simply wraps the model field.
if isinstance(field, serializers.ModelField):
schema = self._map_model_field(field.model_field, direction)
return append_meta(schema, meta)
warn(f'could not resolve serializer field "{field}". Defaulting to "string"')
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
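    # The 'field' override consumed at the top of _map_serializer_field() is typically attached
    # with @extend_schema_field. A minimal sketch; serializer, method, and attribute names are
    # made up for illustration:
    #
    #   from drf_spectacular.utils import extend_schema_field
    #
    #   class ThingSerializer(serializers.Serializer):
    #       contact = serializers.SerializerMethodField()
    #
    #       @extend_schema_field(OpenApiTypes.EMAIL)
    #       def get_contact(self, obj):
    #           return obj.contact_email  # hypothetical attribute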
def _map_min_max(self, field, content):
if field.max_value:
content['maximum'] = field.max_value
if field.min_value:
content['minimum'] = field.min_value
def _map_serializer(self, serializer, direction, bypass_extensions=False):
serializer = force_instance(serializer)
serializer_extension = OpenApiSerializerExtension.get_match(serializer)
if serializer_extension and not bypass_extensions:
schema = serializer_extension.map_serializer(self, direction)
else:
schema = self._map_basic_serializer(serializer, direction)
return self._postprocess_serializer_schema(schema, serializer, direction)
def _postprocess_serializer_schema(self, schema, serializer, direction):
"""
postprocess generated schema for component splitting, if enabled.
        only applies to direct component schemas and not to intermediate schemas
        like components composed of sub-components via e.g. oneOf.
"""
if not spectacular_settings.COMPONENT_SPLIT_REQUEST:
return schema
        properties = schema.get('properties', {})
required = schema.get('required', [])
for prop_name in list(properties):
if direction == 'request' and properties[prop_name].get('readOnly'):
del schema['properties'][prop_name]
if prop_name in required:
required.remove(prop_name)
if direction == 'response' and properties[prop_name].get('writeOnly'):
del schema['properties'][prop_name]
if prop_name in required:
required.remove(prop_name)
# remove empty listing as it violates schema specification
if 'required' in schema and not required:
del schema['required']
return schema
def _get_serializer_field_meta(self, field):
if not isinstance(field, serializers.Field):
return {}
meta = {}
if field.read_only:
meta['readOnly'] = True
if field.write_only:
meta['writeOnly'] = True
if field.allow_null:
meta['nullable'] = True
if field.default is not None and field.default != empty and not callable(field.default):
default = field.to_representation(field.default)
if isinstance(default, set):
default = list(default)
meta['default'] = default
if field.label and not is_trivial_string_variation(field.label, field.field_name):
meta['title'] = str(field.label)
if field.help_text:
meta['description'] = str(field.help_text)
return meta
def _map_basic_serializer(self, serializer, direction):
serializer = force_instance(serializer)
required = set()
properties = {}
for field in serializer.fields.values():
if isinstance(field, serializers.HiddenField):
continue
if field.field_name in get_override(serializer, 'exclude_fields', []):
continue
schema = self._map_serializer_field(field, direction)
# skip field if there is no schema for the direction
if schema is None:
continue
add_to_required = (
field.required
or (schema.get('readOnly') and not spectacular_settings.COMPONENT_NO_READ_ONLY_REQUIRED)
)
if add_to_required:
required.add(field.field_name)
self._map_field_validators(field, schema)
if field.field_name in get_override(serializer, 'deprecate_fields', []):
schema['deprecated'] = True
properties[field.field_name] = safe_ref(schema)
if is_patched_serializer(serializer, direction):
required = []
return build_object_type(
properties=properties,
required=required,
description=get_doc(serializer.__class__),
)
def _map_field_validators(self, field, schema):
for v in field.validators:
if isinstance(v, validators.EmailValidator):
schema['format'] = 'email'
elif isinstance(v, validators.URLValidator):
schema['format'] = 'uri'
elif isinstance(v, validators.RegexValidator):
pattern = v.regex.pattern.encode('ascii', 'backslashreplace').decode()
pattern = pattern.replace(r'\x', r'\u00') # unify escaping
pattern = pattern.replace(r'\Z', '$').replace(r'\A', '^') # ECMA anchors
schema['pattern'] = pattern
elif isinstance(v, validators.MaxLengthValidator):
attr_name = 'maxLength'
if isinstance(field, serializers.ListField):
attr_name = 'maxItems'
schema[attr_name] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MinLengthValidator):
attr_name = 'minLength'
if isinstance(field, serializers.ListField):
attr_name = 'minItems'
schema[attr_name] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MaxValueValidator):
schema['maximum'] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MinValueValidator):
schema['minimum'] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.DecimalValidator):
if v.max_digits:
digits = v.max_digits
if v.decimal_places is not None and v.decimal_places > 0:
digits -= v.decimal_places
schema['maximum'] = int(digits * '9') + 1
schema['minimum'] = -schema['maximum']
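    # Worked example for the validator mapping above: a CharField declared with
    # validators=[RegexValidator(r'^\d+\Z'), MaxLengthValidator(10)] ends up with
    # schema['pattern'] == r'^\d+$' (the \Z anchor is rewritten to the ECMA $ anchor) and
    # schema['maxLength'] == 10 (maxItems would be used instead for a ListField).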
def _map_response_type_hint(self, method):
hint = get_override(method, 'field') or get_type_hints(method).get('return')
if is_serializer(hint) or is_field(hint):
return self._map_serializer_field(force_instance(hint), 'response')
if isinstance(hint, dict):
return hint
try:
return resolve_type_hint(hint)
except UnableToProceedError:
warn(
f'unable to resolve type hint for function "{method.__name__}". Consider '
f'using a type hint or @extend_schema_field. Defaulting to string.'
)
return build_basic_type(OpenApiTypes.STR)
def _get_paginator(self):
pagination_class = getattr(self.view, 'pagination_class', None)
if pagination_class:
return pagination_class()
return None
def map_parsers(self):
return list(dict.fromkeys([p.media_type for p in self.view.get_parsers()]))
def map_renderers(self, attribute):
assert attribute in ['media_type', 'format']
return list(dict.fromkeys([
getattr(r, attribute).split(';')[0]
for r in self.view.get_renderers()
if not isinstance(r, renderers.BrowsableAPIRenderer) and getattr(r, attribute, None)
]))
def _get_serializer(self):
view = self.view
try:
if isinstance(view, GenericAPIView):
# try to circumvent queryset issues with calling get_serializer. if view has NOT
                # overridden get_serializer, it's safe to use get_serializer_class.
if view.__class__.get_serializer == GenericAPIView.get_serializer:
return view.get_serializer_class()()
return view.get_serializer()
elif isinstance(view, APIView):
# APIView does not implement the required interface, but be lenient and make
# good guesses before giving up and emitting a warning.
if callable(getattr(view, 'get_serializer', None)):
return view.get_serializer()
elif callable(getattr(view, 'get_serializer_class', None)):
return view.get_serializer_class()()
elif hasattr(view, 'serializer_class'):
return view.serializer_class
else:
error(
'unable to guess serializer. This is graceful '
'fallback handling for APIViews. Consider using GenericAPIView as view base '
'class, if view is under your control. Ignoring view for now. '
)
else:
error('Encountered unknown view base class. Please report this issue. Ignoring for now')
except Exception as exc:
error(
f'exception raised while getting serializer. Hint: '
f'Is get_serializer_class() returning None or is get_queryset() not working without '
f'a request? Ignoring the view for now. (Exception: {exc})'
)
def get_examples(self):
return []
def _get_examples(self, serializer, direction, media_type, status_code=None, extras=None):
examples = self.get_examples()
if not examples:
if is_list_serializer(serializer):
examples = get_override(serializer.child, 'examples', [])
elif is_serializer(serializer):
examples = get_override(serializer, 'examples', [])
# additional examples provided via OpenApiResponse are merged with the other methods
extras = extras or []
filtered_examples = []
for example in examples + extras:
if direction == 'request' and example.response_only:
continue
if direction == 'response' and example.request_only:
continue
if media_type and media_type != example.media_type:
continue
if status_code and status_code not in example.status_codes:
continue
filtered_examples.append(example)
return build_examples_list(filtered_examples)
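    # The examples filtered above usually originate from the utils layer. A minimal, assumed
    # sketch of attaching one to a view method (names and values are illustrative only):
    #
    #   from drf_spectacular.utils import OpenApiExample, extend_schema
    #
    #   @extend_schema(examples=[OpenApiExample('Typical', value={'id': 1}, response_only=True)])
    #   def retrieve(self, request, *args, **kwargs):
    #       ...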
def _get_request_body(self):
# only unsafe methods can have a body
if self.method not in ('PUT', 'PATCH', 'POST'):
return None
request_serializer = self.get_request_serializer()
if isinstance(request_serializer, dict):
content = []
request_body_required = True
for media_type, serializer in request_serializer.items():
schema, partial_request_body_required = self._get_request_for_media_type(serializer)
examples = self._get_examples(serializer, 'request', media_type)
if schema is None:
continue
content.append((media_type, schema, examples))
request_body_required &= partial_request_body_required
else:
schema, request_body_required = self._get_request_for_media_type(request_serializer)
if schema is None:
return None
content = [
(media_type, schema, self._get_examples(request_serializer, 'request', media_type))
for media_type in self.map_parsers()
]
request_body = {
'content': {
media_type: build_media_type_object(schema, examples)
for media_type, schema, examples in content
}
}
if request_body_required:
request_body['required'] = request_body_required
return request_body
def _get_request_for_media_type(self, serializer):
serializer = force_instance(serializer)
if is_list_serializer(serializer):
if is_serializer(serializer.child):
component = self.resolve_serializer(serializer.child, 'request')
schema = build_array_type(component.ref)
else:
schema = build_array_type(self._map_serializer_field(serializer.child, 'request'))
request_body_required = True
elif is_serializer(serializer):
if self.method == 'PATCH':
# we simulate what DRF is doing: the entry serializer is set to partial
# for PATCH requests. serializer instances received via extend_schema
# may be reused; prevent race conditions by modifying a copy.
serializer = copy.copy(serializer)
serializer.partial = True
component = self.resolve_serializer(serializer, 'request')
if not component.schema:
# serializer is empty so skip content enumeration
return None, False
schema = component.ref
# request body is only required if any required property is not read-only
readonly_props = [
p for p, s in component.schema.get('properties', {}).items() if s.get('readOnly')
]
required_props = component.schema.get('required', [])
request_body_required = any(req not in readonly_props for req in required_props)
elif is_basic_type(serializer):
schema = build_basic_type(serializer)
request_body_required = False
elif isinstance(serializer, dict):
# bypass processing and use given schema directly
schema = serializer
request_body_required = False
else:
warn(
f'could not resolve request body for {self.method} {self.path}. Defaulting to generic '
'free-form object. (Maybe annotate a Serializer class?)'
)
schema = build_generic_type()
schema['description'] = 'Unspecified request body'
request_body_required = False
return schema, request_body_required
def _get_response_bodies(self):
response_serializers = self.get_response_serializers()
if (
is_serializer(response_serializers)
or is_basic_type(response_serializers)
or isinstance(response_serializers, OpenApiResponse)
):
if self.method == 'DELETE':
return {'204': {'description': _('No response body')}}
if self._is_create_operation():
return {'201': self._get_response_for_code(response_serializers, '201')}
return {'200': self._get_response_for_code(response_serializers, '200')}
elif isinstance(response_serializers, dict):
# custom handling for overriding default return codes with @extend_schema
responses = {}
for code, serializer in response_serializers.items():
if isinstance(code, tuple):
code, media_types = str(code[0]), code[1:]
else:
code, media_types = str(code), None
content_response = self._get_response_for_code(serializer, code, media_types)
if code in responses:
responses[code]['content'].update(content_response['content'])
else:
responses[code] = content_response
return responses
else:
warn(
f'could not resolve "{response_serializers}" for {self.method} {self.path}. '
f'Expected either a serializer or some supported override mechanism. '
f'Defaulting to generic free-form object.'
)
schema = build_basic_type(OpenApiTypes.OBJECT)
schema['description'] = _('Unspecified response body')
return {'200': self._get_response_for_code(schema, '200')}
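    # The dict branch above is what an @extend_schema(responses=...) override typically feeds in.
    # A minimal, assumed sketch (the serializer name is illustrative):
    #
    #   @extend_schema(responses={
    #       200: ThingSerializer,
    #       404: OpenApiResponse(description='Not found'),
    #   })
    #   def retrieve(self, request, *args, **kwargs):
    #       ...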
def _get_response_for_code(self, serializer, status_code, media_types=None):
if isinstance(serializer, OpenApiResponse):
serializer, description, examples = (
serializer.response, serializer.description, serializer.examples
)
else:
description, examples = '', []
serializer = force_instance(serializer)
headers = self._get_response_headers_for_code(status_code)
headers = {'headers': headers} if headers else {}
if not serializer:
return {**headers, 'description': description or _('No response body')}
elif is_list_serializer(serializer):
if is_serializer(serializer.child):
schema = self.resolve_serializer(serializer.child, 'response').ref
else:
schema = self._map_serializer_field(serializer.child, 'response')
elif is_serializer(serializer):
component = self.resolve_serializer(serializer, 'response')
if not component.schema:
return {**headers, 'description': description or _('No response body')}
schema = component.ref
elif is_basic_type(serializer):
schema = build_basic_type(serializer)
elif isinstance(serializer, dict):
# bypass processing and use given schema directly
schema = serializer
            # prevent invalid dict case in _is_list_view() as this is not a status_code dict.
serializer = None
else:
warn(
f'could not resolve "{serializer}" for {self.method} {self.path}. Expected either '
f'a serializer or some supported override mechanism. Defaulting to '
f'generic free-form object.'
)
schema = build_basic_type(OpenApiTypes.OBJECT)
schema['description'] = _('Unspecified response body')
if (
self._is_list_view(serializer)
and get_override(serializer, 'many') is not False
and ('200' <= status_code < '300' or spectacular_settings.ENABLE_LIST_MECHANICS_ON_NON_2XX)
):
schema = build_array_type(schema)
paginator = self._get_paginator()
if (
paginator
and is_serializer(serializer)
and (not is_list_serializer(serializer) or is_serializer(serializer.child))
):
            paginated_name = f'Paginated{self._get_serializer_name(serializer, "response")}List'
component = ResolvedComponent(
name=paginated_name,
type=ResolvedComponent.SCHEMA,
schema=paginator.get_paginated_response_schema(schema),
object=serializer.child if is_list_serializer(serializer) else serializer,
)
self.registry.register_on_missing(component)
schema = component.ref
elif paginator:
schema = paginator.get_paginated_response_schema(schema)
if not media_types:
media_types = self.map_renderers('media_type')
return {
**headers,
'content': {
media_type: build_media_type_object(
schema,
self._get_examples(serializer, 'response', media_type, status_code, examples)
)
for media_type in media_types
},
'description': description
}
def _get_response_headers_for_code(self, status_code) -> dict:
result = {}
for parameter in self.get_override_parameters():
if not isinstance(parameter, OpenApiParameter):
continue
if not parameter.response:
continue
if (
isinstance(parameter.response, list)
and status_code not in [str(code) for code in parameter.response]
):
continue
if is_basic_type(parameter.type):
schema = build_basic_type(parameter.type)
elif is_serializer(parameter.type):
schema = self.resolve_serializer(parameter.type, 'response').ref
else:
schema = parameter.type
if parameter.location not in [OpenApiParameter.HEADER, OpenApiParameter.COOKIE]:
warn(f'incompatible location type ignored for response parameter {parameter.name}')
parameter_type = build_parameter_type(
name=parameter.name,
schema=schema,
location=parameter.location,
required=parameter.required,
description=parameter.description,
enum=parameter.enum,
deprecated=parameter.deprecated,
style=parameter.style,
explode=parameter.explode,
default=parameter.default,
examples=build_examples_list(parameter.examples),
)
del parameter_type['name']
del parameter_type['in']
result[parameter.name] = parameter_type
return result
def _get_serializer_name(self, serializer, direction):
serializer_extension = OpenApiSerializerExtension.get_match(serializer)
if serializer_extension and serializer_extension.get_name():
# library override mechanisms
name = serializer_extension.get_name()
elif getattr(getattr(serializer, 'Meta', None), 'ref_name', None) is not None:
# local override mechanisms. for compatibility with drf-yasg we support meta ref_name,
# though we do not support the serializer inlining feature.
# https://drf-yasg.readthedocs.io/en/stable/custom_spec.html#serializer-meta-nested-class
name = serializer.Meta.ref_name
elif is_list_serializer(serializer):
return self._get_serializer_name(serializer.child, direction)
else:
name = serializer.__class__.__name__
if name.endswith('Serializer'):
name = name[:-10]
if is_patched_serializer(serializer, direction):
name = 'Patched' + name
if direction == 'request' and spectacular_settings.COMPONENT_SPLIT_REQUEST:
name = name + 'Request'
return name
def resolve_serializer(self, serializer, direction) -> ResolvedComponent:
assert is_serializer(serializer), (
f'internal assumption violated because we expected a serializer here and instead '
f'got a "{serializer}". This may be the result of another app doing some unexpected '
f'magic or an invalid internal call. Feel free to report this as a bug at '
f'https://github.com/tfranzel/drf-spectacular/issues '
)
serializer = force_instance(serializer)
with add_trace_message(serializer.__class__.__name__):
component = ResolvedComponent(
name=self._get_serializer_name(serializer, direction),
type=ResolvedComponent.SCHEMA,
object=serializer,
)
if component in self.registry:
return self.registry[component] # return component with schema
self.registry.register(component)
component.schema = self._map_serializer(serializer, direction)
discard_component = (
# components with empty schemas serve no purpose
not component.schema
                # concrete components without properties are likely only transactional, so discard
or (
component.schema.get('type') == 'object'
and not component.schema.get('properties')
and 'additionalProperties' not in component.schema
)
)
if discard_component:
del self.registry[component]
return ResolvedComponent(None, None) # sentinel
return component
|
import copy
import re
import typing
import uritemplate
from django.core import exceptions as django_exceptions
from django.core import validators
from django.db import models
from django.utils.translation import gettext_lazy as _
from rest_framework import permissions, renderers, serializers
from rest_framework.fields import _UnvalidatedField, empty
from rest_framework.generics import CreateAPIView, GenericAPIView, ListCreateAPIView
from rest_framework.mixins import ListModelMixin
from rest_framework.schemas.inspectors import ViewInspector
from rest_framework.schemas.utils import get_pk_description # type: ignore
from rest_framework.settings import api_settings
from rest_framework.utils.model_meta import get_field_info
from rest_framework.views import APIView
from drf_spectacular.authentication import OpenApiAuthenticationExtension
from drf_spectacular.contrib import * # noqa: F403, F401
from drf_spectacular.drainage import add_trace_message, get_override, has_override
from drf_spectacular.extensions import (
OpenApiFilterExtension, OpenApiSerializerExtension, OpenApiSerializerFieldExtension,
)
from drf_spectacular.plumbing import (
ComponentRegistry, ResolvedComponent, UnableToProceedError, append_meta, build_array_type,
build_basic_type, build_choice_field, build_examples_list, build_media_type_object,
build_object_type, build_parameter_type, error, follow_field_source, force_instance, get_doc,
get_type_hints, get_view_model, is_basic_type, is_field, is_list_serializer,
is_patched_serializer, is_serializer, is_trivial_string_variation, resolve_regex_path_parameter,
resolve_type_hint, safe_ref, warn,
)
from drf_spectacular.settings import spectacular_settings
from drf_spectacular.types import OpenApiTypes, build_generic_type
from drf_spectacular.utils import OpenApiParameter, OpenApiResponse
class AutoSchema(ViewInspector):
method_mapping = {
'get': 'retrieve',
'post': 'create',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy',
}
def get_operation(self, path, path_regex, path_prefix, method, registry: ComponentRegistry):
self.registry = registry
self.path = path
self.path_regex = path_regex
self.path_prefix = path_prefix
self.method = method
operation = {}
operation['operationId'] = self.get_operation_id()
operation['description'] = self.get_description()
summary = self.get_summary()
if summary:
operation['summary'] = summary
parameters = self._get_parameters()
if parameters:
operation['parameters'] = parameters
tags = self.get_tags()
if tags:
operation['tags'] = tags
request_body = self._get_request_body()
if request_body:
operation['requestBody'] = request_body
auth = self.get_auth()
if auth:
operation['security'] = auth
deprecated = self.is_deprecated()
if deprecated:
operation['deprecated'] = deprecated
operation['responses'] = self._get_response_bodies()
return operation
def _is_list_view(self, serializer=None):
"""
partially heuristic approach to determine if a view yields an object or a
list of objects. used for operationId naming, array building and pagination.
        defaults to False if all introspection fails.
"""
if serializer is None:
serializer = self.get_response_serializers()
if isinstance(serializer, dict) and serializer:
# extract likely main serializer from @extend_schema override
serializer = {str(code): s for code, s in serializer.items()}
serializer = serializer[min(serializer)]
if is_list_serializer(serializer):
return True
if is_basic_type(serializer):
return False
if hasattr(self.view, 'action'):
return self.view.action == 'list'
# list responses are "usually" only returned by GET
if self.method.lower() != 'get':
return False
if isinstance(self.view, ListModelMixin):
return True
# primary key/lookup variable in path is a strong indicator for retrieve
if isinstance(self.view, GenericAPIView):
lookup_url_kwarg = self.view.lookup_url_kwarg or self.view.lookup_field
if lookup_url_kwarg in uritemplate.variables(self.path):
return False
return False
def _is_create_operation(self):
if self.method != 'POST':
return False
if getattr(self.view, 'action', None) == 'create':
return True
if isinstance(self.view, (ListCreateAPIView, CreateAPIView)):
return True
return False
def get_override_parameters(self):
""" override this for custom behaviour """
return []
def _process_override_parameters(self):
result = {}
for parameter in self.get_override_parameters():
if isinstance(parameter, OpenApiParameter):
if parameter.response:
continue
if is_basic_type(parameter.type):
schema = build_basic_type(parameter.type)
elif is_serializer(parameter.type):
schema = self.resolve_serializer(parameter.type, 'request').ref
else:
schema = parameter.type
if parameter.exclude:
result[parameter.name, parameter.location] = None
else:
result[parameter.name, parameter.location] = build_parameter_type(
name=parameter.name,
schema=schema,
location=parameter.location,
required=parameter.required,
description=parameter.description,
enum=parameter.enum,
deprecated=parameter.deprecated,
style=parameter.style,
explode=parameter.explode,
default=parameter.default,
examples=build_examples_list(parameter.examples),
)
elif is_serializer(parameter):
# explode serializer into separate parameters. defaults to QUERY location
mapped = self._map_serializer(parameter, 'request')
for property_name, property_schema in mapped['properties'].items():
result[property_name, OpenApiParameter.QUERY] = build_parameter_type(
name=property_name,
schema=property_schema,
description=property_schema.pop('description', None),
location=OpenApiParameter.QUERY,
required=property_name in mapped.get('required', []),
)
else:
warn(f'could not resolve parameter annotation {parameter}. Skipping.')
return result
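    # The override parameters processed above normally come from @extend_schema(parameters=[...]).
    # A minimal, assumed sketch (the parameter name is illustrative):
    #
    #   @extend_schema(parameters=[
    #       OpenApiParameter('expand', OpenApiTypes.STR, OpenApiParameter.QUERY,
    #                        description='comma separated list of fields to expand'),
    #   ])
    #   def list(self, request, *args, **kwargs):
    #       ...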
def _get_format_parameters(self):
parameters = []
formats = self.map_renderers('format')
if api_settings.URL_FORMAT_OVERRIDE and len(formats) > 1:
parameters.append(build_parameter_type(
name=api_settings.URL_FORMAT_OVERRIDE,
schema=build_basic_type(OpenApiTypes.STR),
location=OpenApiParameter.QUERY,
enum=formats
))
return parameters
def _get_parameters(self):
def dict_helper(parameters):
return {(p['name'], p['in']): p for p in parameters}
override_parameters = self._process_override_parameters()
# remove overridden path parameters beforehand so that there are no irrelevant warnings.
path_variables = [
v for v in uritemplate.variables(self.path) if (v, 'path') not in override_parameters
]
parameters = {
**dict_helper(self._resolve_path_parameters(path_variables)),
**dict_helper(self._get_filter_parameters()),
**dict_helper(self._get_pagination_parameters()),
**dict_helper(self._get_format_parameters()),
}
# override/add/remove @extend_schema parameters
for key, parameter in override_parameters.items():
if parameter is None:
# either omit or explicitly remove parameter
if key in parameters:
del parameters[key]
else:
parameters[key] = parameter
if callable(spectacular_settings.SORT_OPERATION_PARAMETERS):
return sorted(parameters.values(), key=spectacular_settings.SORT_OPERATION_PARAMETERS)
elif spectacular_settings.SORT_OPERATION_PARAMETERS:
return sorted(parameters.values(), key=lambda p: p['name'])
else:
return list(parameters.values())
def get_description(self):
""" override this for custom behaviour """
action_or_method = getattr(self.view, getattr(self.view, 'action', self.method.lower()), None)
view_doc = get_doc(self.view.__class__)
action_doc = get_doc(action_or_method)
return action_doc or view_doc
def get_summary(self):
""" override this for custom behaviour """
return None
def get_auth(self):
"""
Obtains authentication classes and permissions from view. If authentication
is known, resolve security requirement for endpoint and security definition for
the component section.
For custom authentication subclass ``OpenApiAuthenticationExtension``.
"""
auths = []
for authenticator in self.view.get_authenticators():
if (
spectacular_settings.AUTHENTICATION_WHITELIST
and authenticator.__class__ not in spectacular_settings.AUTHENTICATION_WHITELIST
):
continue
scheme = OpenApiAuthenticationExtension.get_match(authenticator)
if not scheme:
warn(
f'could not resolve authenticator {authenticator.__class__}. There '
f'was no OpenApiAuthenticationExtension registered for that class. '
f'Try creating one by subclassing it. Ignoring for now.'
)
continue
security_requirements = scheme.get_security_requirement(self)
if security_requirements is not None:
auths.append(security_requirements)
component = ResolvedComponent(
name=scheme.name,
type=ResolvedComponent.SECURITY_SCHEMA,
object=authenticator.__class__,
schema=scheme.get_security_definition(self)
)
self.registry.register_on_missing(component)
if spectacular_settings.SECURITY:
auths.extend(spectacular_settings.SECURITY)
perms = [p.__class__ for p in self.view.get_permissions()]
if permissions.AllowAny in perms:
auths.append({})
elif permissions.IsAuthenticatedOrReadOnly in perms and self.method in permissions.SAFE_METHODS:
auths.append({})
return auths
def get_request_serializer(self) -> typing.Any:
""" override this for custom behaviour """
return self._get_serializer()
def get_response_serializers(self) -> typing.Any:
""" override this for custom behaviour """
return self._get_serializer()
def get_tags(self) -> typing.List[str]:
""" override this for custom behaviour """
tokenized_path = self._tokenize_path()
# use first non-parameter path part as tag
return tokenized_path[:1]
def get_operation_id(self):
""" override this for custom behaviour """
tokenized_path = self._tokenize_path()
# replace dashes as they can be problematic later in code generation
tokenized_path = [t.replace('-', '_') for t in tokenized_path]
if self.method.lower() == 'get' and self._is_list_view():
action = 'list'
else:
action = self.method_mapping[self.method.lower()]
if not tokenized_path:
tokenized_path.append('root')
if re.search(r'<drf_format_suffix\w*:\w+>', self.path_regex):
tokenized_path.append('formatted')
return '_'.join(tokenized_path + [action])
def is_deprecated(self):
""" override this for custom behaviour """
return False
def _tokenize_path(self):
# remove path prefix
path = re.sub(
pattern=self.path_prefix,
repl='',
string=self.path,
flags=re.IGNORECASE
)
# remove path variables
path = re.sub(pattern=r'\{[\w\-]+\}', repl='', string=path)
# cleanup and tokenize remaining parts.
path = path.rstrip('/').lstrip('/').split('/')
return [t for t in path if t]
def _resolve_path_parameters(self, variables):
parameters = []
for variable in variables:
schema = build_basic_type(OpenApiTypes.STR)
description = ''
resolved_parameter = resolve_regex_path_parameter(
self.path_regex, variable, self.map_renderers('format'),
)
if resolved_parameter:
schema = resolved_parameter['schema']
elif get_view_model(self.view) is None:
warn(
                    f'could not derive type of path parameter "{variable}" because it '
f'is untyped and obtaining queryset from the viewset failed. '
f'Consider adding a type to the path (e.g. <int:{variable}>) or annotating '
f'the parameter type with @extend_schema. Defaulting to "string".'
)
else:
try:
model = get_view_model(self.view)
model_field = model._meta.get_field(variable)
schema = self._map_model_field(model_field, direction=None)
if 'description' not in schema and model_field.primary_key:
description = get_pk_description(model, model_field)
except django_exceptions.FieldDoesNotExist:
warn(
f'could not derive type of path parameter "{variable}" because '
f'model "{model}" did contain no such field. Consider annotating '
f'parameter with @extend_schema. Defaulting to "string".'
)
parameters.append(build_parameter_type(
name=variable,
location=OpenApiParameter.PATH,
description=description,
schema=schema
))
return parameters
def _get_filter_parameters(self):
if not self._is_list_view():
return []
if getattr(self.view, 'filter_backends', None) is None:
return []
parameters = []
for filter_backend in self.view.filter_backends:
filter_extension = OpenApiFilterExtension.get_match(filter_backend())
if filter_extension:
parameters += filter_extension.get_schema_operation_parameters(self)
else:
parameters += filter_backend().get_schema_operation_parameters(self.view)
return parameters
def _get_pagination_parameters(self):
if not self._is_list_view():
return []
paginator = self._get_paginator()
if not paginator:
return []
filter_extension = OpenApiFilterExtension.get_match(paginator)
if filter_extension:
return filter_extension.get_schema_operation_parameters(self)
else:
return paginator.get_schema_operation_parameters(self.view)
def _map_model_field(self, model_field, direction):
assert isinstance(model_field, models.Field)
# to get a fully initialized serializer field we use DRF's own init logic
try:
field_cls, field_kwargs = serializers.ModelSerializer().build_field(
field_name=model_field.name,
info=get_field_info(model_field.model),
model_class=model_field.model,
nested_depth=0,
)
field = field_cls(**field_kwargs)
field.field_name = model_field.name
except: # noqa
field = None
# For some cases, the DRF init logic either breaks (custom field with internal type) or
# the resulting field is underspecified with regards to the schema (ReadOnlyField).
if field and isinstance(field, serializers.PrimaryKeyRelatedField):
# special case handling only for _resolve_path_parameters() where neither queryset nor
# parent is set by build_field. patch in queryset as _map_serializer_field requires it
if not field.queryset:
field.queryset = model_field.related_model.objects.none()
return self._map_serializer_field(field, direction)
elif isinstance(field, serializers.ManyRelatedField):
# special case handling similar to the case above. "parent.parent" on child_relation
# is None and there is no queryset. patch in as _map_serializer_field requires one.
if not field.child_relation.queryset:
field.child_relation.queryset = model_field.related_model.objects.none()
return self._map_serializer_field(field, direction)
elif field and not isinstance(field, (serializers.ReadOnlyField, serializers.ModelField)):
return self._map_serializer_field(field, direction)
elif isinstance(model_field, models.ForeignKey):
return self._map_model_field(model_field.target_field, direction)
elif hasattr(models, 'JSONField') and isinstance(model_field, models.JSONField):
# fix for DRF==3.11 with django>=3.1 as it is not yet represented in the field_mapping
return build_basic_type(OpenApiTypes.OBJECT)
elif hasattr(models, model_field.get_internal_type()):
# be graceful when the model field is not explicitly mapped to a serializer
internal_type = getattr(models, model_field.get_internal_type())
field_cls = serializers.ModelSerializer.serializer_field_mapping.get(internal_type)
if not field_cls:
warn(
f'model field "{model_field.get_internal_type()}" has no mapping in '
f'ModelSerializer. It may be a deprecated field. Defaulting to "string"'
)
return build_basic_type(OpenApiTypes.STR)
return self._map_serializer_field(field_cls(), direction)
else:
error(
f'could not resolve model field "{model_field}". Failed to resolve through '
f'serializer_field_mapping, get_internal_type(), or any override mechanism. '
f'Defaulting to "string"'
)
return build_basic_type(OpenApiTypes.STR)
def _map_serializer_field(self, field, direction, bypass_extensions=False):
meta = self._get_serializer_field_meta(field)
if has_override(field, 'field'):
override = get_override(field, 'field')
if is_basic_type(override):
schema = build_basic_type(override)
if schema is None:
return None
elif isinstance(override, dict):
schema = override
else:
schema = self._map_serializer_field(force_instance(override), direction)
field_component_name = get_override(field, 'field_component_name')
if field_component_name:
component = ResolvedComponent(
name=field_component_name,
type=ResolvedComponent.SCHEMA,
schema=schema,
object=field,
)
self.registry.register_on_missing(component)
return append_meta(component.ref, meta)
else:
return append_meta(schema, meta)
serializer_field_extension = OpenApiSerializerFieldExtension.get_match(field)
if serializer_field_extension and not bypass_extensions:
schema = serializer_field_extension.map_serializer_field(self, direction)
if serializer_field_extension.get_name():
component = ResolvedComponent(
name=serializer_field_extension.get_name(),
type=ResolvedComponent.SCHEMA,
schema=schema,
object=field,
)
self.registry.register_on_missing(component)
return append_meta(component.ref, meta)
else:
return append_meta(schema, meta)
# nested serializer with many=True gets automatically replaced with ListSerializer
if is_list_serializer(field):
if is_serializer(field.child):
component = self.resolve_serializer(field.child, direction)
return append_meta(build_array_type(component.ref), meta) if component else None
else:
schema = self._map_serializer_field(field.child, direction)
return append_meta(build_array_type(schema), meta)
# nested serializer
if is_serializer(field):
component = self.resolve_serializer(field, direction)
return append_meta(component.ref, meta) if component else None
# Related fields.
if isinstance(field, serializers.ManyRelatedField):
schema = self._map_serializer_field(field.child_relation, direction)
# remove hand-over initkwargs applying only to outer scope
schema.pop('description', None)
schema.pop('readOnly', None)
return append_meta(build_array_type(schema), meta)
if isinstance(field, serializers.PrimaryKeyRelatedField):
# read_only fields do not have a Manager by design. go around and get field
            # from parent. also avoid calling Manager.__bool__ as it might be customized
# to hit the database.
if getattr(field, 'queryset', None) is not None:
model_field = field.queryset.model._meta.pk
else:
if isinstance(field.parent, serializers.ManyRelatedField):
model = field.parent.parent.Meta.model
source = field.parent.source.split('.')
else:
model = field.parent.Meta.model
source = field.source.split('.')
                # estimates the relating model field and jumps to its target model PK field.
# also differentiate as source can be direct (pk) or relation field (model).
model_field = follow_field_source(model, source)
if callable(model_field):
# follow_field_source bailed with a warning. be graceful and default to str.
model_field = models.TextField()
# primary keys are usually non-editable (readOnly=True) and map_model_field correctly
# signals that attribute. however this does not apply in the context of relations.
schema = self._map_model_field(model_field, direction)
schema.pop('readOnly', None)
return append_meta(schema, meta)
if isinstance(field, serializers.StringRelatedField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.SlugRelatedField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.HyperlinkedIdentityField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.HyperlinkedRelatedField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.MultipleChoiceField):
return append_meta(build_array_type(build_choice_field(field)), meta)
if isinstance(field, serializers.ChoiceField):
return append_meta(build_choice_field(field), meta)
if isinstance(field, serializers.ListField):
if isinstance(field.child, _UnvalidatedField):
return append_meta(build_array_type(build_basic_type(OpenApiTypes.ANY)), meta)
elif is_serializer(field.child):
component = self.resolve_serializer(field.child, direction)
return append_meta(build_array_type(component.ref), meta) if component else None
else:
schema = self._map_serializer_field(field.child, direction)
# remove automatically attached but redundant title
if is_trivial_string_variation(field.field_name, schema.get('title')):
schema.pop('title', None)
return append_meta(build_array_type(schema), meta)
# DateField and DateTimeField type is string
if isinstance(field, serializers.DateField):
return append_meta(build_basic_type(OpenApiTypes.DATE), meta)
if isinstance(field, serializers.DateTimeField):
return append_meta(build_basic_type(OpenApiTypes.DATETIME), meta)
if isinstance(field, serializers.TimeField):
return append_meta(build_basic_type(OpenApiTypes.TIME), meta)
if isinstance(field, serializers.EmailField):
return append_meta(build_basic_type(OpenApiTypes.EMAIL), meta)
if isinstance(field, serializers.URLField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.UUIDField):
return append_meta(build_basic_type(OpenApiTypes.UUID), meta)
if isinstance(field, serializers.DurationField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.IPAddressField):
# TODO this might be a DRF bug. protocol is not propagated to serializer although it
# should have been. results in always 'both' (thus no format)
if 'ipv4' == field.protocol.lower():
schema = build_basic_type(OpenApiTypes.IP4)
elif 'ipv6' == field.protocol.lower():
schema = build_basic_type(OpenApiTypes.IP6)
else:
schema = build_basic_type(OpenApiTypes.STR)
return append_meta(schema, meta)
        # DecimalField maps to a decimal-formatted string or a bounded number, depending on coercion
if isinstance(field, serializers.DecimalField):
if getattr(field, 'coerce_to_string', api_settings.COERCE_DECIMAL_TO_STRING):
content = {**build_basic_type(OpenApiTypes.STR), 'format': 'decimal'}
if field.max_whole_digits:
content['pattern'] = (
f'^\\d{{0,{field.max_whole_digits}}}'
f'(\\.\\d{{0,{field.decimal_places}}})?$'
)
else:
content = build_basic_type(OpenApiTypes.DECIMAL)
if field.max_whole_digits:
content['maximum'] = int(field.max_whole_digits * '9') + 1
content['minimum'] = -content['maximum']
self._map_min_max(field, content)
return append_meta(content, meta)
if isinstance(field, serializers.FloatField):
content = build_basic_type(OpenApiTypes.FLOAT)
self._map_min_max(field, content)
return append_meta(content, meta)
if isinstance(field, serializers.IntegerField):
content = build_basic_type(OpenApiTypes.INT)
self._map_min_max(field, content)
# 2147483647 is max for int32_size, so we use int64 for format
            if int(content.get('maximum', 0)) > 2147483647 or int(content.get('minimum', 0)) < -2147483648:
content['format'] = 'int64'
return append_meta(content, meta)
if isinstance(field, serializers.FileField):
if spectacular_settings.COMPONENT_SPLIT_REQUEST and direction == 'request':
content = build_basic_type(OpenApiTypes.BINARY)
else:
use_url = getattr(field, 'use_url', api_settings.UPLOADED_FILES_USE_URL)
content = build_basic_type(OpenApiTypes.URI if use_url else OpenApiTypes.STR)
return append_meta(content, meta)
if isinstance(field, serializers.SerializerMethodField):
method = getattr(field.parent, field.method_name)
return append_meta(self._map_response_type_hint(method), meta)
if isinstance(field, (serializers.BooleanField, serializers.NullBooleanField)):
return append_meta(build_basic_type(OpenApiTypes.BOOL), meta)
if isinstance(field, serializers.JSONField):
return append_meta(build_basic_type(OpenApiTypes.OBJECT), meta)
if isinstance(field, (serializers.DictField, serializers.HStoreField)):
content = build_basic_type(OpenApiTypes.OBJECT)
if not isinstance(field.child, _UnvalidatedField):
content['additionalProperties'] = self._map_serializer_field(field.child, direction)
return append_meta(content, meta)
if isinstance(field, serializers.CharField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.ReadOnlyField):
# when field is nested inside a ListSerializer, the Meta class is 2 steps removed
if is_list_serializer(field.parent):
model = getattr(getattr(field.parent.parent, 'Meta', None), 'model', None)
source = field.parent.source_attrs
else:
model = getattr(getattr(field.parent, 'Meta', None), 'model', None)
source = field.source_attrs
if model is None:
warn(
f'Could not derive type for ReadOnlyField "{field.field_name}" because the '
f'serializer class has no associated model (Meta class). Consider using some '
                    f'other field like CharField(read_only=True) instead. Defaulting to "string".'
)
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
target = follow_field_source(model, source)
if callable(target):
schema = self._map_response_type_hint(target)
elif isinstance(target, models.Field):
schema = self._map_model_field(target, direction)
else:
assert False, f'ReadOnlyField target "{field}" must be property or model field'
return append_meta(schema, meta)
# DRF was not able to match the model field to an explicit SerializerField and therefore
# used its generic fallback serializer field that simply wraps the model field.
if isinstance(field, serializers.ModelField):
schema = self._map_model_field(field.model_field, direction)
return append_meta(schema, meta)
warn(f'could not resolve serializer field "{field}". Defaulting to "string"')
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
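    # Illustrative sketch (added for clarity, not part of the original code): how the DecimalField
    # branch above would map a hypothetical field, assuming COERCE_DECIMAL_TO_STRING is left at its
    # DRF default (True).
    #
    #   price = serializers.DecimalField(max_digits=6, decimal_places=2)
    #   ->  {'type': 'string', 'format': 'decimal', 'pattern': '^\d{0,4}(\.\d{0,2})?$'}
    #
    # With coerce_to_string=False the same field would instead yield roughly
    #   ->  {'type': 'number', ..., 'maximum': 10000, 'minimum': -10000}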
def _map_min_max(self, field, content):
if field.max_value:
content['maximum'] = field.max_value
if field.min_value:
content['minimum'] = field.min_value
def _map_serializer(self, serializer, direction, bypass_extensions=False):
serializer = force_instance(serializer)
serializer_extension = OpenApiSerializerExtension.get_match(serializer)
if serializer_extension and not bypass_extensions:
schema = serializer_extension.map_serializer(self, direction)
else:
schema = self._map_basic_serializer(serializer, direction)
return self._postprocess_serializer_schema(schema, serializer, direction)
def _postprocess_serializer_schema(self, schema, serializer, direction):
"""
postprocess generated schema for component splitting, if enabled.
does only apply to direct component schemas and not intermediate schemas
like components composed of sub-component via e.g. oneOf.
"""
if not spectacular_settings.COMPONENT_SPLIT_REQUEST:
return schema
properties = schema.get('properties', [])
required = schema.get('required', [])
for prop_name in list(properties):
if direction == 'request' and properties[prop_name].get('readOnly'):
del schema['properties'][prop_name]
if prop_name in required:
required.remove(prop_name)
if direction == 'response' and properties[prop_name].get('writeOnly'):
del schema['properties'][prop_name]
if prop_name in required:
required.remove(prop_name)
# remove empty listing as it violates schema specification
if 'required' in schema and not required:
del schema['required']
return schema
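    # Illustrative sketch (added for clarity, not part of the original code): with
    # COMPONENT_SPLIT_REQUEST enabled, a schema like
    #   {'properties': {'id': {..., 'readOnly': True}, 'name': {...}}, 'required': ['id', 'name']}
    # would, for direction == 'request', lose the read-only 'id' property and drop it from
    # 'required', leaving roughly {'properties': {'name': {...}}, 'required': ['name']}.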
def _get_serializer_field_meta(self, field):
if not isinstance(field, serializers.Field):
return {}
meta = {}
if field.read_only:
meta['readOnly'] = True
if field.write_only:
meta['writeOnly'] = True
if field.allow_null:
meta['nullable'] = True
if field.default is not None and field.default != empty and not callable(field.default):
default = field.to_representation(field.default)
if isinstance(default, set):
default = list(default)
meta['default'] = default
if field.label and not is_trivial_string_variation(field.label, field.field_name):
meta['title'] = str(field.label)
if field.help_text:
meta['description'] = str(field.help_text)
return meta
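    # Illustrative sketch (assumed example values, not from the original code): a field such as
    #   serializers.CharField(read_only=True, allow_null=True, help_text='Public handle')
    # would yield roughly {'readOnly': True, 'nullable': True, 'description': 'Public handle'}.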
def _map_basic_serializer(self, serializer, direction):
serializer = force_instance(serializer)
required = set()
properties = {}
for field in serializer.fields.values():
if isinstance(field, serializers.HiddenField):
continue
if field.field_name in get_override(serializer, 'exclude_fields', []):
continue
schema = self._map_serializer_field(field, direction)
# skip field if there is no schema for the direction
if schema is None:
continue
add_to_required = (
field.required
or (schema.get('readOnly') and not spectacular_settings.COMPONENT_NO_READ_ONLY_REQUIRED)
)
if add_to_required:
required.add(field.field_name)
self._map_field_validators(field, schema)
if field.field_name in get_override(serializer, 'deprecate_fields', []):
schema['deprecated'] = True
properties[field.field_name] = safe_ref(schema)
if is_patched_serializer(serializer, direction):
required = []
return build_object_type(
properties=properties,
required=required,
description=get_doc(serializer.__class__),
)
def _map_field_validators(self, field, schema):
for v in field.validators:
if isinstance(v, validators.EmailValidator):
schema['format'] = 'email'
elif isinstance(v, validators.URLValidator):
schema['format'] = 'uri'
elif isinstance(v, validators.RegexValidator):
pattern = v.regex.pattern.encode('ascii', 'backslashreplace').decode()
pattern = pattern.replace(r'\x', r'\u00') # unify escaping
pattern = pattern.replace(r'\Z', '$').replace(r'\A', '^') # ECMA anchors
schema['pattern'] = pattern
elif isinstance(v, validators.MaxLengthValidator):
attr_name = 'maxLength'
if isinstance(field, serializers.ListField):
attr_name = 'maxItems'
schema[attr_name] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MinLengthValidator):
attr_name = 'minLength'
if isinstance(field, serializers.ListField):
attr_name = 'minItems'
schema[attr_name] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MaxValueValidator):
schema['maximum'] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MinValueValidator):
schema['minimum'] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.DecimalValidator):
if v.max_digits:
digits = v.max_digits
if v.decimal_places is not None and v.decimal_places > 0:
digits -= v.decimal_places
schema['maximum'] = int(digits * '9') + 1
schema['minimum'] = -schema['maximum']
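    # Illustrative sketch (added for clarity, not part of the original code): how common
    # validators map under the rules above.
    #   validators.MaxLengthValidator(50) on a CharField  ->  schema['maxLength'] = 50
    #   validators.MaxLengthValidator(50) on a ListField  ->  schema['maxItems'] = 50
    #   validators.RegexValidator(r'^\d+\Z')              ->  schema['pattern'] = '^\d+$'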
def _map_response_type_hint(self, method):
hint = get_override(method, 'field') or get_type_hints(method).get('return')
if is_serializer(hint) or is_field(hint):
return self._map_serializer_field(force_instance(hint), 'response')
if isinstance(hint, dict):
return hint
try:
return resolve_type_hint(hint)
except UnableToProceedError:
warn(
f'unable to resolve type hint for function "{method.__name__}". Consider '
f'using a type hint or @extend_schema_field. Defaulting to string.'
)
return build_basic_type(OpenApiTypes.STR)
def _get_paginator(self):
pagination_class = getattr(self.view, 'pagination_class', None)
if pagination_class:
return pagination_class()
return None
def map_parsers(self):
return list(dict.fromkeys([p.media_type for p in self.view.get_parsers()]))
def map_renderers(self, attribute):
assert attribute in ['media_type', 'format']
return list(dict.fromkeys([
getattr(r, attribute).split(';')[0]
for r in self.view.get_renderers()
if not isinstance(r, renderers.BrowsableAPIRenderer) and getattr(r, attribute, None)
]))
def _get_serializer(self):
view = self.view
try:
if isinstance(view, GenericAPIView):
                # try to circumvent queryset issues with calling get_serializer. if the view has NOT
                # overridden get_serializer, it's safe to use get_serializer_class.
if view.__class__.get_serializer == GenericAPIView.get_serializer:
return view.get_serializer_class()()
return view.get_serializer()
elif isinstance(view, APIView):
# APIView does not implement the required interface, but be lenient and make
# good guesses before giving up and emitting a warning.
if callable(getattr(view, 'get_serializer', None)):
return view.get_serializer()
elif callable(getattr(view, 'get_serializer_class', None)):
return view.get_serializer_class()()
elif hasattr(view, 'serializer_class'):
return view.serializer_class
else:
error(
'unable to guess serializer. This is graceful '
'fallback handling for APIViews. Consider using GenericAPIView as view base '
'class, if view is under your control. Ignoring view for now. '
)
else:
error('Encountered unknown view base class. Please report this issue. Ignoring for now')
except Exception as exc:
error(
f'exception raised while getting serializer. Hint: '
f'Is get_serializer_class() returning None or is get_queryset() not working without '
f'a request? Ignoring the view for now. (Exception: {exc})'
)
def get_examples(self):
return []
def _get_examples(self, serializer, direction, media_type, status_code=None, extras=None):
examples = self.get_examples()
if not examples:
if is_list_serializer(serializer):
examples = get_override(serializer.child, 'examples', [])
elif is_serializer(serializer):
examples = get_override(serializer, 'examples', [])
# additional examples provided via OpenApiResponse are merged with the other methods
extras = extras or []
filtered_examples = []
for example in examples + extras:
if direction == 'request' and example.response_only:
continue
if direction == 'response' and example.request_only:
continue
if media_type and media_type != example.media_type:
continue
if status_code and status_code not in example.status_codes:
continue
filtered_examples.append(example)
return build_examples_list(filtered_examples)
def _get_request_body(self):
# only unsafe methods can have a body
if self.method not in ('PUT', 'PATCH', 'POST'):
return None
request_serializer = self.get_request_serializer()
if isinstance(request_serializer, dict):
content = []
request_body_required = True
for media_type, serializer in request_serializer.items():
schema, partial_request_body_required = self._get_request_for_media_type(serializer)
examples = self._get_examples(serializer, 'request', media_type)
if schema is None:
continue
content.append((media_type, schema, examples))
request_body_required &= partial_request_body_required
else:
schema, request_body_required = self._get_request_for_media_type(request_serializer)
if schema is None:
return None
content = [
(media_type, schema, self._get_examples(request_serializer, 'request', media_type))
for media_type in self.map_parsers()
]
request_body = {
'content': {
media_type: build_media_type_object(schema, examples)
for media_type, schema, examples in content
}
}
if request_body_required:
request_body['required'] = request_body_required
return request_body
def _get_request_for_media_type(self, serializer):
serializer = force_instance(serializer)
if is_list_serializer(serializer):
if is_serializer(serializer.child):
component = self.resolve_serializer(serializer.child, 'request')
schema = build_array_type(component.ref)
else:
schema = build_array_type(self._map_serializer_field(serializer.child, 'request'))
request_body_required = True
elif is_serializer(serializer):
if self.method == 'PATCH':
# we simulate what DRF is doing: the entry serializer is set to partial
# for PATCH requests. serializer instances received via extend_schema
# may be reused; prevent race conditions by modifying a copy.
serializer = copy.copy(serializer)
serializer.partial = True
component = self.resolve_serializer(serializer, 'request')
if not component.schema:
# serializer is empty so skip content enumeration
return None, False
schema = component.ref
# request body is only required if any required property is not read-only
readonly_props = [
p for p, s in component.schema.get('properties', {}).items() if s.get('readOnly')
]
required_props = component.schema.get('required', [])
request_body_required = any(req not in readonly_props for req in required_props)
elif is_basic_type(serializer):
schema = build_basic_type(serializer)
request_body_required = False
elif isinstance(serializer, dict):
# bypass processing and use given schema directly
schema = serializer
request_body_required = False
else:
warn(
f'could not resolve request body for {self.method} {self.path}. Defaulting to generic '
'free-form object. (Maybe annotate a Serializer class?)'
)
schema = build_generic_type()
schema['description'] = 'Unspecified request body'
request_body_required = False
return schema, request_body_required
def _get_response_bodies(self):
response_serializers = self.get_response_serializers()
if (
is_serializer(response_serializers)
or is_basic_type(response_serializers)
or isinstance(response_serializers, OpenApiResponse)
):
if self.method == 'DELETE':
return {'204': {'description': _('No response body')}}
if self._is_create_operation():
return {'201': self._get_response_for_code(response_serializers, '201')}
return {'200': self._get_response_for_code(response_serializers, '200')}
elif isinstance(response_serializers, dict):
# custom handling for overriding default return codes with @extend_schema
responses = {}
for code, serializer in response_serializers.items():
if isinstance(code, tuple):
code, media_types = str(code[0]), code[1:]
else:
code, media_types = str(code), None
content_response = self._get_response_for_code(serializer, code, media_types)
if code in responses:
responses[code]['content'].update(content_response['content'])
else:
responses[code] = content_response
return responses
else:
warn(
f'could not resolve "{response_serializers}" for {self.method} {self.path}. '
f'Expected either a serializer or some supported override mechanism. '
f'Defaulting to generic free-form object.'
)
schema = build_basic_type(OpenApiTypes.OBJECT)
schema['description'] = _('Unspecified response body')
return {'200': self._get_response_for_code(schema, '200')}
def _get_response_for_code(self, serializer, status_code, media_types=None):
if isinstance(serializer, OpenApiResponse):
serializer, description, examples = (
serializer.response, serializer.description, serializer.examples
)
else:
description, examples = '', []
serializer = force_instance(serializer)
headers = self._get_response_headers_for_code(status_code)
headers = {'headers': headers} if headers else {}
if not serializer:
return {**headers, 'description': description or _('No response body')}
elif is_list_serializer(serializer):
if is_serializer(serializer.child):
schema = self.resolve_serializer(serializer.child, 'response').ref
else:
schema = self._map_serializer_field(serializer.child, 'response')
elif is_serializer(serializer):
component = self.resolve_serializer(serializer, 'response')
if not component.schema:
return {**headers, 'description': description or _('No response body')}
schema = component.ref
elif is_basic_type(serializer):
schema = build_basic_type(serializer)
elif isinstance(serializer, dict):
# bypass processing and use given schema directly
schema = serializer
            # prevent invalid dict case in _is_list_view() as this is not a status_code dict.
serializer = None
else:
warn(
f'could not resolve "{serializer}" for {self.method} {self.path}. Expected either '
f'a serializer or some supported override mechanism. Defaulting to '
f'generic free-form object.'
)
schema = build_basic_type(OpenApiTypes.OBJECT)
schema['description'] = _('Unspecified response body')
if (
self._is_list_view(serializer)
and get_override(serializer, 'many') is not False
and ('200' <= status_code < '300' or spectacular_settings.ENABLE_LIST_MECHANICS_ON_NON_2XX)
):
schema = build_array_type(schema)
paginator = self._get_paginator()
if (
paginator
and is_serializer(serializer)
and (not is_list_serializer(serializer) or is_serializer(serializer.child))
):
paginated_name = f'Paginated{self._get_serializer_name(serializer, "response")}List'
component = ResolvedComponent(
name=paginated_name,
type=ResolvedComponent.SCHEMA,
schema=paginator.get_paginated_response_schema(schema),
object=serializer.child if is_list_serializer(serializer) else serializer,
)
self.registry.register_on_missing(component)
schema = component.ref
elif paginator:
schema = paginator.get_paginated_response_schema(schema)
if not media_types:
media_types = self.map_renderers('media_type')
return {
**headers,
'content': {
media_type: build_media_type_object(
schema,
self._get_examples(serializer, 'response', media_type, status_code, examples)
)
for media_type in media_types
},
'description': description
}
def _get_response_headers_for_code(self, status_code) -> dict:
result = {}
for parameter in self.get_override_parameters():
if not isinstance(parameter, OpenApiParameter):
continue
if not parameter.response:
continue
if (
isinstance(parameter.response, list)
and status_code not in [str(code) for code in parameter.response]
):
continue
if is_basic_type(parameter.type):
schema = build_basic_type(parameter.type)
elif is_serializer(parameter.type):
schema = self.resolve_serializer(parameter.type, 'response').ref
else:
schema = parameter.type
if parameter.location not in [OpenApiParameter.HEADER, OpenApiParameter.COOKIE]:
warn(f'incompatible location type ignored for response parameter {parameter.name}')
parameter_type = build_parameter_type(
name=parameter.name,
schema=schema,
location=parameter.location,
required=parameter.required,
description=parameter.description,
enum=parameter.enum,
deprecated=parameter.deprecated,
style=parameter.style,
explode=parameter.explode,
default=parameter.default,
examples=build_examples_list(parameter.examples),
)
del parameter_type['name']
del parameter_type['in']
result[parameter.name] = parameter_type
return result
def _get_serializer_name(self, serializer, direction):
serializer_extension = OpenApiSerializerExtension.get_match(serializer)
if serializer_extension and serializer_extension.get_name():
# library override mechanisms
name = serializer_extension.get_name()
elif getattr(getattr(serializer, 'Meta', None), 'ref_name', None) is not None:
# local override mechanisms. for compatibility with drf-yasg we support meta ref_name,
# though we do not support the serializer inlining feature.
# https://drf-yasg.readthedocs.io/en/stable/custom_spec.html#serializer-meta-nested-class
name = serializer.Meta.ref_name
elif is_list_serializer(serializer):
return self._get_serializer_name(serializer.child, direction)
else:
name = serializer.__class__.__name__
if name.endswith('Serializer'):
name = name[:-10]
if is_patched_serializer(serializer, direction):
name = 'Patched' + name
if direction == 'request' and spectacular_settings.COMPONENT_SPLIT_REQUEST:
name = name + 'Request'
return name
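    # Illustrative sketch (assumed names, not from the original code): a class called
    # UserSerializer resolves to the component name 'User'; when patched (partial PATCH request)
    # it becomes 'PatchedUser', and with COMPONENT_SPLIT_REQUEST and direction == 'request'
    # it becomes 'UserRequest' ('PatchedUserRequest' when both apply).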
def resolve_serializer(self, serializer, direction) -> ResolvedComponent:
assert is_serializer(serializer), (
f'internal assumption violated because we expected a serializer here and instead '
f'got a "{serializer}". This may be the result of another app doing some unexpected '
f'magic or an invalid internal call. Feel free to report this as a bug at '
f'https://github.com/tfranzel/drf-spectacular/issues '
)
serializer = force_instance(serializer)
with add_trace_message(serializer.__class__.__name__):
component = ResolvedComponent(
name=self._get_serializer_name(serializer, direction),
type=ResolvedComponent.SCHEMA,
object=serializer,
)
if component in self.registry:
return self.registry[component] # return component with schema
self.registry.register(component)
component.schema = self._map_serializer(serializer, direction)
discard_component = (
# components with empty schemas serve no purpose
not component.schema
                # concrete components without properties are likely only transactional, so discard them
or (
component.schema.get('type') == 'object'
and not component.schema.get('properties')
and 'additionalProperties' not in component.schema
)
)
if discard_component:
del self.registry[component]
return ResolvedComponent(None, None) # sentinel
return component
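    # Illustrative note (added for clarity, not part of the original code): resolve_serializer
    # registers the component on first use and returns a ResolvedComponent whose .ref is an
    # OpenAPI reference object, e.g. roughly {'$ref': '#/components/schemas/User'} for a
    # hypothetical UserSerializer; empty or property-less schemas are discarded and a
    # (None, None) sentinel is returned instead.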
|
import csv
import io
import json
import logging
import sys
from datetime import date, datetime, timedelta
import hug
from peewee import fn, DatabaseError
from api import api
from access_control.access_control import UserRoles, get_or_create_auto_user
from config import config
from db import directives
from db.migration import migrate_db, init_database
from db.model import TimeSlot, Appointment, User, Booking, Migration, FrontendConfig
from secret_token.secret_token import get_random_string, hash_pw
log = logging.getLogger('cli')
@hug.default_output_format(apply_globally=False, cli=True, http=False)
def cli_output(data):
result = io.StringIO()
writer = csv.DictWriter(result, fieldnames=data[0].keys(), delimiter='\t')
writer.writeheader()
writer.writerows(data)
return result.getvalue().encode('utf8')
@hug.cli()
def create_appointments(
db: directives.PeeweeSession,
day: hug.types.number,
month: hug.types.number,
year: hug.types.number = date.today().year,
start_hour: hug.types.number = 8,
start_min: hug.types.number = 30,
num_slots: hug.types.number = 13,
num_appointment_per_slot: hug.types.number = 8,
slot_duration_min: hug.types.number = 30
):
"""
[--day] <number> [--month] <number> [--year <number=date.today().year>] [--start_hour <number=8>] [--start_min <number=30>] [--num_slots <number=13>] [--num_appointment_per_slot <number=8>] [--slot_duration_min <number=30>]
    creates timeslots and their corresponding appointments
"""
with db.atomic():
for i in range(num_slots):
ts = TimeSlot.create(
start_date_time=datetime(year, month, day, start_hour, start_min, tzinfo=None) + timedelta(
minutes=i * slot_duration_min),
length_min=slot_duration_min)
for _ in range(num_appointment_per_slot):
Appointment.create(booked=False, time_slot=ts)
ts.save()
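# Usage sketch (hypothetical invocation, not part of the original file): assuming this module is
# exposed through hug's CLI and saved as cli.py, the command above could be called roughly like
#   hug -f cli.py -c create_appointments --day 24 --month 12 --year 2021 --num_slots 4
# which would create 4 consecutive 30-minute time slots with 8 free appointments each.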
@hug.cli()
def delete_timeslots(
db: directives.PeeweeSession,
year: hug.types.number,
month: hug.types.number,
day: hug.types.number,
start_hour: hug.types.number,
start_min: hug.types.number,
num_slots: hug.types.number,
for_real: hug.types.boolean = False
):
"""
[--year] <number> [--month] <number> [--day] <number> [--start_hour] <number> [--start_min] <number> [--num_slots] <number> [--for_real]
    deletes timeslots and their corresponding appointments if they are not booked
"""
with db.atomic():
dto = datetime(year, month, day, start_hour, start_min, tzinfo=None)
tomorrow = datetime(year, month, day, tzinfo=None) + timedelta(days=1)
ts = TimeSlot.select().where(
(TimeSlot.start_date_time >= dto) & (TimeSlot.start_date_time < tomorrow)).order_by(
TimeSlot.start_date_time).limit(num_slots)
if not for_real:
log.info(
f"I would delete the following time slots - run with --for_real if these are correct")
else:
log.info(f"Deleting the following time slots")
tsids_to_delete = []
for t in ts:
tsids_to_delete.append(t.id)
log.info(f"ID: {t.id} - {t.start_date_time}")
if not tsids_to_delete:
log.error("No matching timeslots found! Exiting.")
sys.exit(1)
apts = Appointment.select().where(Appointment.time_slot.in_(tsids_to_delete))
        log.info(
            f"this {'will' if for_real else 'would'} affect the following appointments")
apts_to_delete = []
for apt in apts:
apts_to_delete.append(apt)
            log.info(
                f"ID: {apt.id} - {apt.time_slot.start_date_time}: {'booked!' if apt.booked else 'free'}")
if all(not apt.booked for apt in apts_to_delete):
            log.info(
                f"none of these appointments are booked, so I {'will' if for_real else 'would'} delete them")
if for_real:
aq = Appointment.delete().where(
Appointment.id.in_([a.id for a in apts_to_delete]))
tq = TimeSlot.delete().where(TimeSlot.id.in_(tsids_to_delete))
aq.execute()
tq.execute()
log.info("Done!")
else:
            log.error(
                f"Some of these appointments are already booked, {'will' if for_real else 'would'} not delete!")
def _add_one_user(db: directives.PeeweeSession, username: hug.types.text, password: hug.types.text = None,
role: hug.types.one_of(
UserRoles.user_roles()) = UserRoles.USER,
coupons: hug.types.number = 10):
with db.atomic():
name = username.lower()
salt = get_random_string(2)
secret_password = password or get_random_string(12)
hashed_password = hash_pw(name, salt, secret_password)
user = User.create(user_name=name, role=role, salt=salt,
password=hashed_password, coupons=coupons)
user.save()
return {"name": user.user_name, "password": secret_password}
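# Illustrative note (not part of the original file): _add_one_user returns the plain-text
# password so it can be handed to the new user, e.g. roughly
#   {"name": "jane", "password": "k3x9..."}   # password is random unless one was supplied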
@hug.cli()
def add_user(db: directives.PeeweeSession, username: hug.types.text, password: hug.types.text = None,
role: hug.types.one_of(UserRoles.user_roles()) = UserRoles.USER,
coupons: hug.types.number = 10):
"""
[--username] <string> [--password] <string> [--role <one_of(UserRoles.user_roles()) = UserRoles.USER>] [--coupons <number=10>]; creates a user
"""
return [_add_one_user(db, username, password, role, coupons)]
@hug.cli()
def get_user_roles():
"""
get a list of available user_roles
"""
return UserRoles.user_roles()
@hug.cli()
def add_users(db: directives.PeeweeSession, filename: hug.types.text,
role: hug.types.one_of(UserRoles.user_roles()) = UserRoles.USER):
"""
[--filename] <string> [--role <one_of(UserRoles.user_roles()) = UserRoles.USER>]; imports usernames from the file, one user per line, with a default of 10 coupons
"""
with open(filename) as f:
return [_add_one_user(db, line.strip(), role=role) for line in f]
@hug.cli()
def change_user_pw(db: directives.PeeweeSession, username: hug.types.text, password: hug.types.text, for_real: hug.types.smart_boolean = False):
"""
    [--username] <string> [--password] <string> [--for_real]; changes the password for the given user
"""
if not for_real:
print(
f"this would change {username}'s pw to {password}. Run with --for_real if you're sure.")
sys.exit(1)
with db.atomic():
name = username.lower()
salt = get_random_string(2)
secret_password = password
hashed_password = hash_pw(name, salt, secret_password)
user = User.get(User.user_name == username)
user.salt = salt
user.password = hashed_password
user.save()
print(f"{user.user_name}'s pw successfully changed.")
@hug.cli()
def init_db(db: directives.PeeweeSession, for_real: hug.types.smart_boolean = False):
"""
[--for_real]; initializes the database
"""
if not for_real:
print('this will create the database (potentially destroying data), run with --for_real, if you are sure '
'*and* have a backup')
sys.exit(1)
else:
with db.atomic():
try:
migration = Migration.get()
print(f'Migration level is already set to version {migration.version} - implying the db has already been '
f'initialized. Run command `run_migrations` instead.')
sys.exit(1)
except DatabaseError:
init_database()
@hug.cli()
def run_migrations(for_real: hug.types.smart_boolean = False):
"""
[--for_real]; runs the database migrations
"""
if not for_real:
print('this will migrate the database (potentially destroying data), run with --for_real, if you are sure '
'*and* have a backup')
sys.exit(1)
else:
print('Start database migration...')
migrate_db()
print('Done.')
@hug.cli()
def get_coupon_state():
"""
get a list of all users and their bookings and remaining coupons
"""
ret = []
for user in User.select():
bookings = Booking.select().where(
user.user_name == Booking.booked_by)
ret.append({
"name": user.user_name,
"num_bookings": len(bookings),
"coupons": user.coupons
})
return ret
@hug.cli()
def set_coupon_count(db: directives.PeeweeSession, user_name: hug.types.text, value: hug.types.number):
"""
[--user_name] <string> [--value] <number>; set the user coupon_count to <value>
"""
with db.atomic():
user = User.get(User.user_name == user_name)
user.coupons = value
user.save()
@hug.cli()
def inc_coupon_count(db: directives.PeeweeSession, user_name: hug.types.text, increment: hug.types.number):
"""
[--user_name] <string> [--increment] <number>; increment the user coupon_count, to decrement give a negative number
"""
with db.atomic():
user = User.get(User.user_name == user_name)
user.coupons += increment
user.save()
@hug.cli()
def cancel_booking(db: directives.PeeweeSession, secret: hug.types.text, start_date_time: hug.types.text, for_real: hug.types.smart_boolean = False):
"""
[--secret] <string> [--start_date_time] <ISO datetime string> [--for_real]; cancel the booking with given secret at given time
"""
with db.atomic():
sdt = datetime.fromisoformat(start_date_time).replace(tzinfo=None)
timeslot = TimeSlot.get(TimeSlot.start_date_time == sdt)
booking = Booking.select(Booking).join(Appointment).where(
(Booking.secret == secret) &
(Appointment.time_slot == timeslot)).get()
if not for_real:
print(f"This would delete the booking with id '{booking.id}' and secret '{booking.secret}'. Run with "
f"--for_real if you are sure.")
sys.exit(1)
else:
print(
f"Deleting the booking with id '{booking.id}' and secret '{booking.secret}'.")
booking.appointment.booked = False
booking.appointment.save()
q = Booking.delete().where(Booking.id == booking.id)
q.execute()
print("Done.")
@hug.cli()
def set_frontend_config(db: directives.PeeweeSession, instance_name: hug.types.text, long_instance_name: hug.types.text,
contact_info_bookings: hug.types.text, contact_info_appointments: hug.types.text = None,
form_fields: hug.types.text = "base,address,dayOfBirth,reason",
for_real: hug.types.smart_boolean = False):
"""
[--instance_name] <string> [--long_instance_name] <string> [--contact_info_bookings] <string> [--contact_info_appointments <string=None>] [--form_fields <string="base,address,dayOfBirth,reason">] [--for_real]
"""
with db.atomic():
if not contact_info_appointments:
appointments_contact = contact_info_bookings
else:
appointments_contact = contact_info_appointments
template = {
"instanceName": f"{instance_name}",
"longInstanceName": f"{long_instance_name}",
"contactInfoCoupons": f"{contact_info_bookings}",
"contactInfoAppointment": f"{appointments_contact}",
"formFields": form_fields.split(","),
}
if not for_real:
print(f"This would update the config with '{json.dumps(template, indent=2)}'. Run with --for_real if you "
f"are sure.")
sys.exit(1)
else:
print(
f"Updating the config with '{json.dumps(template, indent=2)}'.")
try:
config = FrontendConfig.get()
config.config = template
except FrontendConfig.DoesNotExist:
config = FrontendConfig.create(config=template)
config.save()
print("Done.")
@hug.cli()
def load_frontend_config(db: directives.PeeweeSession, frontend_config_file: hug.types.text,
for_real: hug.types.smart_boolean = False):
"""
[--frontend_config_file] <file> [--for_real] loads the config file to the database if run with --for_real
To check the frontend_config, omit the --for_real flag
"""
with db.atomic():
with open(frontend_config_file, 'r') as j_file:
try:
new_config = json.load(j_file)
if 'instanceName' not in new_config or 'longInstanceName' not in new_config or \
'contactInfoCoupons' not in new_config \
or 'contactInfoAppointment' not in new_config or 'formFields' not in new_config:
print(
f"Given file '{json.dumps(new_config, indent=2)}' missing required fields!")
sys.exit(1)
elif type(new_config['formFields']) != list:
print("field formFields is not a list!")
sys.exit(1)
except json.JSONDecodeError as e:
                print("The file can not be decoded as JSON!")
sys.exit(1)
if not for_real:
print(
f"This would update the config with '{json.dumps(new_config, indent=2)}'. "
f"Run with --for_real if you are sure.")
sys.exit(1)
else:
print(
f"Updating the config with '{json.dumps(new_config, indent=2)}'.")
try:
config = FrontendConfig.get()
config.config = new_config
except FrontendConfig.DoesNotExist:
config = FrontendConfig.create(config=new_config)
config.save()
print("Done.")
@hug.cli(output=hug.output_format.pretty_json)
def get_bookings_created_at(db: directives.PeeweeSession, booked_at: hug.types.text):
"""
[--booked_at <ISO datetime string>] get all bookings made at specific day or time
Get bookings for a day with yyyy-mm-dd or one specific booking at yyyy-mm-ddThh:mm:ss.mmmmmm
"""
with db.atomic():
query = Booking.select(
Booking,
Appointment.time_slot.start_date_time.alias("start_date_time")
).join(Appointment).join(TimeSlot)
booked_start = datetime.fromisoformat(booked_at).replace(tzinfo=None)
if str(booked_start.date()) == booked_at:
# booked_at is yyyy-mm-dd
booked_end = booked_start.date() + timedelta(days=1)
bookings = query.where(
Booking.booked_at.between(booked_start, booked_end))
else:
# booked_at is yyyy-mm-ddThh:mm:ss.mmmmmm
bookings = query.where(Booking.booked_at == booked_start)
result = []
for booking in bookings.dicts().iterator():
del booking["appointment"]
result.append({**booking})
return result
def get_free_timeslots_between(db: directives.PeeweeSession, start: datetime, end: datetime):
with db.atomic():
now = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
slots = TimeSlot \
.select(TimeSlot.start_date_time, TimeSlot.length_min,
fn.count(Appointment.time_slot).alias("free_appointments")) \
.join(Appointment) \
.where(
(TimeSlot.start_date_time >= start) & (TimeSlot.start_date_time <= end) &
(Appointment.claim_token.is_null() | (Appointment.claimed_at +
timedelta(
minutes=config.Settings.claim_timeout_min) < now)) &
(Appointment.booked == False)
) \
.group_by(TimeSlot.start_date_time, TimeSlot.length_min) \
.order_by(TimeSlot.start_date_time) \
# @formatter:on
return [{"startDateTime": str(slot.start_date_time)} for slot in slots]
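# Illustrative note (not part of the original file): the helper returns a JSON-friendly list of
# slot start times, e.g. roughly
#   [{"startDateTime": "2021-12-24 08:30:00"}, {"startDateTime": "2021-12-24 09:00:00"}]
# Claimed-but-expired appointments count as free again once claim_timeout_min has passed.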
@hug.cli(output=hug.output_format.pretty_json)
def free_slots_at(db: directives.PeeweeSession, at_datetime: hug.types.text = None, max_days_after: hug.types.number = 2):
"""
[--at_datetime <ISO datetime string=None>] [--max_days_after <number=2>] returns a list of free slots after given date, up to date + max_days_after
"""
start = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
if at_datetime is not None:
start = datetime.fromisoformat(at_datetime).replace(tzinfo=None)
end = start + timedelta(days=max_days_after)
return get_free_timeslots_between(db, start, end)
@hug.cli(output=hug.output_format.pretty_json)
def free_slots_before(db: directives.PeeweeSession, at_datetime: hug.types.text = None, max_days_before: hug.types.number = 2):
"""
[--at_datetime <ISO datetime string=None>] [--max_days_before <number=2>] returns a list of free slots before given date, up to date - max_days_before
"""
end = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
if at_datetime is not None:
end = datetime.fromisoformat(at_datetime).replace(tzinfo=None)
start = end - timedelta(days=max_days_before)
return get_free_timeslots_between(db, start, end)
@hug.cli(output=hug.output_format.pretty_json)
def claim_appointment(db: directives.PeeweeSession, start_date_time: hug.types.text, user: hug.types.text):
"""
[--start_date_time] START_DATE_TIME (ISO string) [--user] USER_NAME
"""
try:
api_claim_appointment = api.claim_appointment(
db, start_date_time, get_or_create_auto_user(
db, UserRoles.USER, user)
)
except hug.HTTPGone as e:
return None
return api_claim_appointment
@hug.cli(output=hug.output_format.pretty_json)
def has_booked_by(db: directives.PeeweeSession, user: hug.types.text):
"""
USER_NAME; checks if there are bookings made by that user
"""
return Booking.select(Booking).where(Booking.booked_by == user).count() > 0
@hug.cli(output=hug.output_format.pretty_json)
def has_booking(db: directives.PeeweeSession, booking: hug.types.json):
"""
BOOKING_JSON; check if a booking exists for the booked person
"""
try:
return Booking.select(Booking).where(
(Booking.surname == booking["surname"])
& (Booking.first_name == booking["first_name"])
& (Booking.birthday == booking["birthday"])
& (Booking.phone == booking["phone"])
& (Booking.street == booking["street"])
& (Booking.street_number == booking["street_number"])
& (Booking.post_code == booking["post_code"])
& (Booking.city == booking["city"])
).count() > 0
except KeyError as e:
print(f"Key {e} is missing in booking.")
return None
@hug.cli(output=hug.output_format.pretty_json)
def book_followup(db: directives.PeeweeSession, booking: hug.types.json, delta_days: hug.types.number = 21, day_range: hug.types.number = 2):
"""
BOOKING_JSON [--delta_days <number=21>][--day_range <number=2>]
"""
if has_booked_by(db, booking["booked_by"]):
        print(
            f"User {booking['booked_by']} already booked at least one appointment.")
return None
start_date = datetime.fromisoformat(
booking["start_date_time"]).replace(tzinfo=None)
followup_date = start_date + timedelta(days=delta_days)
    slots_after = free_slots_at(
        db, str(followup_date), day_range)
    slots_before = free_slots_before(
        db, str(followup_date), day_range)
slot_count = len(slots_before) + len(slots_after)
if slot_count == 0:
print(
f"No free slots available for booking: {booking} at '{followup_date}' in range of {day_range} days")
return None
found_time = None
tries = -1
claim_token = None
    while claim_token is None and tries < len(slots_after) - 1:
tries += 1
found_time = slots_after[tries]["startDateTime"]
claim_token = claim_appointment(
db, found_time, booking["booked_by"])
tries = -1
    while claim_token is None and tries < len(slots_before) - 1:
tries += 1
found_time = slots_before[tries]["startDateTime"]
claim_token = claim_appointment(
db, found_time, booking["booked_by"])
if claim_token is None:
print(
f"Failed to claim slot for booking: {booking} at '{followup_date}' in range of {day_range} days")
return None
booking["name"] = booking["surname"]
booking["claim_token"] = claim_token
booking["start_date_time"] = found_time
print(f"Book appointment with data {booking}")
booked = api.book_appointment(db, booking, get_or_create_auto_user(
db, UserRoles.USER, booking["booked_by"]))
return booked
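# Illustrative sketch (assumed field names, not part of the original file): book_followup expects
# a booking dict as produced by get_bookings_created_at, e.g. roughly
#   {"booked_by": "user1", "start_date_time": "2021-12-03 08:30:00",
#    "surname": "Doe", "first_name": "Jane", ...}
# and books the nearest free slot around start_date_time + delta_days.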
@hug.cli()
def batch_book_followup(db: directives.PeeweeSession, delta_days: hug.types.number = 21, day_range: hug.types.number = 2):
"""
Expects result from get_bookings_created_at piped into stdin
delta_days: days after the first appointment
day_range: will search appointments in that range (+ or -) of above date (nearest will be taken)
"""
bookings = json.load(sys.stdin)
for booking in bookings:
booked = book_followup(db, booking, delta_days, day_range)
if booked is not None:
booked["time_slot"] = str(booked["time_slot"])
print(f"Booked appointment {booked}")
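# Usage sketch (hypothetical pipeline, not part of the original file): the two commands are meant
# to be chained, roughly
#   hug -f cli.py -c get_bookings_created_at --booked_at 2021-12-03 \
#       | hug -f cli.py -c batch_book_followup --delta_days 21 --day_range 2
# where cli.py stands for whatever this module is saved as.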
|
import csv
import io
import json
import logging
import sys
from datetime import date, datetime, timedelta
import hug
from peewee import fn, DatabaseError
from api import api
from access_control.access_control import UserRoles, get_or_create_auto_user
from config import config
from db import directives
from db.migration import migrate_db, init_database
from db.model import TimeSlot, Appointment, User, Booking, Migration, FrontendConfig
from secret_token.secret_token import get_random_string, hash_pw
log = logging.getLogger('cli')
@hug.default_output_format(apply_globally=False, cli=True, http=False)
def cli_output(data):
result = io.StringIO()
writer = csv.DictWriter(result, fieldnames=data[0].keys(), delimiter='\t')
writer.writeheader()
writer.writerows(data)
return result.getvalue().encode('utf8')
@hug.cli()
def create_appointments(
db: directives.PeeweeSession,
day: hug.types.number,
month: hug.types.number,
year: hug.types.number = date.today().year,
start_hour: hug.types.number = 8,
start_min: hug.types.number = 30,
num_slots: hug.types.number = 13,
num_appointment_per_slot: hug.types.number = 8,
slot_duration_min: hug.types.number = 30
):
"""
[--day] <number> [--month] <number> [--year <number=date.today().year>] [--start_hour <number=8>] [--start_min <number=30>] [--num_slots <number=13>] [--num_appointment_per_slot <number=8>] [--slot_duration_min <number=30>]
    creates timeslots and their corresponding appointments
"""
with db.atomic():
for i in range(num_slots):
ts = TimeSlot.create(
start_date_time=datetime(year, month, day, start_hour, start_min, tzinfo=None) + timedelta(
minutes=i * slot_duration_min),
length_min=slot_duration_min)
for _ in range(num_appointment_per_slot):
Appointment.create(booked=False, time_slot=ts)
ts.save()
@hug.cli()
def delete_timeslots(
db: directives.PeeweeSession,
year: hug.types.number,
month: hug.types.number,
day: hug.types.number,
start_hour: hug.types.number,
start_min: hug.types.number,
num_slots: hug.types.number,
for_real: hug.types.boolean = False
):
"""
[--year] <number> [--month] <number> [--day] <number> [--start_hour] <number> [--start_min] <number> [--num_slots] <number> [--for_real]
    deletes timeslots and their corresponding appointments if they are not booked
"""
with db.atomic():
dto = datetime(year, month, day, start_hour, start_min, tzinfo=None)
tomorrow = datetime(year, month, day, tzinfo=None) + timedelta(days=1)
ts = TimeSlot.select().where(
(TimeSlot.start_date_time >= dto) & (TimeSlot.start_date_time < tomorrow)).order_by(
TimeSlot.start_date_time).limit(num_slots)
if not for_real:
log.info(
f"I would delete the following time slots - run with --for_real if these are correct")
else:
log.info(f"Deleting the following time slots")
tsids_to_delete = []
for t in ts:
tsids_to_delete.append(t.id)
log.info(f"ID: {t.id} - {t.start_date_time}")
if not tsids_to_delete:
log.error("No matching timeslots found! Exiting.")
sys.exit(1)
apts = Appointment.select().where(Appointment.time_slot.in_(tsids_to_delete))
log.info(
f"this {'will' if for_real else 'would'} affect the following appointments")
apts_to_delete = []
for apt in apts:
apts_to_delete.append(apt)
log.info(
f"ID: {apt.id} - {apt.time_slot.start_date_time}: {'booked!' if apt.booked else 'free'}")
if all(not apt.booked for apt in apts_to_delete):
log.info(
f"none of these appointments are booked, so I {'will' if for_real else 'would'} delete them")
if for_real:
aq = Appointment.delete().where(
Appointment.id.in_([a.id for a in apts_to_delete]))
tq = TimeSlot.delete().where(TimeSlot.id.in_(tsids_to_delete))
aq.execute()
tq.execute()
log.info("Done!")
else:
log.error(
f"Some of these appointments are already booked, {'will' if for_real else 'would'} not delete!")
def _add_one_user(db: directives.PeeweeSession, username: hug.types.text, password: hug.types.text = None,
role: hug.types.one_of(
UserRoles.user_roles()) = UserRoles.USER,
coupons: hug.types.number = 10):
with db.atomic():
name = username.lower()
salt = get_random_string(2)
secret_password = password or get_random_string(12)
hashed_password = hash_pw(name, salt, secret_password)
user = User.create(user_name=name, role=role, salt=salt,
password=hashed_password, coupons=coupons)
user.save()
return {"name": user.user_name, "password": secret_password}
@hug.cli()
def add_user(db: directives.PeeweeSession, username: hug.types.text, password: hug.types.text = None,
role: hug.types.one_of(UserRoles.user_roles()) = UserRoles.USER,
coupons: hug.types.number = 10):
"""
[--username] <string> [--password] <string> [--role <one_of(UserRoles.user_roles()) = UserRoles.USER>] [--coupons <number=10>]; creates a user
"""
return [_add_one_user(db, username, password, role, coupons)]
@hug.cli()
def get_user_roles():
"""
get a list of available user_roles
"""
return UserRoles.user_roles()
@hug.cli()
def add_users(db: directives.PeeweeSession, filename: hug.types.text,
role: hug.types.one_of(UserRoles.user_roles()) = UserRoles.USER):
"""
[--filename] <string> [--role <one_of(UserRoles.user_roles()) = UserRoles.USER>]; imports usernames from the file, one user per line, with a default of 10 coupons
"""
with open(filename) as f:
return [_add_one_user(db, line.strip(), role=role) for line in f]
@hug.cli()
def change_user_pw(db: directives.PeeweeSession, username: hug.types.text, password: hug.types.text, for_real: hug.types.smart_boolean = False):
"""
    [--username] <string> [--password] <string> [--for_real]; changes the password for the given user
"""
if not for_real:
print(
f"this would change {username}'s pw to {password}. Run with --for_real if you're sure.")
sys.exit(1)
with db.atomic():
name = username.lower()
salt = get_random_string(2)
secret_password = password
hashed_password = hash_pw(name, salt, secret_password)
user = User.get(User.user_name == username)
user.salt = salt
user.password = hashed_password
user.save()
print(f"{user.user_name}'s pw successfully changed.")
@hug.cli()
def init_db(db: directives.PeeweeSession, for_real: hug.types.smart_boolean = False):
"""
[--for_real]; initializes the database
"""
if not for_real:
print('this will create the database (potentially destroying data), run with --for_real, if you are sure '
'*and* have a backup')
sys.exit(1)
else:
with db.atomic():
try:
migration = Migration.get()
print(f'Migration level is already set to version {migration.version} - implying the db has already been '
f'initialized. Run command `run_migrations` instead.')
sys.exit(1)
except DatabaseError:
init_database()
@hug.cli()
def run_migrations(for_real: hug.types.smart_boolean = False):
"""
[--for_real]; runs the database migrations
"""
if not for_real:
print('this will migrate the database (potentially destroying data), run with --for_real, if you are sure '
'*and* have a backup')
sys.exit(1)
else:
print('Start database migration...')
migrate_db()
print('Done.')
@hug.cli()
def get_coupon_state():
"""
get a list of all users and their bookings and remaining coupons
"""
ret = []
for user in User.select():
bookings = Booking.select().where(
user.user_name == Booking.booked_by)
ret.append({
"name": user.user_name,
"num_bookings": len(bookings),
"coupons": user.coupons
})
return ret
@hug.cli()
def set_coupon_count(db: directives.PeeweeSession, user_name: hug.types.text, value: hug.types.number):
"""
[--user_name] <string> [--value] <number>; set the user coupon_count to <value>
"""
with db.atomic():
user = User.get(User.user_name == user_name)
user.coupons = value
user.save()
@hug.cli()
def inc_coupon_count(db: directives.PeeweeSession, user_name: hug.types.text, increment: hug.types.number):
"""
[--user_name] <string> [--increment] <number>; increment the user coupon_count, to decrement give a negative number
"""
with db.atomic():
user = User.get(User.user_name == user_name)
user.coupons += increment
user.save()
@hug.cli()
def cancel_booking(db: directives.PeeweeSession, secret: hug.types.text, start_date_time: hug.types.text, for_real: hug.types.smart_boolean = False):
"""
[--secret] <string> [--start_date_time] <ISO datetime string> [--for_real]; cancel the booking with given secret at given time
"""
with db.atomic():
sdt = datetime.fromisoformat(start_date_time).replace(tzinfo=None)
timeslot = TimeSlot.get(TimeSlot.start_date_time == sdt)
booking = Booking.select(Booking).join(Appointment).where(
(Booking.secret == secret) &
(Appointment.time_slot == timeslot)).get()
if not for_real:
print(f"This would delete the booking with id '{booking.id}' and secret '{booking.secret}'. Run with "
f"--for_real if you are sure.")
sys.exit(1)
else:
print(
f"Deleting the booking with id '{booking.id}' and secret '{booking.secret}'.")
booking.appointment.booked = False
booking.appointment.save()
q = Booking.delete().where(Booking.id == booking.id)
q.execute()
print("Done.")
@hug.cli()
def set_frontend_config(db: directives.PeeweeSession, instance_name: hug.types.text, long_instance_name: hug.types.text,
contact_info_bookings: hug.types.text, contact_info_appointments: hug.types.text = None,
form_fields: hug.types.text = "base,address,dayOfBirth,reason",
for_real: hug.types.smart_boolean = False):
"""
[--instance_name] <string> [--long_instance_name] <string> [--contact_info_bookings] <string> [--contact_info_appointments <string=None>] [--form_fields <string="base,address,dayOfBirth,reason">] [--for_real]
"""
with db.atomic():
if not contact_info_appointments:
appointments_contact = contact_info_bookings
else:
appointments_contact = contact_info_appointments
template = {
"instanceName": f"{instance_name}",
"longInstanceName": f"{long_instance_name}",
"contactInfoCoupons": f"{contact_info_bookings}",
"contactInfoAppointment": f"{appointments_contact}",
"formFields": form_fields.split(","),
}
if not for_real:
print(f"This would update the config with '{json.dumps(template, indent=2)}'. Run with --for_real if you "
f"are sure.")
sys.exit(1)
else:
print(
f"Updating the config with '{json.dumps(template, indent=2)}'.")
try:
config = FrontendConfig.get()
config.config = template
except FrontendConfig.DoesNotExist:
config = FrontendConfig.create(config=template)
config.save()
print("Done.")
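# Illustrative note (not part of the original file): the template written above looks roughly like
#   {"instanceName": "demo", "longInstanceName": "Demo Instance",
#    "contactInfoCoupons": "coupons@example.org", "contactInfoAppointment": "coupons@example.org",
#    "formFields": ["base", "address", "dayOfBirth", "reason"]}
# contactInfoAppointment falls back to the bookings contact when it is not given.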
@hug.cli()
def load_frontend_config(db: directives.PeeweeSession, frontend_config_file: hug.types.text,
for_real: hug.types.smart_boolean = False):
"""
[--frontend_config_file] <file> [--for_real] loads the config file to the database if run with --for_real
To check the frontend_config, omit the --for_real flag
"""
with db.atomic():
with open(frontend_config_file, 'r') as j_file:
try:
new_config = json.load(j_file)
if 'instanceName' not in new_config or 'longInstanceName' not in new_config or \
'contactInfoCoupons' not in new_config \
or 'contactInfoAppointment' not in new_config or 'formFields' not in new_config:
print(
f"Given file '{json.dumps(new_config, indent=2)}' missing required fields!")
sys.exit(1)
elif type(new_config['formFields']) != list:
print("field formFields is not a list!")
sys.exit(1)
except json.JSONDecodeError as e:
                print("The file can not be decoded as JSON!")
sys.exit(1)
if not for_real:
print(
f"This would update the config with '{json.dumps(new_config, indent=2)}'. "
f"Run with --for_real if you are sure.")
sys.exit(1)
else:
print(
f"Updating the config with '{json.dumps(new_config, indent=2)}'.")
try:
config = FrontendConfig.get()
config.config = new_config
except FrontendConfig.DoesNotExist:
config = FrontendConfig.create(config=new_config)
config.save()
print("Done.")
@hug.cli(output=hug.output_format.pretty_json)
def get_bookings_created_at(db: directives.PeeweeSession, booked_at: hug.types.text):
"""
[--booked_at <ISO datetime string>] get all bookings made at specific day or time
Get bookings for a day with yyyy-mm-dd or one specific booking at yyyy-mm-ddThh:mm:ss.mmmmmm
"""
with db.atomic():
query = Booking.select(
Booking,
Appointment.time_slot.start_date_time.alias("start_date_time")
).join(Appointment).join(TimeSlot)
booked_start = datetime.fromisoformat(booked_at).replace(tzinfo=None)
if str(booked_start.date()) == booked_at:
# booked_at is yyyy-mm-dd
booked_end = booked_start.date() + timedelta(days=1)
bookings = query.where(
Booking.booked_at.between(booked_start, booked_end))
else:
# booked_at is yyyy-mm-ddThh:mm:ss.mmmmmm
bookings = query.where(Booking.booked_at == booked_start)
result = []
for booking in bookings.dicts().iterator():
del booking["appointment"]
result.append({**booking})
return result
def get_free_timeslots_between(db: directives.PeeweeSession, start: datetime, end: datetime):
with db.atomic():
now = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
slots = TimeSlot \
.select(TimeSlot.start_date_time, TimeSlot.length_min,
fn.count(Appointment.time_slot).alias("free_appointments")) \
.join(Appointment) \
.where(
(TimeSlot.start_date_time >= start) & (TimeSlot.start_date_time <= end) &
(Appointment.claim_token.is_null() | (Appointment.claimed_at +
timedelta(
minutes=config.Settings.claim_timeout_min) < now)) &
(Appointment.booked == False)
) \
.group_by(TimeSlot.start_date_time, TimeSlot.length_min) \
.order_by(TimeSlot.start_date_time) \
# @formatter:on
return [{"startDateTime": str(slot.start_date_time)} for slot in slots]
@hug.cli(output=hug.output_format.pretty_json)
def free_slots_at(db: directives.PeeweeSession, at_datetime: hug.types.text = None, max_days_after: hug.types.number = 2):
"""
[--at_datetime <ISO datetime string=None>] [--max_days_after <number=2>] returns a list of free slots after given date, up to date + max_days_after
"""
start = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
if at_datetime is not None:
start = datetime.fromisoformat(at_datetime).replace(tzinfo=None)
end = start + timedelta(days=max_days_after)
return get_free_timeslots_between(db, start, end)
@hug.cli(output=hug.output_format.pretty_json)
def free_slots_before(db: directives.PeeweeSession, at_datetime: hug.types.text = None, max_days_before: hug.types.number = 2):
"""
[--at_datetime <ISO datetime string=None>] [--max_days_before <number=2>] returns a list of free slots before given date, up to date - max_days_before
"""
end = datetime.now(tz=config.Settings.tz).replace(tzinfo=None)
if at_datetime is not None:
end = datetime.fromisoformat(at_datetime).replace(tzinfo=None)
start = end - timedelta(days=max_days_before)
return get_free_timeslots_between(db, start, end)
@hug.cli(output=hug.output_format.pretty_json)
def claim_appointment(db: directives.PeeweeSession, start_date_time: hug.types.text, user: hug.types.text):
"""
[--start_date_time] START_DATE_TIME (ISO string) [--user] USER_NAME
"""
try:
api_claim_appointment = api.claim_appointment(
db, start_date_time, get_or_create_auto_user(
db, UserRoles.USER, user)
)
except hug.HTTPGone as e:
return None
return api_claim_appointment
@hug.cli(output=hug.output_format.pretty_json)
def has_booked_by(db: directives.PeeweeSession, user: hug.types.text):
"""
USER_NAME; checks if there are bookings made by that user
"""
return Booking.select(Booking).where(Booking.booked_by == user).count() > 0
@hug.cli(output=hug.output_format.pretty_json)
def has_booking(db: directives.PeeweeSession, booking: hug.types.json):
"""
BOOKING_JSON; check if a booking exists for the booked person
"""
try:
return Booking.select(Booking).where(
(Booking.surname == booking["surname"])
& (Booking.first_name == booking["first_name"])
& (Booking.birthday == booking["birthday"])
& (Booking.phone == booking["phone"])
& (Booking.street == booking["street"])
& (Booking.street_number == booking["street_number"])
& (Booking.post_code == booking["post_code"])
& (Booking.city == booking["city"])
).count() > 0
except KeyError as e:
print(f"Key {e} is missing in booking.")
return None
@hug.cli(output=hug.output_format.pretty_json)
def book_followup(db: directives.PeeweeSession, booking: hug.types.json, delta_days: hug.types.number = 21, day_range: hug.types.number = 2):
"""
BOOKING_JSON [--delta_days <number=21>][--day_range <number=2>]
"""
if has_booked_by(db, booking["booked_by"]):
print(
f"User {booking['booked_by']} already booked at least one appointment.")
return None
start_date = datetime.fromisoformat(
booking["start_date_time"]).replace(tzinfo=None)
followup_date = start_date + timedelta(days=delta_days)
    slots_after = free_slots_at(
        db, str(followup_date), day_range)
    slots_before = free_slots_before(
        db, str(followup_date), day_range)
slot_count = len(slots_before) + len(slots_after)
if slot_count == 0:
print(
f"No free slots available for booking: {booking} at '{followup_date}' in range of {day_range} days")
return None
found_time = None
tries = -1
claim_token = None
    while claim_token is None and tries < len(slots_after) - 1:
tries += 1
found_time = slots_after[tries]["startDateTime"]
claim_token = claim_appointment(
db, found_time, booking["booked_by"])
tries = -1
    while claim_token is None and tries < len(slots_before) - 1:
tries += 1
found_time = slots_before[tries]["startDateTime"]
claim_token = claim_appointment(
db, found_time, booking["booked_by"])
if claim_token is None:
print(
f"Failed to claim slot for booking: {booking} at '{followup_date}' in range of {day_range} days")
return None
booking["name"] = booking["surname"]
booking["claim_token"] = claim_token
booking["start_date_time"] = found_time
print(f"Book appointment with data {booking}")
booked = api.book_appointment(db, booking, get_or_create_auto_user(
db, UserRoles.USER, booking["booked_by"]))
return booked
@hug.cli()
def batch_book_followup(db: directives.PeeweeSession, delta_days: hug.types.number = 21, day_range: hug.types.number = 2):
"""
Expects result from get_bookings_created_at piped into stdin
delta_days: days after the first appointment
day_range: will search appointments in that range (+ or -) of above date (nearest will be taken)
"""
bookings = json.load(sys.stdin)
for booking in bookings:
booked = book_followup(db, booking, delta_days, day_range)
if booked is not None:
booked["time_slot"] = str(booked["time_slot"])
print(f"Booked appointment {booked}")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 10:49:55 2020
@author: nmei
"""
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
import os
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.anova import AnovaRM
from matplotlib.ticker import FormatStrFormatter
from matplotlib import pyplot as plt
from mne.stats import fdr_correction
import seaborn as sns
sns.set_style('white')
sns.set_context('poster')
import matplotlib
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
matplotlib.rcParams['font.weight']= 'bold'
from shutil import copyfile
copyfile('../../../utils.py','utils.py')
import utils
model = 'Image2vec Encoding Models'
experiment = 'metasema'
alpha = int(1e2)
here = 'encoding_model_15_ROIs_arrays'
model_name = 'Ridge Regression'
cv = 'Random Partition 300 folds'
multiple_correction_method = 'FDR Benjamini-Hochberg'
working_dir = '../../../../results/{}/RP/{}'.format(experiment,here)
#working_dir = '/bcbl/home/home_n-z/nmei/bench_marking/results/{}/RP/{}'.format(experiment,here)
here = 'compare word2vec and image2vec 15 roi'
figure_dir = '../../../../figures/{}/RP/{}'.format(experiment,here)
if not os.path.exists(figure_dir):
os.makedirs(figure_dir)
working_data = glob(os.path.join(working_dir,'*.npy'))
df_collect = dict(
sub_name = [],
roi = [],
condition = [],
model_name = [],
path = [],
scores = [],
)
def _append(df_collect,mapping):
for key,values in mapping.items():
df_collect[key].append(values)
return df_collect
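# Illustrative note (not part of the original script): _append pushes one value per key, e.g.
#   _append({'a': [], 'b': []}, dict(a=1, b=2))  ->  {'a': [1], 'b': [2]}
# so every call adds exactly one "row" to the dict-of-lists that later becomes a DataFrame.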
for f in tqdm(working_data):
try:
_,_,sub_name,roi,condition,model = f.split(' ')
model = model.split('.')[0]
except:
_,_,sub_name,roi,condition,model1,model2 = f.split(' ')
        model = f'{model1} {model2.split(".")[0]}'
df_collect = _append(df_collect,mapping = dict(
sub_name=sub_name,
roi=roi,
condition=condition,
model_name=model,
path=f,
scores=np.load(f).mean(0)
))
df_collect = pd.DataFrame(df_collect)
df_collect['model_name'] = df_collect['model_name'].map({'fast text':'Fast Text',
'glove':'GloVe',
'word2vec':'Word2Vec',
'concatenated_word2vec':'Word Embedding',
'VGG19':'VGG19',
'DenseNet169':'DenseNet169',
'MobileNetV2':'MobileNetV2'})
df_collect['Model'] = df_collect['model_name'].map({'Fast Text':'W2V',
'GloVe':'W2V',
'Word2Vec':'W2V',
'Word Embedding':'W2V',
'VGG19':'I2V',
'DenseNet169':'I2V',
'MobileNetV2':'I2V'})
df = dict(
sub_name = [],
roi = [],
condition = [],
model_type = [],
scores = [],
positive_voxels = [],
)
for (sub_name,roi,condition,Model),df_sub in df_collect.groupby(['sub_name',
'roi','condition','Model']):
temp = df_sub['scores'].values.mean(0)
df = _append(df,
mapping = dict(sub_name = sub_name,
roi = roi,
condition = condition,
model_type = Model,
scores = temp,
positive_voxels = np.sum(temp >=0)))
df = pd.DataFrame(df)
df['roi_name'] = df['roi'].apply(lambda x:x.split('_')[-1])
df['roi_name'] = df['roi_name'].map({'frontpole':'Frontal Pole',
                                     'fusif':'Fusiform Gyrus',
'infpar':'Inferior Parietal Lobe',
'inftemp':'Inferior Temporal Lobe',
'lofc':'Lateral Orbitofrontal Cortex',
                                     'mofc':'Medial Orbitofrontal Cortex',
'mtemp':'Medial Temporal Lobe',
'parsoper':'Pars Opercularis',
'parsorbi':'Pars Orbitalis',
'parstri':'Pars Triangularis',
'phipp':'Parahippocampal Gyrus',
'postcing':'Posterior Cingulate Gyrus',
'precun':'Precuneus',
'sfrontal':'Superior Frontal Gyrus',
'tempole':'Anterior Temporal Lobe'})
df['roi_name_br'] = df['roi_name'].map({'Frontal Pole':'FP',
                                        'Fusiform Gyrus':'FFG',
'Inferior Parietal Lobe':'IPL',
'Inferior Temporal Lobe':'ITL',
'Lateral Orbitofrontal Cortex':'LOFC',
                                        'Medial Orbitofrontal Cortex':'MOFC',
'Medial Temporal Lobe':'MTL',
'Pars Opercularis':'POP',
'Pars Orbitalis':'POR',
'Pars Triangularis':'PTR',
'Parahippocampal Gyrus':'PHG',
'Posterior Cingulate Gyrus':'PCG',
'Precuneus':'Precuneus',
'Superior Frontal Gyrus':'SFG',
'Anterior Temporal Lobe':'ATL'})
df['ROIs'] = df['roi_name']
df['Conditions'] = df['condition']
sort_by = ['sub_name','roi_name','condition']
df_i2v = df[df['model_type']=='I2V'].sort_values(sort_by)
df_w2v = df[df['model_type']=='W2V'].sort_values(sort_by)
fig,ax = plt.subplots(figsize = (24,20))
ax = sns.scatterplot(df_w2v['positive_voxels'].values,
df_i2v['positive_voxels'].values,
hue = df_i2v['ROIs'].values,
style = df_i2v['Conditions'].values,
ax = ax,
)
ax.plot([0,600],[0,600],linestyle = '--',color = 'black',alpha = .4,)
ax.set(xlim=(-10,550),
ylim=(-10,550),
xlabel = 'Word embedding models',
ylabel = 'Computer vision models',
title = 'Number of Positive Variance Explained Voxels')
fig.savefig(os.path.join(figure_dir,'positive voxels.jpeg'),
bbox_inches = 'tight',)
fig.savefig(os.path.join(figure_dir,'positive voxels (high).jpeg'),
dpi = 400,
bbox_inches = 'tight',)
df_voxel = dict(sub_name=[],
roi_name=[],
condition=[],
score_i=[],
score_w=[],
)
for ((sub_name,roi_name,condition),df_i2v_sub),(_,df_w2v_sub) in zip(df_i2v.groupby(['sub_name','roi_name','condition',]),
df_w2v.groupby(['sub_name','roi_name','condition',])):
for ii,ww in zip(df_i2v_sub['scores'].values[0],df_w2v_sub['scores'].values[0]):
df_voxel = _append(df_voxel,
mapping = dict(sub_name=sub_name,
roi_name=roi_name,
condition=condition,
score_i=ii,
score_w=ww,))
df_voxel = pd.DataFrame(df_voxel)
df_voxel['ROIs'] = df_voxel['roi_name']
df_voxel['Conditions'] = df_voxel['condition']
idx = np.logical_or(df_voxel['score_i'].apply(lambda x:x>=0).values,
df_voxel['score_w'].apply(lambda x:x>=0).values)
df_voxel_plot = df_voxel[idx]
idx = np.logical_or(df_voxel['score_i'].apply(lambda x:-10<x<0).values,
df_voxel['score_w'].apply(lambda x:-10<x<0).values)
df_voxel_negative = df_voxel[idx]
fig,ax = plt.subplots(figsize = (24,20))
ax.scatter(df_voxel_negative['score_w'].values,
df_voxel_negative['score_i'].values,
marker = '*',
s = 1,
color = 'black',
alpha = 0.5,
)
ax = sns.scatterplot('score_w','score_i',
hue='ROIs',
style='Conditions',
data = df_voxel_plot,
ax = ax,
)
ax.plot([-600,600],[-600,600],linestyle = '--',color = 'black',alpha = .4,)
vims = df_voxel['score_i'].max() * 1.1
ax.set(xlim=(-vims,vims),
ylim=(-vims,vims),
xlabel = 'Word embedding models',
ylabel = 'Computer vision models',
title = 'Variance Explained of Individual Voxels',
)
fig.savefig(os.path.join(figure_dir,'voxel wise scores.jpeg'),
bbox_inches = 'tight',)
fig.savefig(os.path.join(figure_dir,'voxel wise scores (high).jpeg'),
dpi = 500,
bbox_inches = 'tight',)
plt.close('all')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 10:49:55 2020
@author: nmei
"""
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
import os
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.anova import AnovaRM
from matplotlib.ticker import FormatStrFormatter
from matplotlib import pyplot as plt
from mne.stats import fdr_correction
import seaborn as sns
sns.set_style('white')
sns.set_context('poster')
import matplotlib
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
matplotlib.rcParams['font.weight']= 'bold'
from shutil import copyfile
copyfile('../../../utils.py','utils.py')
import utils
model = 'Image2vec Encoding Models'
experiment = 'metasema'
alpha = int(1e2)
here = 'encoding_model_15_ROIs_arrays'
model_name = 'Ridge Regression'
cv = 'Random Partition 300 folds'
multiple_correction_method = 'FDR Benjamini-Hochberg'
working_dir = '../../../../results/{}/RP/{}'.format(experiment,here)
#working_dir = '/bcbl/home/home_n-z/nmei/bench_marking/results/{}/RP/{}'.format(experiment,here)
here = 'compare word2vec and image2vec 15 roi'
figure_dir = '../../../../figures/{}/RP/{}'.format(experiment,here)
if not os.path.exists(figure_dir):
os.makedirs(figure_dir)
working_data = glob(os.path.join(working_dir,'*.npy'))
df_collect = dict(
sub_name = [],
roi = [],
condition = [],
model_name = [],
path = [],
scores = [],
)
def _append(df_collect,mapping):
for key,values in mapping.items():
df_collect[key].append(values)
return df_collect
for f in tqdm(working_data):
try:
_,_,sub_name,roi,condition,model = f.split(' ')
model = model.split('.')[0]
    except ValueError:  # model name itself contains a space (e.g. 'fast text')
_,_,sub_name,roi,condition,model1,model2 = f.split(' ')
model = f'{model1} {model2.split(".")[0]}'
df_collect = _append(df_collect,mapping = dict(
sub_name=sub_name,
roi=roi,
condition=condition,
model_name=model,
path=f,
scores=np.load(f).mean(0)
))
df_collect = pd.DataFrame(df_collect)
df_collect['model_name'] = df_collect['model_name'].map({'fast text':'Fast Text',
'glove':'GloVe',
'word2vec':'Word2Vec',
'concatenated_word2vec':'Word Embedding',
'VGG19':'VGG19',
'DenseNet169':'DenseNet169',
'MobileNetV2':'MobileNetV2'})
df_collect['Model'] = df_collect['model_name'].map({'Fast Text':'W2V',
'GloVe':'W2V',
'Word2Vec':'W2V',
'Word Embedding':'W2V',
'VGG19':'I2V',
'DenseNet169':'I2V',
'MobileNetV2':'I2V'})
df = dict(
sub_name = [],
roi = [],
condition = [],
model_type = [],
scores = [],
positive_voxels = [],
)
for (sub_name,roi,condition,Model),df_sub in df_collect.groupby(['sub_name',
'roi','condition','Model']):
temp = df_sub['scores'].values.mean(0)
df = _append(df,
mapping = dict(sub_name = sub_name,
roi = roi,
condition = condition,
model_type = Model,
scores = temp,
positive_voxels = np.sum(temp >=0)))
df = pd.DataFrame(df)
df['roi_name'] = df['roi'].apply(lambda x:x.split('_')[-1])
df['roi_name'] = df['roi_name'].map({'frontpole':'Frontal Pole',
                                     'fusif':'Fusiform Gyrus',
'infpar':'Inferior Parietal Lobe',
'inftemp':'Inferior Temporal Lobe',
'lofc':'Lateral Orbitofrontal Cortex',
                                     'mofc':'Medial Orbitofrontal Cortex',
'mtemp':'Medial Temporal Lobe',
'parsoper':'Pars Opercularis',
'parsorbi':'Pars Orbitalis',
'parstri':'Pars Triangularis',
'phipp':'Parahippocampal Gyrus',
'postcing':'Posterior Cingulate Gyrus',
'precun':'Precuneus',
'sfrontal':'Superior Frontal Gyrus',
'tempole':'Anterior Temporal Lobe'})
df['roi_name_br'] = df['roi_name'].map({'Frontal Pole':'FP',
                                        'Fusiform Gyrus':'FFG',
'Inferior Parietal Lobe':'IPL',
'Inferior Temporal Lobe':'ITL',
'Lateral Orbitofrontal Cortex':'LOFC',
                                        'Medial Orbitofrontal Cortex':'MOFC',
'Medial Temporal Lobe':'MTL',
'Pars Opercularis':'POP',
'Pars Orbitalis':'POR',
'Pars Triangularis':'PTR',
'Parahippocampal Gyrus':'PHG',
'Posterior Cingulate Gyrus':'PCG',
'Precuneus':'Precuneus',
'Superior Frontal Gyrus':'SFG',
'Anterior Temporal Lobe':'ATL'})
df['ROIs'] = df['roi_name']
df['Conditions'] = df['condition']
sort_by = ['sub_name','roi_name','condition']
df_i2v = df[df['model_type']=='I2V'].sort_values(sort_by)
df_w2v = df[df['model_type']=='W2V'].sort_values(sort_by)
fig,ax = plt.subplots(figsize = (24,20))
ax = sns.scatterplot(df_w2v['positive_voxels'].values,
df_i2v['positive_voxels'].values,
hue = df_i2v['ROIs'].values,
style = df_i2v['Conditions'].values,
ax = ax,
)
ax.plot([0,600],[0,600],linestyle = '--',color = 'black',alpha = .4,)
ax.set(xlim=(-10,550),
ylim=(-10,550),
xlabel = 'Word embedding models',
ylabel = 'Computer vision models',
title = 'Number of Positive Variance Explained Voxels')
fig.savefig(os.path.join(figure_dir,'positive voxels.jpeg'),
bbox_inches = 'tight',)
fig.savefig(os.path.join(figure_dir,'positive voxels (high).jpeg'),
dpi = 400,
bbox_inches = 'tight',)
df_voxel = dict(sub_name=[],
roi_name=[],
condition=[],
score_i=[],
score_w=[],
)
for ((sub_name,roi_name,condition),df_i2v_sub),(_,df_w2v_sub) in zip(df_i2v.groupby(['sub_name','roi_name','condition',]),
df_w2v.groupby(['sub_name','roi_name','condition',])):
for ii,ww in zip(df_i2v_sub['scores'].values[0],df_w2v_sub['scores'].values[0]):
df_voxel = _append(df_voxel,
mapping = dict(sub_name=sub_name,
roi_name=roi_name,
condition=condition,
score_i=ii,
score_w=ww,))
df_voxel = pd.DataFrame(df_voxel)
df_voxel['ROIs'] = df_voxel['roi_name']
df_voxel['Conditions'] = df_voxel['condition']
idx = np.logical_or(df_voxel['score_i'].apply(lambda x:x>=0).values,
df_voxel['score_w'].apply(lambda x:x>=0).values)
df_voxel_plot = df_voxel[idx]
idx = np.logical_or(df_voxel['score_i'].apply(lambda x:-10<x<0).values,
df_voxel['score_w'].apply(lambda x:-10<x<0).values)
df_voxel_negative = df_voxel[idx]
fig,ax = plt.subplots(figsize = (24,20))
ax.scatter(df_voxel_negative['score_w'].values,
df_voxel_negative['score_i'].values,
marker = '*',
s = 1,
color = 'black',
alpha = 0.5,
)
ax = sns.scatterplot('score_w','score_i',
hue='ROIs',
style='Conditions',
data = df_voxel_plot,
ax = ax,
)
ax.plot([-600,600],[-600,600],linestyle = '--',color = 'black',alpha = .4,)
vims = df_voxel['score_i'].max() * 1.1
ax.set(xlim=(-vims,vims),
ylim=(-vims,vims),
xlabel = 'Word embedding models',
ylabel = 'Computer vision models',
title = 'Variance Explained of Individual Voxels',
)
fig.savefig(os.path.join(figure_dir,'voxel wise scores.jpeg'),
bbox_inches = 'tight',)
fig.savefig(os.path.join(figure_dir,'voxel wise scores (high).jpeg'),
dpi = 500,
bbox_inches = 'tight',)
plt.close('all')
|
import discord
from discord.ext import commands
import json
from func import options
class Recipe(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def recipe(self, ctx, *, n=None):
if n is None:
await ctx.send(f'{ctx.author.mention} invalid syntax: `?recipe (item)`')
else:
with open('./data/recipes.json', 'r') as json_file:
recipesFile = json.load(json_file)
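            # Assumed recipes.json layout (inferred from the lookups below; the key names are the ones actually used here):
            #   {"recipe": [{"names": ["<item name>", ...], "url": "<image suffix>"}, ...], "unknown": [...]}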
for recipe in recipesFile['recipe']:
for name in recipe['names']:
if n.lower() == name or n.lower() == name + 's':
embed = discord.Embed(title="Recipe lookup", description=f"The result of {ctx.author}'s recipe lookup", color=options.getEmbedColour(ctx.guild.id))
                        embed.set_image(url=f"https://www.minecraftcrafting.info/imgs/craft_{recipe['url']}")
await ctx.send(embed=embed)
return
await ctx.send(f"{ctx.author.mention} it appears we could not find the recipe you're looking for.\nI have informed the developers")
recipesFile['unknown'].append(n)
with open('./data/recipes.json', 'w') as json_file:
json.dump(recipesFile, json_file, indent=2)
def setup(bot):
bot.add_cog(Recipe(bot))
|
import discord
from discord.ext import commands
import json
from func import options
class Recipe(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def recipe(self, ctx, *, n=None):
if n is None:
await ctx.send(f'{ctx.author.mention} invalid syntax: `?recipe (item)`')
else:
with open('./data/recipes.json', 'r') as json_file:
recipesFile = json.load(json_file)
for recipe in recipesFile['recipe']:
for name in recipe['names']:
if n.lower() == name or n.lower() == name + 's':
embed = discord.Embed(title="Recipe lookup", description=f"The result of {ctx.author}'s recipe lookup", color=options.getEmbedColour(ctx.guild.id))
embed.set_image(url=f"https://www.minecraftcrafting.info/imgs/craft_{recipe['url']}")
await ctx.send(embed=embed)
return
await ctx.send(f"{ctx.author.mention} it appears we could not find the recipe you're looking for.\nI have informed the developers")
recipesFile['unknown'].append(n)
with open('./data/recipes.json', 'w') as json_file:
json.dump(recipesFile, json_file, indent=2)
def setup(bot):
bot.add_cog(Recipe(bot))
|
#!/usr/bin/env python3
"""
Combine individual metadata files into datapackage.json
"""
import fnmatch
import hashlib
import json
import logging
import os
import sys
from os import path
import yaml
LOGGER = logging.getLogger(__name__)
def get_source_dict(filename):
with open(filename, 'r', encoding='utf8') as f:
citations = yaml.load(f)
for k in citations:
citations[k]['name'] = k
return citations
def replace_sources(keys, sources):
newsrc = []
for k in keys:
if k in sources:
newsrc.append(sources[k])
else:
print("ERROR: source %s not found" % k)
return newsrc
def replace_all_sources(bib, data):
sources = get_source_dict(bib)
data['references'] = sources
for res in data['resources']:
if 'sources' in res:
res['sources'] = replace_sources(res['sources'], sources)
else:
res['sources'] = []
if 'schema' in res:
for col in res['schema']['fields']:
try:
col['sources'] = replace_sources(col['sources'], sources)
except KeyError:
pass
# From http://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python
def md5sum(filename):
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def process_metadata(filename):
with open(filename, 'r', encoding='utf8') as f:
data = yaml.load(f)
description = path.join(filename).replace('.yaml', '.rst')
if path.exists(description):
with open(description, 'r', encoding='utf8') as f:
description_text = f.read()
data['description'] = description_text
return data
def process_dpkg(filename):
return process_metadata(filename)
def process_resource(filename):
meta = process_metadata(filename)
if meta:
if path.exists(meta['path']):
meta['bytes'] = path.getsize(meta['path'])
meta['hash'] = md5sum(meta['path'])
meta['path'] = path.basename(meta['path'])
return meta
else:
            LOGGER.error(f"{filename}: {meta['path']} does not exist")
else:
LOGGER.error(f"{filename} has no metadata.")
def build(src, dst):
metadir = path.join(src, 'rawdata', 'metadata')
meta_res_dir = path.join(metadir, 'resources')
data = process_dpkg(path.join(metadir, 'datapackage.yaml'))
data['resources'] = []
for filename in sorted(os.listdir(meta_res_dir)):
if fnmatch.fnmatch(filename, '*.yaml'):
res = process_resource(path.join(meta_res_dir, filename))
if res:
data['resources'].append(res)
bib = path.join(metadir, 'sources.yaml')
replace_all_sources(bib, data)
with open(path.join(dst, 'datapackage.json'), 'w', encoding="utf8") as f:
json.dump(data, f, indent=2)
print("Writing: %s" % path.join(dst, 'datapackage.json'))
def main():
src = sys.argv[1]
dst = sys.argv[2]
build(src, dst)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
"""
Combine individual metadata files into datapackage.json
"""
import fnmatch
import hashlib
import json
import logging
import os
import sys
from os import path
import yaml
LOGGER = logging.getLogger(__name__)
def get_source_dict(filename):
with open(filename, 'r', encoding='utf8') as f:
citations = yaml.load(f)
for k in citations:
citations[k]['name'] = k
return citations
def replace_sources(keys, sources):
newsrc = []
for k in keys:
if k in sources:
newsrc.append(sources[k])
else:
print("ERROR: source %s not found" % k)
return newsrc
def replace_all_sources(bib, data):
sources = get_source_dict(bib)
data['references'] = sources
for res in data['resources']:
if 'sources' in res:
res['sources'] = replace_sources(res['sources'], sources)
else:
res['sources'] = []
if 'schema' in res:
for col in res['schema']['fields']:
try:
col['sources'] = replace_sources(col['sources'], sources)
except KeyError:
pass
# From http://stackoverflow.com/questions/1131220/get-md5-hash-of-big-files-in-python
def md5sum(filename):
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def process_metadata(filename):
with open(filename, 'r', encoding='utf8') as f:
data = yaml.load(f)
description = path.join(filename).replace('.yaml', '.rst')
if path.exists(description):
with open(description, 'r', encoding='utf8') as f:
description_text = f.read()
data['description'] = description_text
return data
def process_dpkg(filename):
return process_metadata(filename)
def process_resource(filename):
meta = process_metadata(filename)
if meta:
if path.exists(meta['path']):
meta['bytes'] = path.getsize(meta['path'])
meta['hash'] = md5sum(meta['path'])
meta['path'] = path.basename(meta['path'])
return meta
else:
LOGGER.error(f"{filename}: {meta['path']} does not exist")
else:
LOGGER.error(f"{filename} has no metadata.")
def build(src, dst):
metadir = path.join(src, 'rawdata', 'metadata')
meta_res_dir = path.join(metadir, 'resources')
data = process_dpkg(path.join(metadir, 'datapackage.yaml'))
data['resources'] = []
for filename in sorted(os.listdir(meta_res_dir)):
if fnmatch.fnmatch(filename, '*.yaml'):
res = process_resource(path.join(meta_res_dir, filename))
if res:
data['resources'].append(res)
bib = path.join(metadir, 'sources.yaml')
replace_all_sources(bib, data)
with open(path.join(dst, 'datapackage.json'), 'w', encoding="utf8") as f:
json.dump(data, f, indent=2)
print("Writing: %s" % path.join(dst, 'datapackage.json'))
def main():
src = sys.argv[1]
dst = sys.argv[2]
build(src, dst)
if __name__ == '__main__':
main()
|
""" Functions to download and transform the parquet files to numpy files """
import os
from itertools import repeat
from multiprocessing import Pool
from typing import Tuple
from tqdm import tqdm as tq
from autofaiss.datasets.readers.remote_iterators import read_filenames
from autofaiss.utils.os_tools import run_command
def download(parquet_embeddings_path: str, dest_path: str, n_cores: int = 32, verbose: bool = True) -> bool:
"""
Download .parquet files from hdfs at max speed
Parallelisation is essential to use the full bandwidth.
"""
filenames = read_filenames(parquet_embeddings_path)
nb_files = len(filenames)
os.makedirs(dest_path, exist_ok=True)
src_dest_paths = zip(filenames, repeat(dest_path))
if n_cores == 1:
if verbose:
src_dest_paths = tq(list(src_dest_paths))
for src_dest_path in src_dest_paths:
download_one(src_dest_path)
else:
with tq(total=nb_files) as pbar:
with Pool(processes=n_cores) as pool:
for _ in pool.imap_unordered(download_one, src_dest_paths):
pbar.update(1)
return True
def download_one(src_dest_path: Tuple[str, str]) -> None:
"""Function to download one file from hdfs to local"""
filename, dest_path = src_dest_path
    if not os.path.exists(f"{dest_path}/{filename.split('/')[-1]}"):
cmd = f"hdfs dfs -copyToLocal {filename} {dest_path}"
_ = run_command(cmd)
|
""" Functions to download and transform the parquet files to numpy files """
import os
from itertools import repeat
from multiprocessing import Pool
from typing import Tuple
from tqdm import tqdm as tq
from autofaiss.datasets.readers.remote_iterators import read_filenames
from autofaiss.utils.os_tools import run_command
def download(parquet_embeddings_path: str, dest_path: str, n_cores: int = 32, verbose: bool = True) -> bool:
"""
Download .parquet files from hdfs at max speed
Parallelisation is essential to use the full bandwidth.
"""
filenames = read_filenames(parquet_embeddings_path)
nb_files = len(filenames)
os.makedirs(dest_path, exist_ok=True)
src_dest_paths = zip(filenames, repeat(dest_path))
if n_cores == 1:
if verbose:
src_dest_paths = tq(list(src_dest_paths))
for src_dest_path in src_dest_paths:
download_one(src_dest_path)
else:
with tq(total=nb_files) as pbar:
with Pool(processes=n_cores) as pool:
for _ in pool.imap_unordered(download_one, src_dest_paths):
pbar.update(1)
return True
def download_one(src_dest_path: Tuple[str, str]) -> None:
"""Function to download one file from hdfs to local"""
filename, dest_path = src_dest_path
if not os.path.exists(f"{dest_path}/{filename.split('/')[-1]}"):
cmd = f"hdfs dfs -copyToLocal {filename} {dest_path}"
_ = run_command(cmd)
|
#!/usr/bin/env python
import csv
import requests
import base64
from os import environ
from logging import basicConfig, getLogger
from time import sleep
from pathlib import Path
url = "http://www.vpngate.net/api/iphone/"
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
datefmt="%H:%M:%S")
logger = getLogger("App")
logger.setLevel("DEBUG" if environ.get("DEBUG") else "INFO")
def main():
while True:
try:
logger.info("Download SoftEtherVPN list")
res = requests.get(url)
break
except Exception as error:
logger.error(error)
            logger.critical("Unable to get the VPN list. Please check network connection")
sleep(10)
try:
logger.debug("Decoding")
content = res.content.decode('utf-8')
logger.debug("Line processing")
if not Path("openvpn-configs").exists():
Path("openvpn-configs").mkdir()
for line in csv.reader(content.splitlines()):
#HostName,IP,Score,Ping,Speed,CountryLong,CountryShort,NumVpnSessions,Uptime,TotalUsers,TotalTraffic,LogType,Operator,Message,OpenVPN_ConfigData_Base64
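            # Column indices used below (mapping derived from the header above):
            #   line[0]=HostName, line[1]=IP, line[5]=CountryLong, line[6]=CountryShort,
            #   line[14]=OpenVPN_ConfigData_Base64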
# Skip headers
if line[0] == "*vpn_servers" or line[0] == "#HostName" or line[0] == "*":
logger.debug("Skipping mark lines")
continue
logger.debug("Filename processing")
if type(line[0]) is str:
filename = Path("openvpn-configs" + "/" + line[0] + ".ovpn")
else:
logger.critical(type(line[0]))
filename = "WTF.txt"
if line[6] != "UA":
logger.info(f"Found {line[5]} vpn on {line[1]}")
with open(filename, "w", newline='') as file:
                    file.write(f"{base64.b64decode(line[14]).decode('utf-8')}")
except KeyboardInterrupt:
        logger.info("User pressed Ctrl+C")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import csv
import requests
import base64
from os import environ
from logging import basicConfig, getLogger
from time import sleep
from pathlib import Path
url = "http://www.vpngate.net/api/iphone/"
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
datefmt="%H:%M:%S")
logger = getLogger("App")
logger.setLevel("DEBUG" if environ.get("DEBUG") else "INFO")
def main():
while True:
try:
logger.info("Download SoftEtherVPN list")
res = requests.get(url)
break
except Exception as error:
logger.error(error)
            logger.critical("Unable to get the VPN list. Please check network connection")
sleep(10)
try:
logger.debug("Decoding")
content = res.content.decode('utf-8')
logger.debug("Line processing")
if not Path("openvpn-configs").exists():
Path("openvpn-configs").mkdir()
for line in csv.reader(content.splitlines()):
#HostName,IP,Score,Ping,Speed,CountryLong,CountryShort,NumVpnSessions,Uptime,TotalUsers,TotalTraffic,LogType,Operator,Message,OpenVPN_ConfigData_Base64
# Skip headers
if line[0] == "*vpn_servers" or line[0] == "#HostName" or line[0] == "*":
logger.debug("Skipping mark lines")
continue
logger.debug("Filename processing")
if type(line[0]) is str:
filename = Path("openvpn-configs" + "/" + line[0] + ".ovpn")
else:
logger.critical(type(line[0]))
filename = "WTF.txt"
if line[6] != "UA":
logger.info(f"Found {line[5]} vpn on {line[1]}")
with open(filename, "w", newline='') as file:
file.write(f"{base64.b64decode(line[14]).decode('utf-8')}")
except KeyboardInterrupt:
        logger.info("User pressed Ctrl+C")
if __name__ == "__main__":
main()
|
import calendar
import copy
import datetime
import functools
import hashlib
import itertools
import json
import math
import os.path
import random
import re
import sys
import threading
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_chr,
compat_HTTPError,
compat_parse_qs,
compat_str,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..jsinterp import JSInterpreter
from ..utils import (
NO_DEFAULT,
ExtractorError,
bug_reports_message,
clean_html,
datetime_from_str,
dict_get,
error_to_compat_str,
float_or_none,
format_field,
get_first,
int_or_none,
is_html,
join_nonempty,
js_to_json,
mimetype2ext,
network_exceptions,
orderedSet,
parse_codecs,
parse_count,
parse_duration,
parse_iso8601,
parse_qs,
qualities,
remove_end,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
strftime_or_none,
traverse_obj,
try_get,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
update_url_query,
url_or_none,
urljoin,
variadic,
)
# any clients starting with _ cannot be explicitly requested by the user
INNERTUBE_CLIENTS = {
'web': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20211221.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 1
},
'web_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_EMBEDDED_PLAYER',
'clientVersion': '1.20211215.00.01',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 56
},
'web_music': {
'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
'INNERTUBE_HOST': 'music.youtube.com',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_REMIX',
'clientVersion': '1.20211213.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
},
'web_creator': {
'INNERTUBE_API_KEY': 'AIzaSyBUPetSUmoZL-OhlxA7wSac5XinrygCqMo',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_CREATOR',
'clientVersion': '1.20211220.02.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
},
'android': {
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.49',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
'REQUIRE_JS_PLAYER': False
},
'android_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyCjc_pVEDi4qsv5MtC2dMXzpIaDoRFLsxw',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_EMBEDDED_PLAYER',
'clientVersion': '16.49',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
'REQUIRE_JS_PLAYER': False
},
'android_music': {
'INNERTUBE_API_KEY': 'AIzaSyAOghZGza2MQSZkY_zfZ370N-PUdXEo8AI',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_MUSIC',
'clientVersion': '4.57',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
'REQUIRE_JS_PLAYER': False
},
'android_creator': {
'INNERTUBE_API_KEY': 'AIzaSyD_qjV8zaaUMehtLkrKFgVeSX_Iqbtyws8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
'REQUIRE_JS_PLAYER': False
},
# iOS clients have HLS live streams. Setting device model to get 60fps formats.
# See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680#issuecomment-1002724558
'ios': {
'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
'REQUIRE_JS_PLAYER': False
},
'ios_embedded': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MESSAGES_EXTENSION',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
'REQUIRE_JS_PLAYER': False
},
'ios_music': {
'INNERTUBE_API_KEY': 'AIzaSyBAETezhkwP0ZWA02RsqT1zu78Fpt0bC_s',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MUSIC',
'clientVersion': '4.57',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
'REQUIRE_JS_PLAYER': False
},
'ios_creator': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
'REQUIRE_JS_PLAYER': False
},
# mweb has 'ultralow' formats
# See: https://github.com/yt-dlp/yt-dlp/pull/557
'mweb': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20211221.01.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 2
},
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
'tv_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
'clientVersion': '2.0',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 85
},
}
def _split_innertube_client(client_name):
variant, *base = client_name.rsplit('.', 1)
if base:
return variant, base[0], variant
base, *variant = client_name.split('_', 1)
return client_name, base, variant[0] if variant else None
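# Worked examples for the helper above (derived from the code; 'web.foo' is a hypothetical name):
#   _split_innertube_client('android_music') -> ('android_music', 'android', 'music')
#   _split_innertube_client('ios')           -> ('ios', 'ios', None)
#   _split_innertube_client('web.foo')       -> ('web', 'foo', 'web')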
def build_innertube_clients():
THIRD_PARTY = {
'embedUrl': 'https://www.youtube.com/', # Can be any valid URL
}
BASE_CLIENTS = ('android', 'web', 'tv', 'ios', 'mweb')
priority = qualities(BASE_CLIENTS[::-1])
for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
_, base_client, variant = _split_innertube_client(client)
ytcfg['priority'] = 10 * priority(base_client)
if not variant:
INNERTUBE_CLIENTS[f'{client}_embedscreen'] = embedscreen = copy.deepcopy(ytcfg)
embedscreen['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
embedscreen['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
embedscreen['priority'] -= 3
elif variant == 'embedded':
ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
ytcfg['priority'] -= 2
else:
ytcfg['priority'] -= 3
build_innertube_clients()
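# Effect of the call above (derived from build_innertube_clients): every client now has
# INNERTUBE_API_KEY, INNERTUBE_HOST, REQUIRE_JS_PLAYER, hl='en' and a 'priority'; e.g.
#   INNERTUBE_CLIENTS['web_embedscreen']['INNERTUBE_CONTEXT']['client']['clientScreen'] == 'EMBED'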
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_RESERVED_NAMES = (
r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
r'browse|oembed|get_video_info|iframe_api|s/player|'
r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
# _NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_INVIDIOUS_SITES = (
# invidious-redirect websites
r'(?:www\.)?redirect\.invidious\.io',
r'(?:(?:www|dev)\.)?invidio\.us',
# Invidious instances taken from https://github.com/iv-org/documentation/blob/master/docs/instances.md
r'(?:www\.)?invidious\.pussthecat\.org',
r'(?:www\.)?invidious\.zee\.li',
r'(?:www\.)?invidious\.ethibox\.fr',
r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
# youtube-dl invidious instances list
r'(?:(?:www|no)\.)?invidiou\.sh',
r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
r'(?:www\.)?invidious\.kabi\.tk',
r'(?:www\.)?invidious\.mastodon\.host',
r'(?:www\.)?invidious\.zapashcanon\.fr',
r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
r'(?:www\.)?invidious\.tinfoil-hat\.net',
r'(?:www\.)?invidious\.himiko\.cloud',
r'(?:www\.)?invidious\.reallyancient\.tech',
r'(?:www\.)?invidious\.tube',
r'(?:www\.)?invidiou\.site',
r'(?:www\.)?invidious\.site',
r'(?:www\.)?invidious\.xyz',
r'(?:www\.)?invidious\.nixnet\.xyz',
r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.drycat\.fr',
r'(?:www\.)?inv\.skyn3t\.in',
r'(?:www\.)?tube\.poal\.co',
r'(?:www\.)?tube\.connect\.cafe',
r'(?:www\.)?vid\.wxzm\.sx',
r'(?:www\.)?vid\.mint\.lgbt',
r'(?:www\.)?vid\.puffyan\.us',
r'(?:www\.)?yewtu\.be',
r'(?:www\.)?yt\.elukerio\.org',
r'(?:www\.)?yt\.lelux\.fi',
r'(?:www\.)?invidious\.ggc-project\.de',
r'(?:www\.)?yt\.maisputain\.ovh',
r'(?:www\.)?ytprivate\.com',
r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.toot\.koeln',
r'(?:www\.)?invidious\.fdn\.fr',
r'(?:www\.)?watch\.nettohikari\.com',
r'(?:www\.)?invidious\.namazso\.eu',
r'(?:www\.)?invidious\.silkky\.cloud',
r'(?:www\.)?invidious\.exonip\.de',
r'(?:www\.)?invidious\.riverside\.rocks',
r'(?:www\.)?invidious\.blamefran\.net',
r'(?:www\.)?invidious\.moomoo\.de',
r'(?:www\.)?ytb\.trom\.tf',
r'(?:www\.)?yt\.cyberhost\.uk',
r'(?:www\.)?kgg2m7yk5aybusll\.onion',
r'(?:www\.)?qklhadlycap4cnod\.onion',
r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
)
def _initialize_consent(self):
cookies = self._get_cookies('https://www.youtube.com/')
if cookies.get('__Secure-3PSID'):
return
consent_id = None
consent = cookies.get('CONSENT')
if consent:
if 'YES' in consent.value:
return
consent_id = self._search_regex(
r'PENDING\+(\d+)', consent.value, 'consent', default=None)
if not consent_id:
consent_id = random.randint(100, 999)
self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
def _initialize_pref(self):
cookies = self._get_cookies('https://www.youtube.com/')
pref_cookie = cookies.get('PREF')
pref = {}
if pref_cookie:
try:
pref = dict(compat_urlparse.parse_qsl(pref_cookie.value))
except ValueError:
self.report_warning('Failed to parse user PREF cookie' + bug_reports_message())
pref.update({'hl': 'en', 'tz': 'UTC'})
self._set_cookie('.youtube.com', name='PREF', value=compat_urllib_parse_urlencode(pref))
def _real_initialize(self):
self._initialize_pref()
self._initialize_consent()
self._check_login_required()
def _check_login_required(self):
if (self._LOGIN_REQUIRED
and self.get_param('cookiefile') is None
and self.get_param('cookiesfrombrowser') is None):
self.raise_login_required('Login details are needed to download this content', method='cookies')
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
def _get_default_ytcfg(self, client='web'):
return copy.deepcopy(INNERTUBE_CLIENTS[client])
def _get_innertube_host(self, client='web'):
return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']
def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
# try_get but with fallback to default ytcfg client values when present
_func = lambda y: try_get(y, getter, expected_type)
return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))
def _extract_client_name(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
def _extract_client_version(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)
def _extract_api_key(self, ytcfg=None, default_client='web'):
return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)
def _extract_context(self, ytcfg=None, default_client='web'):
context = get_first(
(ytcfg, self._get_default_ytcfg(default_client)), 'INNERTUBE_CONTEXT', expected_type=dict)
# Enforce language and tz for extraction
client_context = traverse_obj(context, 'client', expected_type=dict, default={})
client_context.update({'hl': 'en', 'timeZone': 'UTC', 'utcOffsetMinutes': 0})
return context
_SAPISID = None
def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
time_now = round(time.time())
if self._SAPISID is None:
yt_cookies = self._get_cookies('https://www.youtube.com')
# Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
# See: https://github.com/yt-dlp/yt-dlp/issues/393
sapisid_cookie = dict_get(
yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
if sapisid_cookie and sapisid_cookie.value:
self._SAPISID = sapisid_cookie.value
self.write_debug('Extracted SAPISID cookie')
# SAPISID cookie is required if not already present
if not yt_cookies.get('SAPISID'):
self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
self._set_cookie(
'.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
else:
self._SAPISID = False
if not self._SAPISID:
return None
# SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
sapisidhash = hashlib.sha1(
f'{time_now} {self._SAPISID} {origin}'.encode()).hexdigest()
return f'SAPISIDHASH {time_now}_{sapisidhash}'
def _call_api(self, ep, query, video_id, fatal=True, headers=None,
note='Downloading API JSON', errnote='Unable to download API page',
context=None, api_key=None, api_hostname=None, default_client='web'):
data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
data.update(query)
real_headers = self.generate_api_headers(default_client=default_client)
real_headers.update({'content-type': 'application/json'})
if headers:
real_headers.update(headers)
return self._download_json(
f'https://{api_hostname or self._get_innertube_host(default_client)}/youtubei/v1/{ep}',
video_id=video_id, fatal=fatal, note=note, errnote=errnote,
data=json.dumps(data).encode('utf8'), headers=real_headers,
query={'key': api_key or self._extract_api_key(), 'prettyPrint': 'false'})
def extract_yt_initial_data(self, item_id, webpage, fatal=True):
data = self._search_regex(
(fr'{self._YT_INITIAL_DATA_RE}\s*{self._YT_INITIAL_BOUNDARY_RE}',
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
if data:
return self._parse_json(data, item_id, fatal=fatal)
@staticmethod
def _extract_session_index(*data):
"""
Index of current account in account list.
See: https://github.com/yt-dlp/yt-dlp/pull/519
"""
for ytcfg in data:
session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
if session_index is not None:
return session_index
# Deprecated?
def _extract_identity_token(self, ytcfg=None, webpage=None):
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
if token:
return token
if webpage:
return self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None, fatal=False)
@staticmethod
def _extract_account_syncid(*args):
"""
Extract syncId required to download private playlists of secondary channels
@params response and/or ytcfg
"""
for data in args:
# ytcfg includes channel_syncid if on secondary channel
delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
if delegated_sid:
return delegated_sid
sync_ids = (try_get(
data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
if len(sync_ids) >= 2 and sync_ids[1]:
# datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
# and just "user_syncid||" for primary channel. We only want the channel_syncid
return sync_ids[0]
@staticmethod
def _extract_visitor_data(*args):
"""
Extracts visitorData from an API response or ytcfg
Appears to be used to track session state
"""
return get_first(
args, [('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))],
expected_type=str)
@property
def is_authenticated(self):
return bool(self._generate_sapisidhash_header())
def extract_ytcfg(self, video_id, webpage):
if not webpage:
return {}
return self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), video_id, fatal=False) or {}
def generate_api_headers(
self, *, ytcfg=None, account_syncid=None, session_index=None,
visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
headers = {
'X-YouTube-Client-Name': compat_str(
self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
'Origin': origin,
'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
}
if session_index is None:
session_index = self._extract_session_index(ytcfg)
if account_syncid or session_index is not None:
headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
auth = self._generate_sapisidhash_header(origin)
if auth is not None:
headers['Authorization'] = auth
headers['X-Origin'] = origin
return {h: v for h, v in headers.items() if v is not None}
def _download_ytcfg(self, client, video_id):
url = {
'web': 'https://www.youtube.com',
'web_music': 'https://music.youtube.com',
'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1'
}.get(client)
if not url:
return {}
webpage = self._download_webpage(
            url, video_id, fatal=False, note=f'Downloading {client.replace("_", " ").strip()} client config')
return self.extract_ytcfg(video_id, webpage) or {}
@staticmethod
def _build_api_continuation_query(continuation, ctp=None):
query = {
'continuation': continuation
}
# TODO: Inconsistency with clickTrackingParams.
# Currently we have a fixed ctp contained within context (from ytcfg)
# and a ctp in root query for continuation.
if ctp:
query['clickTracking'] = {'clickTrackingParams': ctp}
return query
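    # Example of the continuation query built above (placeholder values):
    #   _build_api_continuation_query('TOKEN', 'CTP')
    #     -> {'continuation': 'TOKEN', 'clickTracking': {'clickTrackingParams': 'CTP'}}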
@classmethod
def _extract_next_continuation_data(cls, renderer):
next_continuation = try_get(
renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
lambda x: x['continuation']['reloadContinuationData']), dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation_ep_data(cls, continuation_ep: dict):
if isinstance(continuation_ep, dict):
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
return
ctp = continuation_ep.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation(cls, renderer):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = []
for key in ('contents', 'items'):
contents.extend(try_get(renderer, lambda x: x[key], list) or [])
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
dict)
continuation = cls._extract_continuation_ep_data(continuation_ep)
if continuation:
return continuation
@classmethod
def _extract_alerts(cls, data):
for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
if not isinstance(alert_dict, dict):
continue
for alert in alert_dict.values():
alert_type = alert.get('type')
if not alert_type:
continue
message = cls._get_text(alert, 'text')
if message:
yield alert_type, message
def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
errors = []
warnings = []
for alert_type, alert_message in alerts:
if alert_type.lower() == 'error' and fatal:
errors.append([alert_type, alert_message])
else:
warnings.append([alert_type, alert_message])
for alert_type, alert_message in (warnings + errors[:-1]):
self.report_warning(f'YouTube said: {alert_type} - {alert_message}', only_once=only_once)
if errors:
raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
def _extract_and_report_alerts(self, data, *args, **kwargs):
return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
def _extract_badges(self, renderer: dict):
badges = set()
for badge in try_get(renderer, lambda x: x['badges'], list) or []:
label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
if label:
badges.add(label.lower())
return badges
@staticmethod
def _get_text(data, *path_list, max_runs=None):
for path in path_list or [None]:
if path is None:
obj = [data]
else:
obj = traverse_obj(data, path, default=[])
if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
obj = [obj]
for item in obj:
text = try_get(item, lambda x: x['simpleText'], compat_str)
if text:
return text
runs = try_get(item, lambda x: x['runs'], list) or []
if not runs and isinstance(item, list):
runs = item
runs = runs[:min(len(runs), max_runs or len(runs))]
text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
if text:
return text
def _get_count(self, data, *path_list):
count_text = self._get_text(data, *path_list) or ''
count = parse_count(count_text)
if count is None:
count = str_to_int(
self._search_regex(r'^([\d,]+)', re.sub(r'\s', '', count_text), 'count', default=None))
return count
@staticmethod
def _extract_thumbnails(data, *path_list):
"""
Extract thumbnails from thumbnails dict
@param path_list: path list to level that contains 'thumbnails' key
"""
thumbnails = []
for path in path_list or [()]:
for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
# Sometimes youtube gives a wrong thumbnail URL. See:
# https://github.com/yt-dlp/yt-dlp/issues/233
# https://github.com/ytdl-org/youtube-dl/issues/28023
if 'maxresdefault' in thumbnail_url:
thumbnail_url = thumbnail_url.split('?')[0]
thumbnails.append({
'url': thumbnail_url,
'height': int_or_none(thumbnail.get('height')),
'width': int_or_none(thumbnail.get('width')),
})
return thumbnails
@staticmethod
def extract_relative_time(relative_time_text):
"""
Extracts a relative time from string and converts to dt object
e.g. 'streamed 6 days ago', '5 seconds ago (edited)', 'updated today'
"""
mobj = re.search(r'(?P<start>today|yesterday|now)|(?P<time>\d+)\s*(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?\s*ago', relative_time_text)
if mobj:
start = mobj.group('start')
if start:
return datetime_from_str(start)
try:
return datetime_from_str('now-%s%s' % (mobj.group('time'), mobj.group('unit')))
except ValueError:
return None
def _extract_time_text(self, renderer, *path_list):
"""@returns (timestamp, time_text)"""
text = self._get_text(renderer, *path_list) or ''
dt = self.extract_relative_time(text)
timestamp = None
if isinstance(dt, datetime.datetime):
timestamp = calendar.timegm(dt.timetuple())
if timestamp is None:
timestamp = (
unified_timestamp(text) or unified_timestamp(
self._search_regex(
(r'([a-z]+\s*\d{1,2},?\s*20\d{2})', r'(?:.+|^)(?:live|premieres|ed|ing)(?:\s*(?:on|for))?\s*(.+\d)'),
text.lower(), 'time text', default=None)))
if text and timestamp is None:
self.report_warning(f"Cannot parse localized time text '{text}'" + bug_reports_message(), only_once=True)
return timestamp, text
def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
default_client='web'):
response = None
last_error = None
count = -1
retries = self.get_param('extractor_retries', 3)
if check_get_keys is None:
check_get_keys = []
while count < retries:
count += 1
if last_error:
self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
try:
response = self._call_api(
ep=ep, fatal=True, headers=headers,
video_id=item_id, query=query,
context=self._extract_context(ytcfg, default_client),
api_key=self._extract_api_key(ytcfg, default_client),
api_hostname=api_hostname, default_client=default_client,
note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if isinstance(e.cause, compat_HTTPError):
first_bytes = e.cause.read(512)
if not is_html(first_bytes):
yt_error = try_get(
self._parse_json(
self._webpage_read_content(e.cause, None, item_id, prefix=first_bytes) or '{}', item_id, fatal=False),
lambda x: x['error']['message'], compat_str)
if yt_error:
self._report_alerts([('ERROR', yt_error)], fatal=False)
# Downloading page may result in intermittent 5xx HTTP error
                    # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
# We also want to catch all other network exceptions since errors in later pages can be troublesome
# See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
else:
self.report_warning(error_to_compat_str(e))
return
else:
try:
self._extract_and_report_alerts(response, only_once=True)
except ExtractorError as e:
# YouTube servers may return errors we want to retry on in a 200 OK response
# See: https://github.com/yt-dlp/yt-dlp/issues/839
if 'unknown error' in e.msg.lower():
last_error = e.msg
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
return
if not check_get_keys or dict_get(response, check_get_keys):
break
# Youtube sometimes sends incomplete data
# See: https://github.com/ytdl-org/youtube-dl/issues/28194
last_error = 'Incomplete data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
else:
self.report_warning(last_error)
return
return response
@staticmethod
def is_music_url(url):
return re.match(r'https?://music\.youtube\.com/', url) is not None
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
title = self._get_text(renderer, 'title')
description = self._get_text(renderer, 'descriptionSnippet')
duration = parse_duration(self._get_text(
renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
if duration is None:
duration = parse_duration(self._search_regex(
r'(?i)(ago)(?!.*\1)\s+(?P<duration>[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$',
traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str),
video_id, default=None, group='duration'))
view_count = self._get_count(renderer, 'viewCountText')
uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
channel_id = traverse_obj(
renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
expected_type=str, get_all=False)
timestamp, time_text = self._extract_time_text(renderer, 'publishedTimeText')
scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
overlay_style = traverse_obj(
renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
get_all=False, expected_type=str)
badges = self._extract_badges(renderer)
thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
expected_type=str)) or ''
url = f'https://www.youtube.com/watch?v={video_id}'
if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
url = f'https://www.youtube.com/shorts/{video_id}'
return {
'_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': url,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
'channel_id': channel_id,
'thumbnails': thumbnails,
'upload_date': (strftime_or_none(timestamp, '%Y%m%d')
if self._configuration_arg('approximate_date', ie_key='youtubetab')
else None),
'live_status': ('is_upcoming' if scheduled_timestamp is not None
else 'was_live' if 'streamed' in time_text.lower()
else 'is_live' if overlay_style is not None and overlay_style == 'LIVE' or 'live now' in badges
else None),
'release_timestamp': scheduled_timestamp,
'availability': self._availability(needs_premium='premium' in badges, needs_subscription='members only' in badges)
}
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
(?:www\.)?deturl\.com/www\.youtube\.com|
(?:www\.)?pwnyoutube\.com|
(?:www\.)?hooktube\.com|
(?:www\.)?yourepeat\.com|
tube\.majestyc\.net|
%(invidious)s|
youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
%(invidious)s
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
(?:\#|$)""" % {
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
        # DASH mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
        # DASH webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
        # DASH webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
        # DASH webm audio with Opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
        # av01 video-only formats are sometimes served with "unknown" codecs
'394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
'397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
'398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
'399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
'400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
'401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
}
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'start_time': 1,
'end_time': 9,
'channel_follower_count': int
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
},
'skip': 'Private video',
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'abr': 129.495,
'like_count': int,
'channel_id': 'UChuZAo1RKL85gev3Eal9_zg',
'playable_in_embed': True,
'channel_url': 'https://www.youtube.com/channel/UChuZAo1RKL85gev3Eal9_zg',
'view_count': int,
'track': 'The Spark',
'live_status': 'not_live',
'thumbnail': 'https://i.ytimg.com/vi_webp/IB3lcPjvWLA/maxresdefault.webp',
'channel': 'Afrojack',
'uploader_url': 'http://www.youtube.com/user/AfrojackVEVO',
'tags': 'count:19',
'availability': 'public',
'categories': ['Music'],
'age_limit': 0,
'alt_title': 'The Spark',
'channel_follower_count': int
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
{
'note': 'Embed allowed age-gate video',
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
'categories': ['Gaming'],
'thumbnail': 'https://i.ytimg.com/vi_webp/HtVdAasjOgU/maxresdefault.webp',
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
'like_count': int,
'channel': 'The Witcher',
'live_status': 'not_live',
'tags': 'count:17',
'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
'playable_in_embed': True,
'view_count': int,
'channel_follower_count': int
},
},
{
            'note': 'Age-gate video with embedding allowed on the public site',
'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
'info_dict': {
'id': 'HsUATh_Nc2U',
'ext': 'mp4',
'title': 'Godzilla 2 (Official Video)',
'description': 'md5:bf77e03fcae5529475e500129b05668a',
'upload_date': '20200408',
'uploader_id': 'FlyingKitty900',
'uploader': 'FlyingKitty',
'age_limit': 18,
'availability': 'needs_auth',
'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
'uploader_url': 'http://www.youtube.com/user/FlyingKitty900',
'channel': 'FlyingKitty',
'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
'view_count': int,
'categories': ['Entertainment'],
'live_status': 'not_live',
'tags': ['Flyingkitty', 'godzilla 2'],
'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
'like_count': int,
'duration': 177,
'playable_in_embed': True,
'channel_follower_count': int
},
},
{
            'note': 'Age-gate video embeddable only with clientScreen=EMBED',
'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
'info_dict': {
'id': 'Tq92D6wQ1mg',
'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
'ext': 'mp4',
'upload_date': '20191228',
'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'uploader': 'Projekt Melody',
'description': 'md5:17eccca93a786d51bc67646756894066',
'age_limit': 18,
'like_count': int,
'availability': 'needs_auth',
'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/Tq92D6wQ1mg/sddefault.webp',
'channel': 'Projekt Melody',
'live_status': 'not_live',
'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
'playable_in_embed': True,
'categories': ['Entertainment'],
'duration': 106,
'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_follower_count': int
},
},
{
            'note': 'Non-age-gated non-embeddable video',
'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
'info_dict': {
'id': 'MeJVWBSsPAY',
'ext': 'mp4',
'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
'uploader': 'Herr Lurik',
'uploader_id': 'st3in234',
'description': 'Fan Video. Music & Lyrics by OOMPH!.',
'upload_date': '20130730',
'track': 'Such mich find mich',
'age_limit': 0,
'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
'like_count': int,
'playable_in_embed': False,
'creator': 'OOMPH!',
'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/sddefault.jpg',
'view_count': int,
'alt_title': 'Such mich find mich',
'duration': 210,
'channel': 'Herr Lurik',
'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
'categories': ['Music'],
'availability': 'public',
'uploader_url': 'http://www.youtube.com/user/st3in234',
'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
'live_status': 'not_live',
'artist': 'OOMPH!',
'channel_follower_count': int
},
},
{
'note': 'Non-bypassable age-gated video',
'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
'only_matching': True,
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
'availability': 'public',
'tags': 'count:14',
'channel_id': 'UCYEK6xds6eo-3tr4xRdflmQ',
'view_count': int,
'live_status': 'not_live',
'channel': 'deadmau5',
'thumbnail': 'https://i.ytimg.com/vi_webp/__2ABJjxzNo/maxresdefault.webp',
'like_count': int,
'track': 'Some Chords',
'artist': 'deadmau5',
'playable_in_embed': True,
'age_limit': 0,
'channel_url': 'https://www.youtube.com/channel/UCYEK6xds6eo-3tr4xRdflmQ',
'categories': ['Music'],
'album': 'Some Chords',
'channel_follower_count': int
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
'like_count': int,
'release_timestamp': 1343767800,
'playable_in_embed': True,
'categories': ['Sports'],
'release_date': '20120731',
'channel': 'Olympics',
'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'live_status': 'was_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
'channel_follower_count': int
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
'playable_in_embed': True,
'channel': '孫ᄋᄅ',
'age_limit': 0,
'tags': 'count:11',
'channel_url': 'https://www.youtube.com/channel/UCS-xxCmRaA6BFdmgDPA_BIw',
'channel_id': 'UCS-xxCmRaA6BFdmgDPA_BIw',
'thumbnail': 'https://i.ytimg.com/vi/_b-2C3KPAM0/maxresdefault.jpg',
'view_count': int,
'categories': ['People & Blogs'],
'like_count': int,
'live_status': 'not_live',
'availability': 'unlisted',
'channel_follower_count': int
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
'skip': 'not actual anymore',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
                'upload_date': '20150501',  # According to '<meta itemprop="datePublished">', though other places say 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
'info_dict': {
'id': 'jvGDaLqkpTg',
'title': 'Tom Clancy Free Weekend Rainbow Whatever',
'description': 'md5:e03b909557865076822aa169218d6a5d',
},
'playlist': [{
'info_dict': {
'id': 'jvGDaLqkpTg',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10643,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '3AKt1R1aDnw',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10991,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': 'RtAMM00gpVc',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10995,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '6N2fdlP3C5U',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10990,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}],
'params': {
'skip_download': True,
},
'skip': 'Not multifeed anymore',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk',
'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
'thumbnail': 'https://i.ytimg.com/vi_webp/lsguqyKfVQg/maxresdefault.webp',
'categories': ['Film & Animation'],
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCTSRgz5jylBvFt_S7wnsqLQ',
'channel_id': 'UCTSRgz5jylBvFt_S7wnsqLQ',
'tags': 'count:13',
'availability': 'public',
'channel': 'IronSoulElf',
'playable_in_embed': True,
'like_count': int,
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video with incomplete 'yt:stretch=16:'
'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
'only_matching': True,
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150128',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
'channel_id': 'UCuLGmD72gJDBwmLw06X58SA',
'channel_url': 'https://www.youtube.com/channel/UCuLGmD72gJDBwmLw06X58SA',
'like_count': int,
'age_limit': 0,
'tags': ['Copyright (Legal Subject)', 'Law (Industry)', 'William W. Fisher (Author)'],
'channel': 'The Berkman Klein Center for Internet & Society',
'availability': 'public',
'view_count': int,
'categories': ['Education'],
'thumbnail': 'https://i.ytimg.com/vi_webp/M4gD1WSo5mA/maxresdefault.webp',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
'duration': 4060,
'upload_date': '20151120',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
'playable_in_embed': True,
'tags': 'count:12',
'like_count': int,
'channel_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'age_limit': 0,
'availability': 'public',
'categories': ['News & Politics'],
'channel': 'Bernie Sanders',
'thumbnail': 'https://i.ytimg.com/vi_webp/eQcmzGIKrzg/maxresdefault.webp',
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
'thumbnail': 'https://i.ytimg.com/vi_webp/iqKdEhx-dD4/maxresdefault.webp',
'tags': 'count:12',
'view_count': int,
'availability': 'public',
'age_limit': 0,
'channel': 'Vsauce',
'episode': 'Episode 1',
'categories': ['Entertainment'],
'season': 'Season 1',
'channel_id': 'UC6nSFpj9HTCZ5t-N3Rm3-HA',
'channel_url': 'https://www.youtube.com/channel/UC6nSFpj9HTCZ5t-N3Rm3-HA',
'like_count': int,
'playable_in_embed': True,
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
'skip': 'not actual anymore',
},
{
            # YouTube Music auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
'alt_title': 'Voyeur Girl',
'view_count': int,
'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'playable_in_embed': True,
'like_count': int,
'categories': ['Music'],
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'channel': 'Stephen',
'availability': 'public',
'creator': 'Stephen',
'duration': 169,
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
'age_limit': 0,
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'tags': 'count:11',
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
'skip': 'Video unavailable',
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/x41yOUIvK2k/maxresdefault.webp',
'uploader_url': 'http://www.youtube.com/user/ElevageOrVert',
'like_count': int,
'channel_id': 'UCo03ZQPBW5U4UC3regpt1nw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCo03ZQPBW5U4UC3regpt1nw',
'availability': 'public',
'age_limit': 0,
'categories': ['Pets & Animals'],
'duration': 7,
'playable_in_embed': True,
'live_status': 'not_live',
'channel': 'ElevageOrVert',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# with '};' inside yt initial data (see [1])
# see [2] for an example with '};' inside ytInitialPlayerResponse
# 1. https://github.com/ytdl-org/youtube-dl/issues/27093
# 2. https://github.com/ytdl-org/youtube-dl/issues/27216
'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
'info_dict': {
'id': 'CHqg6qOn4no',
'ext': 'mp4',
'title': 'Part 77 Sort a list of simple types in c#',
'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
'upload_date': '20130831',
'uploader_id': 'kudvenkat',
'uploader': 'kudvenkat',
'channel_id': 'UCCTVrRB5KpIiK6V2GGVsR1Q',
'like_count': int,
'uploader_url': 'http://www.youtube.com/user/kudvenkat',
'channel_url': 'https://www.youtube.com/channel/UCCTVrRB5KpIiK6V2GGVsR1Q',
'live_status': 'not_live',
'categories': ['Education'],
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/CHqg6qOn4no/sddefault.jpg',
'tags': 'count:12',
'playable_in_embed': True,
'age_limit': 0,
'view_count': int,
'duration': 522,
'channel': 'kudvenkat',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# another example of '};' in ytInitialData
'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
'only_matching': True,
},
{
# https://github.com/ytdl-org/youtube-dl/pull/28094
'url': 'OtqTfy26tG0',
'info_dict': {
'id': 'OtqTfy26tG0',
'ext': 'mp4',
'title': 'Burn Out',
'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
'upload_date': '20141120',
'uploader': 'The Cinematic Orchestra - Topic',
'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'artist': 'The Cinematic Orchestra',
'track': 'Burn Out',
'album': 'Every Day',
'like_count': int,
'live_status': 'not_live',
'alt_title': 'Burn Out',
'duration': 614,
'age_limit': 0,
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'creator': 'The Cinematic Orchestra',
'channel': 'The Cinematic Orchestra',
'tags': ['The Cinematic Orchestra', 'Every Day', 'Burn Out'],
'channel_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/OtqTfy26tG0/maxresdefault.jpg',
'categories': ['Music'],
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# controversial video, only works with bpctr when authenticated with cookies
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
{
# controversial video, requires bpctr/contentCheckOk
'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
'info_dict': {
'id': 'SZJvDhaSDnc',
'ext': 'mp4',
'title': 'San Diego teen commits suicide after bullying over embarrassing video',
'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
'uploader': 'CBS Mornings',
'uploader_id': 'CBSThisMorning',
'upload_date': '20140716',
'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7',
'duration': 170,
'categories': ['News & Politics'],
'uploader_url': 'http://www.youtube.com/user/CBSThisMorning',
'view_count': int,
'channel': 'CBS Mornings',
'tags': ['suicide', 'bullying', 'video', 'cbs', 'news'],
'thumbnail': 'https://i.ytimg.com/vi/SZJvDhaSDnc/hqdefault.jpg',
'age_limit': 18,
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UC-SJ6nODDmufqBzPBwCvYvQ',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
}
},
{
# restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
'url': 'cBvYw8_A0vQ',
'info_dict': {
'id': 'cBvYw8_A0vQ',
'ext': 'mp4',
'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
'upload_date': '20201120',
'uploader': 'Walk around Japan',
'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'duration': 1456,
'categories': ['Travel & Events'],
'channel_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'view_count': int,
'channel': 'Walk around Japan',
'tags': ['Ueno Tokyo', 'Okachimachi Tokyo', 'Ameyoko Street', 'Tokyo attraction', 'Travel in Tokyo'],
'thumbnail': 'https://i.ytimg.com/vi_webp/cBvYw8_A0vQ/hqdefault.webp',
'age_limit': 0,
'availability': 'public',
'channel_url': 'https://www.youtube.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
}, {
# Has multiple audio streams
'url': 'WaOKSUlf4TM',
'only_matching': True
}, {
# Requires Premium: has format 141 when requested using YTM url
'url': 'https://music.youtube.com/watch?v=XclachpHxis',
'only_matching': True
}, {
# multiple subtitles with same lang_code
'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
'only_matching': True,
}, {
            # Force use of the android client fallback
'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
'info_dict': {
'id': 'YOelRv7fMxY',
'title': 'DIGGING A SECRET TUNNEL Part 1',
'ext': '3gp',
'upload_date': '20210624',
'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
'uploader': 'colinfurze',
'uploader_id': 'colinfurze',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
'description': 'md5:5d5991195d599b56cd0c4148907eec50',
'duration': 596,
'categories': ['Entertainment'],
'uploader_url': 'http://www.youtube.com/user/colinfurze',
'view_count': int,
'channel': 'colinfurze',
'tags': ['Colin', 'furze', 'Terry', 'tunnel', 'underground', 'bunker'],
'thumbnail': 'https://i.ytimg.com/vi/YOelRv7fMxY/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'format': '17', # 3gp format available on android
'extractor_args': {'youtube': {'player_client': ['android']}},
},
},
{
# Skip download of additional client configs (remix client config in this case)
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'only_matching': True,
'params': {
'extractor_args': {'youtube': {'player_skip': ['configs']}},
},
}, {
# shorts
'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
'only_matching': True,
}, {
'note': 'Storyboards',
'url': 'https://www.youtube.com/watch?v=5KLPxDtMqe8',
'info_dict': {
'id': '5KLPxDtMqe8',
'ext': 'mhtml',
'format_id': 'sb0',
'title': 'Your Brain is Plastic',
'uploader_id': 'scishow',
'description': 'md5:89cd86034bdb5466cd87c6ba206cd2bc',
'upload_date': '20140324',
'uploader': 'SciShow',
'like_count': int,
'channel_id': 'UCZYTClx2T1of7BRZ86-8fow',
'channel_url': 'https://www.youtube.com/channel/UCZYTClx2T1of7BRZ86-8fow',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/5KLPxDtMqe8/maxresdefault.jpg',
'playable_in_embed': True,
'tags': 'count:12',
'uploader_url': 'http://www.youtube.com/user/scishow',
'availability': 'public',
'channel': 'SciShow',
'live_status': 'not_live',
'duration': 248,
'categories': ['Education'],
'age_limit': 0,
'channel_follower_count': int
}, 'params': {'format': 'mhtml', 'skip_download': True}
}, {
# Ensure video upload_date is in UTC timezone (video was uploaded 1641170939)
'url': 'https://www.youtube.com/watch?v=2NUZ8W2llS4',
'info_dict': {
'id': '2NUZ8W2llS4',
'ext': 'mp4',
'title': 'The NP that test your phone performance 🙂',
'description': 'md5:144494b24d4f9dfacb97c1bbef5de84d',
'uploader': 'Leon Nguyen',
'uploader_id': 'VNSXIII',
'uploader_url': 'http://www.youtube.com/user/VNSXIII',
'channel_id': 'UCRqNBSOHgilHfAczlUmlWHA',
'channel_url': 'https://www.youtube.com/channel/UCRqNBSOHgilHfAczlUmlWHA',
'duration': 21,
'view_count': int,
'age_limit': 0,
'categories': ['Gaming'],
'tags': 'count:23',
'playable_in_embed': True,
'live_status': 'not_live',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Leon Nguyen',
'thumbnail': 'https://i.ytimg.com/vi_webp/2NUZ8W2llS4/maxresdefault.webp',
'channel_follower_count': int
}
}, {
            # premiered video: the date text reflects the premiere; ensure the upload date is in UTC (published 1641172509)
'url': 'https://www.youtube.com/watch?v=mzZzzBU6lrM',
'info_dict': {
'id': 'mzZzzBU6lrM',
'ext': 'mp4',
'title': 'I Met GeorgeNotFound In Real Life...',
'description': 'md5:cca98a355c7184e750f711f3a1b22c84',
'uploader': 'Quackity',
'uploader_id': 'QuackityHQ',
'uploader_url': 'http://www.youtube.com/user/QuackityHQ',
'channel_id': 'UC_8NknAFiyhOUaZqHR3lq3Q',
'channel_url': 'https://www.youtube.com/channel/UC_8NknAFiyhOUaZqHR3lq3Q',
'duration': 955,
'view_count': int,
'age_limit': 0,
'categories': ['Entertainment'],
'tags': 'count:26',
'playable_in_embed': True,
'live_status': 'not_live',
'release_timestamp': 1641172509,
'release_date': '20220103',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Quackity',
'thumbnail': 'https://i.ytimg.com/vi/mzZzzBU6lrM/maxresdefault.jpg',
'channel_follower_count': int
}
},
{ # continuous livestream. Microformat upload date should be preferred.
# Upload date was 2021-06-19 (not UTC), while stream start is 2021-11-27
'url': 'https://www.youtube.com/watch?v=kgx4WGK0oNU',
'info_dict': {
'id': 'kgx4WGK0oNU',
'title': r're:jazz\/lofi hip hop radio🌱chill beats to relax\/study to \[LIVE 24\/7\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
'ext': 'mp4',
'channel_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'availability': 'public',
'age_limit': 0,
'release_timestamp': 1637975704,
'upload_date': '20210619',
'channel_url': 'https://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'live_status': 'is_live',
'thumbnail': 'https://i.ytimg.com/vi/kgx4WGK0oNU/maxresdefault.jpg',
'uploader': '阿鲍Abao',
'uploader_url': 'http://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'channel': 'Abao in Tokyo',
'channel_follower_count': int,
'release_date': '20211127',
'tags': 'count:39',
'categories': ['People & Blogs'],
'like_count': int,
'uploader_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'view_count': int,
'playable_in_embed': True,
'description': 'md5:2ef1d002cad520f65825346e2084e49d',
},
'params': {'skip_download': True}
},
]
@classmethod
def suitable(cls, url):
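        # Defer to the tab/playlist extractors for URLs that carry a playlist
        # ('list') query parameter; this extractor only handles single videos.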
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('list', [None])[0]:
return False
return super().suitable(url)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._code_cache = {}
self._player_cache = {}
def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data):
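        # Keep only the formats that can be downloaded from the stream's start
        # and attach a lazy fragment generator to each of them, so that live
        # DASH segments are discovered on the fly while downloading.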
lock = threading.Lock()
is_live = True
start_time = time.time()
formats = [f for f in formats if f.get('is_from_start')]
def refetch_manifest(format_id, delay):
nonlocal formats, start_time, is_live
if time.time() <= start_time + delay:
return
_, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
video_details = traverse_obj(
prs, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
prs, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
_, is_live, _, formats = self._list_formats(video_id, microformats, video_details, prs, player_url)
start_time = time.time()
def mpd_feed(format_id, delay):
"""
@returns (manifest_url, manifest_stream_number, is_live) or None
"""
with lock:
refetch_manifest(format_id, delay)
f = next((f for f in formats if f['format_id'] == format_id), None)
if not f:
if not is_live:
self.to_screen(f'{video_id}: Video is no longer live')
else:
self.report_warning(
f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}')
return None
return f['manifest_url'], f['manifest_stream_number'], is_live
for f in formats:
f['is_live'] = True
f['protocol'] = 'http_dash_segments_generator'
f['fragments'] = functools.partial(
self._live_dash_fragments, f['format_id'], live_start_time, mpd_feed)
def _live_dash_fragments(self, format_id, live_start_time, mpd_feed, ctx):
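        # Generator of DASH fragments for a live stream, yielding segment URLs
        # as they become available. FETCH_SPAN is the polling interval in
        # seconds; MAX_DURATION (432000 s = 120 h) is how far back YouTube
        # keeps live segments available.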
FETCH_SPAN, MAX_DURATION = 5, 432000
mpd_url, stream_number, is_live = None, None, True
begin_index = 0
download_start_time = ctx.get('start') or time.time()
lack_early_segments = download_start_time - (live_start_time or download_start_time) > MAX_DURATION
if lack_early_segments:
self.report_warning(bug_reports_message(
'Starting download from the last 120 hours of the live stream since '
'YouTube does not have data before that. If you think this is wrong,'), only_once=True)
lack_early_segments = True
known_idx, no_fragment_score, last_segment_url = begin_index, 0, None
fragments, fragment_base_url = None, None
def _extract_sequence_from_mpd(refresh_sequence, immediate):
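            # Refresh the MPD manifest when needed; returns (should_continue, last_seq)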
nonlocal mpd_url, stream_number, is_live, no_fragment_score, fragments, fragment_base_url
            # Derive the newest sequence number from the maximum seq value in the MPD
old_mpd_url = mpd_url
last_error = ctx.pop('last_error', None)
expire_fast = immediate or last_error and isinstance(last_error, compat_HTTPError) and last_error.code == 403
mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
or (mpd_url, stream_number, False))
if not refresh_sequence:
if expire_fast and not is_live:
return False, last_seq
elif old_mpd_url == mpd_url:
return True, last_seq
try:
fmts, _ = self._extract_mpd_formats_and_subtitles(
mpd_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
fmts = None
if not fmts:
no_fragment_score += 2
return False, last_seq
fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number)
fragments = fmt_info['fragments']
fragment_base_url = fmt_info['fragment_base_url']
assert fragment_base_url
_last_seq = int(re.search(r'(?:/|^)sq/(\d+)', fragments[-1]['path']).group(1))
return True, _last_seq
while is_live:
fetch_time = time.time()
if no_fragment_score > 30:
return
if last_segment_url:
                # Obtain the newest sequence number from the "X-Head-Seqnum" header of the last downloaded segment
try:
urlh = self._request_webpage(
last_segment_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
urlh = None
last_seq = try_get(urlh, lambda x: int_or_none(x.headers['X-Head-Seqnum']))
if last_seq is None:
no_fragment_score += 2
last_segment_url = None
continue
else:
should_continue, last_seq = _extract_sequence_from_mpd(True, no_fragment_score > 15)
no_fragment_score += 2
if not should_continue:
continue
if known_idx > last_seq:
last_segment_url = None
continue
last_seq += 1
if begin_index < 0 and known_idx < 0:
                # a negative begin_index means: start that many segments before the live edge
known_idx = last_seq + begin_index
if lack_early_segments:
known_idx = max(known_idx, last_seq - int(MAX_DURATION // fragments[-1]['duration']))
try:
for idx in range(known_idx, last_seq):
                    # do not update known_idx here, or parts of the stream may be skipped
should_continue, _ = _extract_sequence_from_mpd(False, False)
if not should_continue:
known_idx = idx - 1
raise ExtractorError('breaking out of outer loop')
last_segment_url = urljoin(fragment_base_url, 'sq/%d' % idx)
yield {
'url': last_segment_url,
}
if known_idx == last_seq:
no_fragment_score += 5
else:
no_fragment_score = 0
known_idx = last_seq
except ExtractorError:
continue
time.sleep(max(0, FETCH_SPAN + fetch_time - time.time()))
def _extract_player_url(self, *ytcfgs, webpage=None):
player_url = traverse_obj(
ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
get_all=False, expected_type=compat_str)
if not player_url:
return
return urljoin('https://www.youtube.com', player_url)
def _download_player_url(self, video_id, fatal=False):
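        # Fall back to the public iframe API JS to discover the current player
        # version when no player URL could be found on the watch page.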
res = self._download_webpage(
'https://www.youtube.com/iframe_api',
note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
if res:
player_version = self._search_regex(
r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
if player_version:
return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _load_player(self, video_id, player_url, fatal=True):
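        # Download the player JS once per player id and memoize it in
        # self._code_cache for subsequent signature/nsig extraction.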
player_id = self._extract_player_info(player_url)
if player_id not in self._code_cache:
code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote='Download of %s failed' % player_url)
if code:
self._code_cache[player_id] = code
return self._code_cache.get(player_id)
def _extract_signature_function(self, video_id, player_url, example_sig):
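        # The extracted signature function is characterized by the indices it
        # picks from a test string, so that index list can be cached on disk
        # and replayed later without re-parsing the player JS.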
player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = f'js_{player_id}_{self._signature_cache_id(example_sig)}'
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
code = self._load_player(video_id, player_url)
if code:
res = self._parse_sig_js(code)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
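        # When the youtube_print_sig_code param is set, print a compact Python
        # expression (using slices where possible) equivalent to the extracted
        # signature function, mainly useful for debugging and bug reports.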
if not self.get_param('youtube_print_sig_code'):
return
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return f's[{starts}{ends}{steps}]'
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
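        # Locate the signature-scrambling function by name in the player JS
        # and wrap it with JSInterpreter so it can be called from Python.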
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
self._print_sig_code(func, s)
return func(s)
except Exception as e:
raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
def _decrypt_nsig(self, s, video_id, player_url):
"""Turn the encrypted n field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt nsig without player_url')
player_url = urljoin('https://www.youtube.com', player_url)
sig_id = ('nsig_value', s)
if sig_id in self._player_cache:
return self._player_cache[sig_id]
try:
player_id = ('nsig', player_url)
if player_id not in self._player_cache:
self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
func = self._player_cache[player_id]
self._player_cache[sig_id] = func(s)
self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
return self._player_cache[sig_id]
except Exception as e:
raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id)
def _extract_n_function_name(self, jscode):
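        # The n-transform function may be referenced directly by name or via an
        # index into an array of function names; resolve it to the actual name.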
nfunc, idx = self._search_regex(
r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)',
jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
if not idx:
return nfunc
return json.loads(js_to_json(self._search_regex(
rf'var {re.escape(nfunc)}\s*=\s*(\[.+?\]);', jscode,
f'Initial JS player n function list ({nfunc}.{idx})')))[int(idx)]
def _extract_n_function(self, video_id, player_url):
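        # Load the n-transform function code from the on-disk cache if present;
        # otherwise extract it from the player JS and cache it for reuse.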
player_id = self._extract_player_info(player_url)
func_code = self._downloader.cache.load('youtube-nsig', player_id)
if func_code:
jsi = JSInterpreter(func_code)
else:
jscode = self._load_player(video_id, player_url)
funcname = self._extract_n_function_name(jscode)
jsi = JSInterpreter(jscode)
func_code = jsi.extract_function_code(funcname)
self._downloader.cache.store('youtube-nsig', player_id, func_code)
if self.get_param('youtube_print_sig_code'):
self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')
return lambda s: jsi.extract_function_from_code(*func_code)([s])
def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
"""
Extract signatureTimestamp (sts)
Required to tell API what sig/player version is in use.
"""
sts = None
if isinstance(ytcfg, dict):
sts = int_or_none(ytcfg.get('STS'))
if not sts:
# Attempt to extract from player
if player_url is None:
error_msg = 'Cannot extract signature timestamp without player_url.'
if fatal:
raise ExtractorError(error_msg)
self.report_warning(error_msg)
return
code = self._load_player(video_id, player_url, fatal=fatal)
if code:
sts = int_or_none(self._search_regex(
r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
'JS player signature timestamp', group='sts', fatal=fatal))
return sts
def _mark_watched(self, video_id, player_responses):
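        # Ping the videostats playback URL with a freshly generated cpn so that
        # YouTube registers a playback (marks the video as watched).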
playback_url = get_first(
player_responses, ('playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
expected_type=url_or_none)
if not playback_url:
self.report_warning('Unable to mark watched')
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
        # The cpn generation algorithm is reverse-engineered from base.js;
        # in practice it works even with a dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
        # WordPress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
return mobj.group('id')
def _extract_chapters_from_json(self, data, duration):
chapter_list = traverse_obj(
data, (
'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
), expected_type=list)
return self._extract_chapters(
chapter_list,
chapter_time=lambda chapter: float_or_none(
traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
chapter_title=lambda chapter: traverse_obj(
chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
duration=duration)
def _extract_chapters_from_engagement_panel(self, data, duration):
content_list = traverse_obj(
data,
('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
expected_type=list, default=[])
chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
chapter_title = lambda chapter: self._get_text(chapter, 'title')
return next((
filter(None, (
self._extract_chapters(
traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
chapter_time, chapter_title, duration)
for contents in content_list
))), [])
def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
chapters = []
last_chapter = {'start_time': 0}
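        # Each chapter's end_time is back-filled with the next chapter's start time;
        # chapters whose start time goes backwards are dropped with a warning.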
for idx, chapter in enumerate(chapter_list or []):
title = chapter_title(chapter)
start_time = chapter_time(chapter)
if start_time is None:
continue
last_chapter['end_time'] = start_time
if start_time < last_chapter['start_time']:
if idx == 1:
chapters.pop()
self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
else:
self.report_warning(f'Invalid start time for chapter "{title}"')
continue
last_chapter = {'start_time': start_time, 'title': title}
chapters.append(last_chapter)
last_chapter['end_time'] = duration
return chapters
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(fr'{regex}\s*{self._YT_INITIAL_BOUNDARY_RE}',
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_comment(self, comment_renderer, parent=None):
comment_id = comment_renderer.get('commentId')
if not comment_id:
return
text = self._get_text(comment_renderer, 'contentText')
# note: timestamp is an estimate calculated from the current time and time_text
timestamp, time_text = self._extract_time_text(comment_renderer, 'publishedTimeText')
author = self._get_text(comment_renderer, 'authorText')
author_id = try_get(comment_renderer,
lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
lambda x: x['likeCount']), compat_str)) or 0
author_thumbnail = try_get(comment_renderer,
lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
is_favorited = 'creatorHeart' in (try_get(
comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
return {
'id': comment_id,
'text': text,
'timestamp': timestamp,
'time_text': time_text,
'like_count': votes,
'is_favorited': is_favorited,
'author': author,
'author_id': author_id,
'author_thumbnail': author_thumbnail,
'author_is_uploader': author_is_uploader,
'parent': parent or 'root'
}
def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, tracker=None):
get_single_config_arg = lambda c: self._configuration_arg(c, [''])[0]
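        # extract_header() reads the comments header to report the estimated comment
        # count, pick the requested sort order (top/new) and return its continuation.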
def extract_header(contents):
_continuation = None
for content in contents:
comments_header_renderer = traverse_obj(content, 'commentsHeaderRenderer')
expected_comment_count = self._get_count(
comments_header_renderer, 'countText', 'commentsCount')
if expected_comment_count:
tracker['est_total'] = expected_comment_count
self.to_screen(f'Downloading ~{expected_comment_count} comments')
comment_sort_index = int(get_single_config_arg('comment_sort') != 'top') # 1 = new, 0 = top
sort_menu_item = try_get(
comments_header_renderer,
lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
_continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
if not _continuation:
continue
sort_text = str_or_none(sort_menu_item.get('title'))
if not sort_text:
sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
self.to_screen('Sorting comments by %s' % sort_text.lower())
break
return _continuation
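        # extract_thread() yields parsed comments from one page of thread renderers and
        # recurses into _comment_entries() for replies, honouring the max-* limits.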
def extract_thread(contents):
if not parent:
tracker['current_page_thread'] = 0
for content in contents:
if not parent and tracker['total_parent_comments'] >= max_parents:
yield
comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
comment_renderer = get_first(
(comment_thread_renderer, content), [['commentRenderer', ('comment', 'commentRenderer')]],
expected_type=dict, default={})
comment = self._extract_comment(comment_renderer, parent)
if not comment:
continue
tracker['running_total'] += 1
tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1
yield comment
# Attempt to get the replies
comment_replies_renderer = try_get(
comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
if comment_replies_renderer:
tracker['current_page_thread'] += 1
comment_entries_iter = self._comment_entries(
comment_replies_renderer, ytcfg, video_id,
parent=comment.get('id'), tracker=tracker)
yield from itertools.islice(comment_entries_iter, min(
max_replies_per_thread, max(0, max_replies - tracker['total_reply_comments'])))
# Keeps track of counts across recursive calls
if not tracker:
tracker = dict(
running_total=0,
est_total=0,
current_page_thread=0,
total_parent_comments=0,
total_reply_comments=0)
# TODO: Deprecated
# YouTube comments have a max depth of 2
max_depth = int_or_none(get_single_config_arg('max_comment_depth'))
if max_depth:
self._downloader.deprecation_warning(
'[youtube] max_comment_depth extractor argument is deprecated. Set max replies in the max-comments extractor argument instead.')
if max_depth == 1 and parent:
return
max_comments, max_parents, max_replies, max_replies_per_thread, *_ = map(
lambda p: int_or_none(p, default=sys.maxsize), self._configuration_arg('max_comments', ) + [''] * 4)
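        # 'max_comments' is a comma-separated list of limits:
        # max-comments,max-parents,max-replies,max-replies-per-thread; missing values
        # effectively mean "no limit" (sys.maxsize).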
continuation = self._extract_continuation(root_continuation_data)
message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
if message and not parent:
self.report_warning(message, video_id=video_id)
response = None
is_first_continuation = parent is None
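        # The first top-level response also carries the comments header, from which the
        # sort order and the real first-page continuation are extracted.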
for page_num in itertools.count(0):
if not continuation:
break
headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response))
comment_prog_str = f"({tracker["running_total"]}/{tracker["est_total"]})"
if page_num == 0:
if is_first_continuation:
note_prefix = 'Downloading comment section API JSON'
else:
note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
tracker['current_page_thread'], comment_prog_str)
else:
note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
' ' if parent else '', ' replies' if parent else '',
page_num, comment_prog_str)
response = self._extract_response(
item_id=None, query=continuation,
ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
check_get_keys='onResponseReceivedEndpoints')
continuation_contents = traverse_obj(
response, 'onResponseReceivedEndpoints', expected_type=list, default=[])
continuation = None
for continuation_section in continuation_contents:
continuation_items = traverse_obj(
continuation_section,
(('reloadContinuationItemsCommand', 'appendContinuationItemsAction'), 'continuationItems'),
get_all=False, expected_type=list) or []
if is_first_continuation:
continuation = extract_header(continuation_items)
is_first_continuation = False
if continuation:
break
continue
for entry in extract_thread(continuation_items):
if not entry:
return
yield entry
continuation = self._extract_continuation({'contents': continuation_items})
if continuation:
break
def _get_comments(self, ytcfg, video_id, contents, webpage):
"""Entry for comment extraction"""
def _real_comment_extract(contents):
renderer = next((
item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
if item.get('sectionIdentifier') == 'comment-item-section'), None)
yield from self._comment_entries(renderer, ytcfg, video_id)
max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
return itertools.islice(_real_comment_extract(contents), 0, max_comments)
@staticmethod
def _get_checkok_params():
return {'contentCheckOk': True, 'racyCheckOk': True}
@classmethod
def _generate_player_context(cls, sts=None):
context = {
'html5Preference': 'HTML5_PREF_WANTS',
}
if sts is not None:
context['signatureTimestamp'] = sts
return {
'playbackContext': {
'contentPlaybackContext': context
},
**cls._get_checkok_params()
}
@staticmethod
def _is_agegated(player_response):
if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
return True
reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
AGE_GATE_REASONS = (
'confirm your age', 'age-restricted', 'inappropriate', # reason
'age_verification_required', 'age_check_required', # status
)
return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
@staticmethod
def _is_unplayable(player_response):
return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
headers = self.generate_api_headers(
ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
yt_query = {'videoId': video_id}
yt_query.update(self._generate_player_context(sts))
return self._extract_response(
item_id=video_id, ep='player', query=yt_query,
ytcfg=player_ytcfg, headers=headers, fatal=True,
default_client=client,
note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
) or None
def _get_requested_clients(self, url, smuggled_data):
requested_clients = []
default = ['android', 'web']
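        # The 'player_client' extractor argument accepts concrete client names as well as
        # 'default' and 'all'; unrecognised names are skipped with a warning.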
allowed_clients = sorted(
(client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'),
key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
for client in self._configuration_arg('player_client'):
if client in allowed_clients:
requested_clients.append(client)
elif client == 'default':
requested_clients.extend(default)
elif client == 'all':
requested_clients.extend(allowed_clients)
else:
self.report_warning(f'Skipping unsupported client {client}')
if not requested_clients:
requested_clients = default
if smuggled_data.get('is_music_url') or self.is_music_url(url):
requested_clients.extend(
f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)
return orderedSet(requested_clients)
def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
initial_pr = None
if webpage:
initial_pr = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
video_id, 'initial player response')
all_clients = set(clients)
clients = clients[::-1]
prs = []
def append_client(*client_names):
""" Append the first client name that exists but not already used """
for client_name in client_names:
actual_client = _split_innertube_client(client_name)[0]
if actual_client in INNERTUBE_CLIENTS:
if actual_client not in all_clients:
clients.append(client_name)
all_clients.add(actual_client)
return
# Android player_response does not have microFormats which are needed for
# extraction of some data. So we return the initial_pr with formats
# stripped out even if not requested by the user
# See: https://github.com/yt-dlp/yt-dlp/issues/501
if initial_pr:
pr = dict(initial_pr)
pr['streamingData'] = None
prs.append(pr)
last_error = None
tried_iframe_fallback = False
player_url = None
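        # Clients are popped off the end of the list; age-gated or unplayable responses
        # may append fallback clients (e.g. *_embedded, *_creator) to be tried next.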
while clients:
client, base_client, variant = _split_innertube_client(clients.pop())
player_ytcfg = master_ytcfg if client == 'web' else {}
if 'configs' not in self._configuration_arg('player_skip') and client != 'web':
player_ytcfg = self._download_ytcfg(client, video_id) or player_ytcfg
player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
if 'js' in self._configuration_arg('player_skip'):
require_js_player = False
player_url = None
if not player_url and not tried_iframe_fallback and require_js_player:
player_url = self._download_player_url(video_id)
tried_iframe_fallback = True
try:
pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
except ExtractorError as e:
if last_error:
self.report_warning(last_error)
last_error = e
continue
if pr:
prs.append(pr)
# creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
if variant == 'embedded' and self._is_unplayable(pr) and self.is_authenticated:
append_client(f'{base_client}_creator')
elif self._is_agegated(pr):
if variant == 'tv_embedded':
append_client(f'{base_client}_embedded')
elif not variant:
append_client(f'tv_embedded.{base_client}', f'{base_client}_embedded')
if last_error:
            if not prs:
raise last_error
self.report_warning(last_error)
return prs, player_url
def _extract_formats(self, streaming_data, video_id, player_url, is_live, duration):
itags, stream_ids = {}, []
itag_qualities, res_qualities = {}, {}
q = qualities([
# Normally tiny is the smallest video-only formats. But
# audio-only formats with unknown quality may get tagged as tiny
'tiny',
'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
])
streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
for fmt in streaming_formats:
if fmt.get('targetDurationSec'):
continue
itag = str_or_none(fmt.get('itag'))
audio_track = fmt.get('audioTrack') or {}
stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
if stream_id in stream_ids:
continue
quality = fmt.get('quality')
height = int_or_none(fmt.get('height'))
if quality == 'tiny' or not quality:
quality = fmt.get('audioQuality', '').lower() or quality
# The 3gp format (17) in android client has a quality of "small",
# but is actually worse than other formats
if itag == '17':
quality = 'tiny'
if quality:
if itag:
itag_qualities[itag] = quality
if height:
res_qualities[height] = quality
# FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
# (adding `&sq=0` to the URL) and parsing emsg box to determine the
            # number of fragments that would subsequently be requested with (`&sq=N`)
if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
continue
fmt_url = fmt.get('url')
if not fmt_url:
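                # Protected formats carry a 'signatureCipher' instead of a plain URL; the
                # 's' value must be decrypted with the JS player and appended under the
                # parameter named by 'sp' (default 'signature').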
sc = compat_parse_qs(fmt.get('signatureCipher'))
fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
encrypted_sig = try_get(sc, lambda x: x['s'][0])
if not (sc and fmt_url and encrypted_sig):
continue
if not player_url:
continue
signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
fmt_url += '&' + sp + '=' + signature
query = parse_qs(fmt_url)
throttled = False
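            # The 'n' query parameter has to be transformed with the player's n-function;
            # if that fails, the format is kept but flagged as throttled.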
if query.get('n'):
try:
fmt_url = update_url_query(fmt_url, {
'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
except ExtractorError as e:
self.report_warning(
'nsig extraction failed: You may experience throttling for some formats\n'
                        f'n = {query["n"][0]} ; player = {player_url}\n{e}', only_once=True)
throttled = True
if itag:
itags[itag] = 'https'
stream_ids.append(stream_id)
tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
            language_preference = (
                10 if audio_track.get('audioIsDefault')
                else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower()
                else -1)
# Some formats may have much smaller duration than others (possibly damaged during encoding)
# Eg: 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
# Make sure to avoid false positives with small duration differences.
# Eg: __2ABJjxzNo, ySuUZEjARPY
is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500)
if is_damaged:
self.report_warning(f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True)
dct = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': itag,
'format_note': join_nonempty(
'%s%s' % (audio_track.get('displayName') or '',
' (default)' if language_preference > 0 else ''),
fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '),
'source_preference': -10 if throttled else -1,
'fps': int_or_none(fmt.get('fps')) or None,
'height': height,
'quality': q(quality),
'has_drm': bool(fmt.get('drmFamilies')),
'tbr': tbr,
'url': fmt_url,
'width': int_or_none(fmt.get('width')),
'language': join_nonempty(audio_track.get('id', '').split('.')[0],
'desc' if language_preference < -1 else ''),
'language_preference': language_preference,
# Strictly de-prioritize damaged and 3gp formats
'preference': -10 if is_damaged else -2 if itag == '17' else None,
}
mime_mobj = re.match(
r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
if mime_mobj:
dct['ext'] = mimetype2ext(mime_mobj.group(1))
dct.update(parse_codecs(mime_mobj.group(2)))
no_audio = dct.get('acodec') == 'none'
no_video = dct.get('vcodec') == 'none'
if no_audio:
dct['vbr'] = tbr
if no_video:
dct['abr'] = tbr
if no_audio or no_video:
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
if dct.get('ext'):
dct['container'] = dct['ext'] + '_dash'
yield dct
live_from_start = is_live and self.get_param('live_from_start')
skip_manifests = self._configuration_arg('skip')
if not self.get_param('youtube_include_hls_manifest', True):
skip_manifests.append('hls')
get_dash = 'dash' not in skip_manifests and (
not is_live or live_from_start or self._configuration_arg('include_live_dash'))
get_hls = not live_from_start and 'hls' not in skip_manifests
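        # process_manifest_format() de-duplicates HLS/DASH formats against the https
        # formats above (by itag) and maps them onto the same quality scale.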
def process_manifest_format(f, proto, itag):
if itag in itags:
if itags[itag] == proto or f'{itag}-{proto}' in itags:
return False
itag = f'{itag}-{proto}'
if itag:
f['format_id'] = itag
itags[itag] = proto
f['quality'] = next((
q(qdict[val])
for val, qdict in ((f.get('format_id', '').split('-')[0], itag_qualities), (f.get('height'), res_qualities))
if val in qdict), -1)
return True
for sd in streaming_data:
hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
if hls_manifest_url:
for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
if process_manifest_format(f, 'hls', self._search_regex(
r'/itag/(\d+)', f['url'], 'itag', default=None)):
yield f
dash_manifest_url = get_dash and sd.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
if process_manifest_format(f, 'dash', f['format_id']):
f['filesize'] = int_or_none(self._search_regex(
r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
if live_from_start:
f['is_from_start'] = True
yield f
def _extract_storyboard(self, player_responses, duration):
spec = get_first(
player_responses, ('storyboards', 'playerStoryboardSpecRenderer', 'spec'), default='').split('|')[::-1]
base_url = url_or_none(urljoin('https://i.ytimg.com/', spec.pop() or None))
if not base_url:
return
L = len(spec) - 1
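        # Each spec entry has 8 '#'-separated fields: width, height, frame_count, cols,
        # rows, an unused field, the '$N' URL fragment and the 'sigh' signature.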
for i, args in enumerate(spec):
args = args.split('#')
counts = list(map(int_or_none, args[:5]))
if len(args) != 8 or not all(counts):
                self.report_warning(f'Malformed storyboard {i}: {"#".join(args)}{bug_reports_message()}')
continue
width, height, frame_count, cols, rows = counts
N, sigh = args[6:]
url = base_url.replace('$L', str(L - i)).replace('$N', N) + f'&sigh={sigh}'
fragment_count = frame_count / (cols * rows)
fragment_duration = duration / fragment_count
yield {
'format_id': f'sb{i}',
'format_note': 'storyboard',
'ext': 'mhtml',
'protocol': 'mhtml',
'acodec': 'none',
'vcodec': 'none',
'url': url,
'width': width,
'height': height,
'fragments': [{
'url': url.replace('$M', str(j)),
'duration': min(fragment_duration, duration - (j * fragment_duration)),
} for j in range(math.ceil(fragment_count))],
}
def _download_player_responses(self, url, smuggled_data, video_id, webpage_url):
webpage = None
if 'webpage' not in self._configuration_arg('player_skip'):
webpage = self._download_webpage(
webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
player_responses, player_url = self._extract_player_responses(
self._get_requested_clients(url, smuggled_data),
video_id, webpage, master_ytcfg)
return webpage, master_ytcfg, player_responses, player_url
def _list_formats(self, video_id, microformats, video_details, player_responses, player_url, duration=None):
live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
is_live = get_first(video_details, 'isLive')
if is_live is None:
is_live = get_first(live_broadcast_details, 'isLiveNow')
streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live, duration))
return live_broadcast_details, is_live, streaming_data, formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
playability_statuses = traverse_obj(
player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
trailer_video_id = get_first(
playability_statuses,
('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
expected_type=str)
if trailer_video_id:
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else (lambda x: None))
video_details = traverse_obj(
player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
video_title = (
get_first(video_details, 'title')
or self._get_text(microformats, (..., 'title'))
or search_meta(['og:title', 'twitter:title', 'title']))
video_description = get_first(video_details, 'shortDescription')
multifeed_metadata_list = get_first(
player_responses,
('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
expected_type=str)
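        # Multi-camera live streams expose a metadataList of sibling feeds; unless
        # --no-playlist is given, each feed is returned as a separate playlist entry.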
if multifeed_metadata_list and not smuggled_data.get('force_singlefeed'):
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
else:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(
compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(
feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%swatch?v=%s' % (base_url, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(
entries, video_id, video_title, video_description)
duration = int_or_none(
get_first(video_details, 'lengthSeconds')
or get_first(microformats, 'lengthSeconds')
or parse_duration(search_meta('duration'))) or None
live_broadcast_details, is_live, streaming_data, formats = self._list_formats(
video_id, microformats, video_details, player_responses, player_url, duration)
if not formats:
if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
self.report_drm(video_id)
pemr = get_first(
playability_statuses,
('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
subreason = clean_html(self._get_text(pemr, 'subreason') or '')
if subreason:
if subreason == 'The uploader has not made this video available in your country.':
countries = get_first(microformats, 'availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += f'. {subreason}'
if reason:
self.raise_no_formats(reason, expected=True)
keywords = get_first(video_details, 'keywords', expected_type=list) or []
if not keywords and webpage:
keywords = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
for keyword in keywords:
if keyword.startswith('yt:stretch='):
mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
if mobj:
# NB: float is intentional for forcing float division
w, h = (float(v) for v in mobj.groups())
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
break
thumbnails = self._extract_thumbnails((video_details, microformats), (..., ..., 'thumbnail'))
thumbnail_url = search_meta(['og:image', 'twitter:image'])
if thumbnail_url:
thumbnails.append({
'url': thumbnail_url,
})
original_thumbnails = thumbnails.copy()
        # The best resolution thumbnails sometimes do not appear in the webpage
# See: https://github.com/yt-dlp/yt-dlp/issues/340
# List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
thumbnail_names = [
            # While the *1,*2,*3 thumbnails are just below their corresponding "*default" variants
# in resolution, these are not the custom thumbnail. So de-prioritize them
'maxresdefault', 'hq720', 'sddefault', 'hqdefault', '0', 'mqdefault', 'default',
'sd1', 'sd2', 'sd3', 'hq1', 'hq2', 'hq3', 'mq1', 'mq2', 'mq3', '1', '2', '3'
]
n_thumbnail_names = len(thumbnail_names)
thumbnails.extend({
'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
video_id=video_id, name=name, ext=ext,
webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
} for name in thumbnail_names for ext in ('webp', 'jpg'))
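        # Preference: webp beats jpg for the same name, and names earlier in
        # thumbnail_names (higher resolution) rank above later ones.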
for thumb in thumbnails:
i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
self._remove_duplicate_formats(thumbnails)
self._downloader._sort_thumbnails(original_thumbnails)
category = get_first(microformats, 'category') or search_meta('genre')
channel_id = str_or_none(
get_first(video_details, 'channelId')
or get_first(microformats, 'externalChannelId')
or search_meta('channelId'))
owner_profile_url = get_first(microformats, 'ownerProfileUrl')
live_content = get_first(video_details, 'isLiveContent')
is_upcoming = get_first(video_details, 'isUpcoming')
if is_live is None:
if is_upcoming or live_content is False:
is_live = False
if is_upcoming is None and (live_content or is_live):
is_upcoming = False
live_start_time = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
live_end_time = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
if not duration and live_end_time and live_start_time:
duration = live_end_time - live_start_time
if is_live and self.get_param('live_from_start'):
self._prepare_live_from_start_formats(formats, video_id, live_start_time, url, webpage_url, smuggled_data)
formats.extend(self._extract_storyboard(player_responses, duration))
# Source is given priority since formats that throttle are given lower source_preference
# When throttling issue is fully fixed, remove this
self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto'))
info = {
'id': video_id,
'title': video_title,
'formats': formats,
'thumbnails': thumbnails,
# The best thumbnail that we are sure exists. Prevents unnecessary
            # URL checking if the user doesn't care about getting the best possible thumbnail
'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
'description': video_description,
'uploader': get_first(video_details, 'author'),
'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://www.youtube.com/channel/%s'),
'duration': duration,
'view_count': int_or_none(
get_first((video_details, microformats), (..., 'viewCount'))
or search_meta('interactionCount')),
'average_rating': float_or_none(get_first(video_details, 'averageRating')),
'age_limit': 18 if (
get_first(microformats, 'isFamilySafe') is False
or search_meta('isFamilyFriendly') == 'false'
or search_meta('og:restrictions:age') == '18+') else 0,
'webpage_url': webpage_url,
'categories': [category] if category else None,
'tags': keywords,
'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
'is_live': is_live,
'was_live': (False if is_live or is_upcoming or live_content is False
else None if is_live is None or is_upcoming is None
else live_content),
'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
'release_timestamp': live_start_time,
}
pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
if pctr:
def get_lang_code(track):
return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
or track.get('languageCode'))
# Converted into dicts to remove duplicates
captions = {
get_lang_code(sub): sub
for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
translation_languages = {
lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
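            # process_language() appends one subtitle entry per format in
            # self._SUBTITLE_FORMATS for the given language code into the target container.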
def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
})
lang_subs.append({
'ext': fmt,
'url': urljoin('https://www.youtube.com', update_url_query(base_url, query)),
'name': sub_name,
})
subtitles, automatic_captions = {}, {}
for lang_code, caption_track in captions.items():
                base_url = caption_track.get('baseUrl')
                if not base_url:
                    continue
                orig_lang = parse_qs(base_url).get('lang', [None])[-1]
lang_name = self._get_text(caption_track, 'name', max_runs=1)
if caption_track.get('kind') != 'asr':
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, lang_name, {})
if not caption_track.get('isTranslatable'):
continue
for trans_code, trans_name in translation_languages.items():
if not trans_code:
continue
orig_trans_code = trans_code
if caption_track.get('kind') != 'asr':
if 'translated_subs' in self._configuration_arg('skip'):
continue
trans_code += f'-{lang_code}'
trans_name += format_field(lang_name, template=' from %s')
# Add an "-orig" label to the original language so that it can be distinguished.
# The subs are returned without "-orig" as well for compatibility
if lang_code == f'a-{orig_trans_code}':
process_language(
automatic_captions, base_url, f'{trans_code}-orig', f'{trans_name} (Original)', {})
# Setting tlang=lang returns damaged subtitles.
process_language(automatic_captions, base_url, trans_code, trans_name,
{} if orig_lang == orig_trans_code else {'tlang': trans_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
for k, v in query.items():
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
d_k += '_time'
if d_k not in info and k in s_ks:
info[d_k] = parse_duration(query[k][0])
# Youtube Music Auto-generated description
if video_description:
mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
if mobj:
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = release_date[:4]
info.update({
                    'album': mobj.group('album').strip(),
'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
'track': mobj.group('track').strip(),
'release_date': release_date,
'release_year': int_or_none(release_year),
})
initial_data = None
if webpage:
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id,
'yt initial data')
if not initial_data:
query = {'videoId': video_id}
query.update(self._get_checkok_params())
initial_data = self._extract_response(
item_id=video_id, ep='next', fatal=False,
ytcfg=master_ytcfg, query=query,
headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')
try: # This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
except (KeyError, IndexError, TypeError):
pass
else:
info.setdefault('subtitles', {})['live_chat'] = [{
'url': f'https://www.youtube.com/watch?v={video_id}', # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
}]
if initial_data:
info['chapters'] = (
self._extract_chapters_from_json(initial_data, duration)
or self._extract_chapters_from_engagement_panel(initial_data, duration)
or None)
contents = traverse_obj(
initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents'),
expected_type=list, default=[])
vpir = get_first(contents, 'videoPrimaryInfoRenderer')
if vpir:
stl = vpir.get('superTitleLink')
if stl:
stl = self._get_text(stl)
if try_get(
vpir,
lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
info['location'] = stl
else:
mobj = re.search(r'(.+?)\s*S(\d+)\s*•?\s*E(\d+)', stl)
if mobj:
info.update({
'series': mobj.group(1),
'season_number': int(mobj.group(2)),
'episode_number': int(mobj.group(3)),
})
for tlb in (try_get(
vpir,
lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
list) or []):
tbr = tlb.get('toggleButtonRenderer') or {}
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
lambda x: x['accessibility'],
lambda x: x['accessibilityData']['accessibilityData'],
], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
label = (try_get(tbr, getter, dict) or {}).get('label')
if label:
mobj = re.match(regex, label)
if mobj:
info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
break
sbr_tooltip = try_get(
vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
if sbr_tooltip:
like_count, dislike_count = sbr_tooltip.split(' / ')
info.update({
'like_count': str_to_int(like_count),
'dislike_count': str_to_int(dislike_count),
})
vsir = get_first(contents, 'videoSecondaryInfoRenderer')
if vsir:
vor = traverse_obj(vsir, ('owner', 'videoOwnerRenderer'))
info.update({
'channel': self._get_text(vor, 'title'),
'channel_follower_count': self._get_count(vor, 'subscriberCountText')})
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
list) or []
multiple_songs = False
for row in rows:
if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
multiple_songs = True
break
for row in rows:
mrr = row.get('metadataRowRenderer') or {}
mrr_title = mrr.get('title')
if not mrr_title:
continue
mrr_title = self._get_text(mrr, 'title')
mrr_contents_text = self._get_text(mrr, ('contents', 0))
if mrr_title == 'License':
info['license'] = mrr_contents_text
elif not multiple_songs:
if mrr_title == 'Album':
info['album'] = mrr_contents_text
elif mrr_title == 'Artist':
info['artist'] = mrr_contents_text
elif mrr_title == 'Song':
info['track'] = mrr_contents_text
fallbacks = {
'channel': 'uploader',
'channel_id': 'uploader_id',
'channel_url': 'uploader_url',
}
# The upload date for scheduled, live and past live streams / premieres in microformats
# may be different from the stream date. Although not in UTC, we will prefer it in this case.
# See: https://github.com/yt-dlp/yt-dlp/pull/2223#issuecomment-1008485139
upload_date = (
unified_strdate(get_first(microformats, 'uploadDate'))
or unified_strdate(search_meta('uploadDate')))
if not upload_date or (not info.get('is_live') and not info.get('was_live') and info.get('live_status') != 'is_upcoming'):
upload_date = strftime_or_none(self._extract_time_text(vpir, 'dateText')[0], '%Y%m%d')
info['upload_date'] = upload_date
for to, frm in fallbacks.items():
if not info.get(to):
info[to] = info.get(frm)
for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
v = info.get(s_k)
if v:
info[d_k] = v
is_private = get_first(video_details, 'isPrivate', expected_type=bool)
is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
is_membersonly = None
is_premium = None
if initial_data and is_private is not None:
is_membersonly = False
is_premium = False
contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
badge_labels = set()
for content in contents:
if not isinstance(content, dict):
continue
badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
for badge_label in badge_labels:
if badge_label.lower() == 'members only':
is_membersonly = True
elif badge_label.lower() == 'premium':
is_premium = True
elif badge_label.lower() == 'unlisted':
is_unlisted = True
info['availability'] = self._availability(
is_private=is_private,
needs_premium=is_premium,
needs_subscription=is_membersonly,
needs_auth=info['age_limit'] >= 18,
is_unlisted=None if is_private is None else is_unlisted)
info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
self.mark_watched(video_id, player_responses)
return info
class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
@staticmethod
def passthrough_smuggled_data(func):
def _smuggle(entries, smuggled_data):
for entry in entries:
# TODO: Convert URL to music.youtube instead.
# Do we need to passthrough any other smuggled_data?
entry['url'] = smuggle_url(entry['url'], smuggled_data)
yield entry
@functools.wraps(func)
def wrapper(self, url):
url, smuggled_data = unsmuggle_url(url, {})
if self.is_music_url(url):
smuggled_data['is_music_url'] = True
info_dict = func(self, url, smuggled_data)
if smuggled_data and info_dict.get('entries'):
info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data)
return info_dict
return wrapper
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
            r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
channel_url, 'channel id')
@staticmethod
def _extract_basic_item_renderer(item):
# Modified from _extract_grid_item_renderer
known_basic_renderers = (
'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer', 'reelItemRenderer'
)
for key, renderer in item.items():
if not isinstance(renderer, dict):
continue
elif key in known_basic_renderers:
return renderer
elif key.startswith('grid') and key.endswith('Renderer'):
return renderer
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_basic_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = self._get_text(renderer, 'title')
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
continue
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
continue
# channel
channel_id = renderer.get('channelId')
if channel_id:
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
continue
# generic endpoint URL support
ep_url = urljoin('https://www.youtube.com/', try_get(
renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if ep_url:
for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
if ie.suitable(ep_url):
yield self.url_result(
ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
break
    def _music_responsive_list_entry(self, renderer):
video_id = traverse_obj(renderer, ('playlistItemData', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
playlist_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'playlistId'))
if playlist_id:
video_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}&list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
return self.url_result(f'https://music.youtube.com/playlist?list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
browse_id = traverse_obj(renderer, ('navigationEndpoint', 'browseEndpoint', 'browseId'))
if browse_id:
return self.url_result(f'https://music.youtube.com/browse/{browse_id}',
ie=YoutubeTabIE.ie_key(), video_id=browse_id)
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
yield from self._grid_entries(renderer)
renderer = content.get('horizontalListRenderer')
if renderer:
# TODO
pass
def _shelf_entries(self, shelf_renderer, skip_channels=False):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if shelf_url:
            # Skip links to other channels; note that checking
            # endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
# will not work
if skip_channels and '/channels?' in shelf_url:
return
title = self._get_text(shelf_renderer, 'title')
yield self.url_result(shelf_url, video_title=title)
# Shelf may not contain shelf URL, fallback to extraction from content
yield from self._shelf_entries_from_content(shelf_renderer)
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _rich_entries(self, rich_grid_renderer):
renderer = try_get(
rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
video_id = renderer.get('videoId')
if not video_id:
return
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _hashtag_tile_entry(self, hashtag_tile_renderer):
url = urljoin('https://youtube.com', traverse_obj(
hashtag_tile_renderer, ('onTapCommand', 'commandMetadata', 'webCommandMetadata', 'url')))
if url:
return self.url_result(
url, ie=YoutubeTabIE.ie_key(), title=self._get_text(hashtag_tile_renderer, 'hashtag'))
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
video_id = video_renderer.get('videoId')
if video_id:
entry = self._extract_video(video_renderer)
if entry:
yield entry
# playlist attachment
playlist_id = try_get(
post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
yield from self._post_thread_entries(renderer)
r''' # unused
def _rich_grid_entries(self, contents):
for content in contents:
video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
'''
def _extract_entries(self, parent_renderer, continuation_list):
# continuation_list is modified in-place with continuation_list = [continuation_token]
continuation_list[:] = [None]
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
for content in contents:
if not isinstance(content, dict):
continue
is_renderer = traverse_obj(
content, 'itemSectionRenderer', 'musicShelfRenderer', 'musicShelfContinuation',
expected_type=dict)
if not is_renderer:
renderer = content.get('richItemRenderer')
if renderer:
for entry in self._rich_entries(renderer):
yield entry
continuation_list[0] = self._extract_continuation(parent_renderer)
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
known_renderers = {
'playlistVideoListRenderer': self._playlist_entries,
'gridRenderer': self._grid_entries,
'reelShelfRenderer': self._grid_entries,
'shelfRenderer': self._shelf_entries,
                    'musicResponsiveListItemRenderer': lambda x: [self._music_responsive_list_entry(x)],
'backstagePostThreadRenderer': self._post_thread_entries,
'videoRenderer': lambda x: [self._video_entry(x)],
'playlistRenderer': lambda x: self._grid_entries({'items': [{'playlistRenderer': x}]}),
'channelRenderer': lambda x: self._grid_entries({'items': [{'channelRenderer': x}]}),
'hashtagTileRenderer': lambda x: [self._hashtag_tile_entry(x)]
}
for key, renderer in isr_content.items():
if key not in known_renderers:
continue
for entry in known_renderers[key](renderer):
if entry:
yield entry
continuation_list[0] = self._extract_continuation(renderer)
break
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(is_renderer)
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(parent_renderer)
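    # _entries() walks a tab's sectionListRenderer/richGridRenderer and then follows
    # continuation tokens page by page, dispatching each renderer type to its handler.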
def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
continuation_list = [None]
extract_entries = lambda x: self._extract_entries(x, continuation_list)
tab_content = try_get(tab, lambda x: x['content'], dict)
if not tab_content:
return
parent_renderer = (
try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
yield from extract_entries(parent_renderer)
continuation = continuation_list[0]
for page_num in itertools.count(1):
if not continuation:
break
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
response = self._extract_response(
item_id=f'{item_id} page {page_num}',
query=continuation, headers=headers, ytcfg=ytcfg,
check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
if not response:
break
# Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
# See: https://github.com/ytdl-org/youtube-dl/issues/28702
visitor_data = self._extract_visitor_data(response) or visitor_data
known_continuation_renderers = {
'playlistVideoListContinuation': self._playlist_entries,
'gridContinuation': self._grid_entries,
'itemSectionContinuation': self._post_thread_continuation_entries,
'sectionListContinuation': extract_entries, # for feeds
}
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict) or {}
continuation_renderer = None
for key, value in continuation_contents.items():
if key not in known_continuation_renderers:
continue
continuation_renderer = value
continuation_list = [None]
yield from known_continuation_renderers[key](continuation_renderer)
continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
break
if continuation_renderer:
continue
known_renderers = {
'videoRenderer': (self._grid_entries, 'items'), # for membership tab
'gridPlaylistRenderer': (self._grid_entries, 'items'),
'gridVideoRenderer': (self._grid_entries, 'items'),
'gridChannelRenderer': (self._grid_entries, 'items'),
'playlistVideoRenderer': (self._playlist_entries, 'contents'),
'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
'richItemRenderer': (extract_entries, 'contents'), # for hashtag
'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
}
on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
continuation_items = try_get(
on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
video_items_renderer = None
for key, value in continuation_item.items():
if key not in known_renderers:
continue
video_items_renderer = {known_renderers[key][1]: continuation_items}
continuation_list = [None]
yield from known_renderers[key][0](video_items_renderer)
continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
break
if video_items_renderer:
continue
break
@staticmethod
def _extract_selected_tab(tabs, fatal=True):
for tab in tabs:
renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
if renderer.get('selected') is True:
return renderer
else:
if fatal:
raise ExtractorError('Unable to find selected tab')
def _extract_uploader(self, data):
uploader = {}
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
owner = try_get(
renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
if owner:
owner_text = owner.get('text')
uploader['uploader'] = self._search_regex(
r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text)
uploader['uploader_id'] = try_get(
owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
uploader['uploader_url'] = urljoin(
'https://www.youtube.com/',
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
playlist_id = title = description = channel_url = channel_name = channel_id = None
tags = []
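        # Channel pages provide channelMetadataRenderer, playlists provide
        # playlistMetadataRenderer; the sidebar renderer supplies stats and thumbnails.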
selected_tab = self._extract_selected_tab(tabs)
primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
renderer = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
if renderer:
channel_name = renderer.get('title')
channel_url = renderer.get('channelUrl')
channel_id = renderer.get('externalId')
else:
renderer = try_get(
data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
if renderer:
title = renderer.get('title')
description = renderer.get('description', '')
playlist_id = channel_id
tags = renderer.get('keywords', '').split()
# We can get the uncropped banner/avatar by replacing the crop params with '=s0'
# See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714
def _get_uncropped(url):
return url_or_none((url or '').split('=')[0] + '=s0')
avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar')
if avatar_thumbnails:
uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url'])
if uncropped_avatar:
avatar_thumbnails.append({
'url': uncropped_avatar,
'id': 'avatar_uncropped',
'preference': 1
})
channel_banners = self._extract_thumbnails(
data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner']))
for banner in channel_banners:
banner['preference'] = -10
if channel_banners:
uncropped_banner = _get_uncropped(channel_banners[0]['url'])
if uncropped_banner:
channel_banners.append({
'url': uncropped_banner,
'id': 'banner_uncropped',
'preference': -5
})
primary_thumbnails = self._extract_thumbnails(
primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail'))
if playlist_id is None:
playlist_id = item_id
playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats')
last_updated_unix, _ = self._extract_time_text(playlist_stats, 2)
if title is None:
title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id
title += format_field(selected_tab, 'title', ' - %s')
title += format_field(selected_tab, 'expandedText', ' - %s')
metadata = {
'playlist_id': playlist_id,
'playlist_title': title,
'playlist_description': description,
'uploader': channel_name,
'uploader_id': channel_id,
'uploader_url': channel_url,
'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners,
'tags': tags,
'view_count': self._get_count(playlist_stats, 1),
'availability': self._extract_availability(data),
'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'),
'playlist_count': self._get_count(playlist_stats, 0),
'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')),
}
if not channel_id:
metadata.update(self._extract_uploader(data))
metadata.update({
'channel': metadata['uploader'],
'channel_id': metadata['uploader_id'],
'channel_url': metadata['uploader_url']})
return self.playlist_result(
self._entries(
selected_tab, playlist_id, ytcfg,
self._extract_account_syncid(ytcfg, data),
self._extract_visitor_data(data, ytcfg)),
**metadata)
def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
first_id = last_id = response = None
for page_num in itertools.count(1):
videos = list(self._playlist_entries(playlist))
if not videos:
return
start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
if start >= len(videos):
return
for video in videos[start:]:
if video['id'] == first_id:
self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
return
yield video
first_id = first_id or videos[0]['id']
last_id = videos[-1]['id']
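# Continue the mix by querying the 'next' API with the last video's watchEndpoint (falling back to sane defaults)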
watch_endpoint = try_get(
playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(response, data, ytcfg))
query = {
'playlistId': playlist_id,
'videoId': watch_endpoint.get('videoId') or last_id,
'index': watch_endpoint.get('index') or len(videos),
'params': watch_endpoint.get('params') or 'OAE%3D'
}
response = self._extract_response(
item_id='%s page %d' % (playlist_id, page_num),
query=query, ep='next', headers=headers, ytcfg=ytcfg,
check_get_keys='contents'
)
playlist = try_get(
response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
# Delegating everything except mix playlists to regular tab-based playlist URL
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if playlist_url and playlist_url != url:
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
return self.playlist_result(
self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
playlist_id=playlist_id, playlist_title=title)
def _extract_availability(self, data):
"""
Gets the availability of a given playlist/tab.
Note: Unless YouTube tells us explicitly, we do not assume it is public
@param data: response
"""
is_private = is_unlisted = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
badge_labels = self._extract_badges(renderer)
# Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
privacy_dropdown_entries = try_get(
renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
for renderer_dict in privacy_dropdown_entries:
is_selected = try_get(
renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
if not is_selected:
continue
label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
if label:
badge_labels.add(label.lower())
break
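# Translate the collected badge labels into privacy flags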
for badge_label in badge_labels:
if badge_label == 'unlisted':
is_unlisted = True
elif badge_label == 'private':
is_private = True
elif badge_label == 'public':
is_unlisted = is_private = False
return self._availability(is_private, False, False, False, is_unlisted)
@staticmethod
def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
sidebar_renderer = try_get(
data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
for item in sidebar_renderer:
renderer = try_get(item, lambda x: x[info_renderer], expected_type)
if renderer:
return renderer
def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
"""
Get playlist with unavailable videos if the 'show unavailable videos' button exists.
"""
browse_id = params = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
if not renderer:
return
menu_renderer = try_get(
renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
for menu_item in menu_renderer:
if not isinstance(menu_item, dict):
continue
nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
text = try_get(
nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
if not text or text.lower() != 'show unavailable videos':
continue
browse_endpoint = try_get(
nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
browse_id = browse_endpoint.get('browseId')
params = browse_endpoint.get('params')
break
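# Fall back to the default params/browseId if no 'show unavailable videos' menu entry was found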
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(data, ytcfg))
query = {
'params': params or 'wgYCCAA=',
'browseId': browse_id or 'VL%s' % item_id
}
return self._extract_response(
item_id=item_id, headers=headers, query=query,
check_get_keys='contents', fatal=False, ytcfg=ytcfg,
note='Downloading API JSON with unavailable videos')
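# True when the user passed --extractor-args "youtubetab:skip=webpage" to skip the initial webpage download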
@property
def skip_webpage(self):
return 'webpage' in self._configuration_arg('skip', ie_key=YoutubeTabIE.ie_key())
def _extract_webpage(self, url, item_id, fatal=True):
retries = self.get_param('extractor_retries', 3)
count = -1
webpage = data = last_error = None
while count < retries:
count += 1
# Sometimes youtube returns a webpage with incomplete ytInitialData
# See: https://github.com/yt-dlp/yt-dlp/issues/116
if last_error:
self.report_warning('%s. Retrying ...' % last_error)
try:
webpage = self._download_webpage(
url, item_id,
note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
else:
try:
self._extract_and_report_alerts(data)
except ExtractorError as e:
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
if dict_get(data, ('contents', 'currentVideoEndpoint', 'onResponseReceivedActions')):
break
last_error = 'Incomplete yt initial data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
self.report_warning(last_error)
break
return webpage, data
def _report_playlist_authcheck(self, ytcfg, fatal=True):
"""Use if failed to extract ytcfg (and data) from initial webpage"""
if not ytcfg and self.is_authenticated:
msg = 'Playlists that require authentication may not extract correctly without a successful webpage download'
if 'authcheck' not in self._configuration_arg('skip', ie_key=YoutubeTabIE.ie_key()) and fatal:
raise ExtractorError(
f'{msg}. If you are not downloading private content, or '
'your cookies are only for the first account and channel,'
' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
expected=True)
self.report_warning(msg, only_once=True)
def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
data = None
if not self.skip_webpage:
webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
# Reject webpage data if redirected to home page without explicitly requesting
selected_tab = self._extract_selected_tab(traverse_obj(
data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {}
if (url != 'https://www.youtube.com/feed/recommended'
and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch' # Home page
and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])):
msg = 'The channel/playlist does not exist and the URL redirected to youtube.com home page'
if fatal:
raise ExtractorError(msg, expected=True)
self.report_warning(msg, only_once=True)
if not data:
self._report_playlist_authcheck(ytcfg, fatal=fatal)
data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
return data, ytcfg
def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
resolve_response = self._extract_response(
item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
ep='navigation/resolve_url', note='Downloading API parameters JSON', default_client=default_client)
endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
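# Try the resolved endpoint against the matching API: browseEndpoint -> 'browse', watchEndpoint -> 'next'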
for ep_key, ep in endpoints.items():
params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
if params:
return self._extract_response(
item_id=item_id, query=params, ep=ep, headers=headers,
ytcfg=ytcfg, fatal=fatal, default_client=default_client,
check_get_keys=('contents', 'currentVideoEndpoint', 'onResponseReceivedActions'))
err_note = 'Failed to resolve url (does the playlist exist?)'
if fatal:
raise ExtractorError(err_note, expected=True)
self.report_warning(err_note, item_id)
_SEARCH_PARAMS = None
def _search_results(self, query, params=NO_DEFAULT, default_client='web'):
data = {'query': query}
if params is NO_DEFAULT:
params = self._SEARCH_PARAMS
if params:
data['params'] = params
content_keys = (
('contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents'),
('onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems'),
# ytmusic search
('contents', 'tabbedSearchResultsRenderer', 'tabs', 0, 'tabRenderer', 'content', 'sectionListRenderer', 'contents'),
('continuationContents', ),
)
display_id = f'query "{query}"'
check_get_keys = tuple({keys[0] for keys in content_keys})
ytcfg = self._download_ytcfg(default_client, display_id) if not self.skip_webpage else {}
self._report_playlist_authcheck(ytcfg, fatal=False)
continuation_list = [None]
search = None
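# Page through the search results, feeding each continuation token back into the query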
for page_num in itertools.count(1):
data.update(continuation_list[0] or {})
headers = self.generate_api_headers(
ytcfg=ytcfg, visitor_data=self._extract_visitor_data(search), default_client=default_client)
search = self._extract_response(
item_id=f'{display_id} page {page_num}', ep='search', query=data,
default_client=default_client, check_get_keys=check_get_keys, ytcfg=ytcfg, headers=headers)
slr_contents = traverse_obj(search, *content_keys)
yield from self._extract_entries({'contents': list(variadic(slr_contents))}, continuation_list)
if not continuation_list[0]:
break
class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube Tabs'
_VALID_URL = r'''(?x:
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
%(invidious)s
)/
(?:
(?P<channel_type>channel|c|user|browse)/|
(?P<not_channel>
feed/|hashtag/|
(?:playlist|watch)\?.*?\blist=
)|
(?!(?:%(reserved_names)s)\b) # Direct URLs
)
(?P<id>[^/?\#&]+)
)''' % {
'reserved_names': YoutubeBaseInfoExtractor._RESERVED_NAMES,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:tab'
_TESTS = [{
'note': 'playlists, multipage',
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader': 'Igor Kleiner',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, multipage, different order',
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'uploader': 'Igor Kleiner',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, series',
'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
'playlist_mincount': 5,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Playlists',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'uploader': '3Blue1Brown',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel_follower_count': int
},
}, {
'note': 'playlists, singlepage',
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'title': 'ThirstForScience - Playlists',
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
'uploader': 'ThirstForScience',
'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'uploader_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'tags': 'count:13',
'channel': 'ThirstForScience',
'channel_follower_count': int
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
'note': 'basic, single video playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
'description': '',
'tags': [],
'view_count': int,
'modified_date': '20201130',
'channel': 'Sergey M.',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 1,
}, {
'note': 'empty playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
'tags': [],
'channel': 'Sergey M.',
'description': '',
'modified_date': '20160902',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 0,
}, {
'note': 'Home tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 2,
}, {
'note': 'Videos tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_follower_count': int
},
'playlist_mincount': 975,
}, {
'note': 'Videos tab, sorted by popular',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 199,
}, {
'note': 'Playlists tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 17,
}, {
'note': 'Community tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 18,
}, {
'note': 'Channels tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 12,
}, {
'note': 'Search tab',
'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
'playlist_mincount': 40,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Search - linear algebra',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader': '3Blue1Brown',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_follower_count': int
},
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'view_count': int,
'modified_date': '20150605',
'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'channel_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'channel': 'Christiaan008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
'channel_url': 'https://www.youtube.com/c/Cauchemar89',
'tags': [],
'modified_date': r're:\d{8}',
'channel': 'Cauchemar',
'uploader_url': 'https://www.youtube.com/c/Cauchemar89',
'view_count': int,
'description': '',
'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 1123,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'even larger playlist, 8832 videos',
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'uploader_url': 'https://www.youtube.com/c/InterstellarMovie',
'tags': [],
'view_count': int,
'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'channel_url': 'https://www.youtube.com/c/InterstellarMovie',
'channel': 'Interstellar Movie',
'description': '',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 21,
}, {
'note': 'Playlist with "show unavailable videos" button',
'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
'info_dict': {
'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
'uploader': 'Phim Siêu Nhân Nhật Bản',
'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'view_count': int,
'channel': 'Phim Siêu Nhân Nhật Bản',
'tags': [],
'uploader_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'channel_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 200,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Playlist with unavailable videos in page 7',
'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
'info_dict': {
'title': 'Uploads from BlankTV',
'id': 'UU8l9frL61Yl5KFOl87nIm2w',
'uploader': 'BlankTV',
'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'channel': 'BlankTV',
'channel_url': 'https://www.youtube.com/c/blanktv',
'channel_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'view_count': int,
'tags': [],
'uploader_url': 'https://www.youtube.com/c/blanktv',
'modified_date': r're:\d{8}',
'description': '',
},
'playlist_mincount': 1000,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'uploader': 'Computerphile',
'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
'uploader_url': 'https://www.youtube.com/user/Computerphile',
'tags': [],
'view_count': int,
'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'channel_url': 'https://www.youtube.com/user/Computerphile',
'channel': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
'note': 'Playlist URL that does not actually serve a playlist',
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
'id': 'GgL890LIznQ', # This will keep changing
'ext': 'mp4',
'title': str,
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
'upload_date': r're:\d{8}',
'description': str,
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'release_timestamp': 1642502819,
'channel': 'Sky News',
'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ',
'age_limit': 0,
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg',
'playable_in_embed': True,
'release_date': '20220118',
'availability': 'public',
'live_status': 'is_live',
'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Ignoring subtitle tracks found in '],
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
},
'params': {
'skip_download': True,
},
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'note': 'A channel that is not live. Should raise error',
'url': 'https://www.youtube.com/user/numberphile/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
'note': 'Recommended - redirects to home page.',
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
'note': 'inline playlist whose continuations do not always work',
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/course',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/zsecurity',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/NASAgovVideo/videos',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/hashtag/cctv9',
'info_dict': {
'id': 'cctv9',
'title': '#cctv9',
'tags': [],
},
'playlist_mincount': 350,
}, {
'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
'only_matching': True,
}, {
'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'only_matching': True
}, {
'note': '/browse/ should redirect to /channel/',
'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
'only_matching': True
}, {
'note': 'VLPL, should redirect to playlist?list=PL...',
'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'info_dict': {
'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'uploader': 'NoCopyrightSounds',
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'title': 'NCS Releases',
'uploader_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'channel_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'modified_date': r're:\d{8}',
'view_count': int,
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'tags': [],
'channel': 'NoCopyrightSounds',
},
'playlist_mincount': 166,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'tags': [],
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'modified_date': r're:\d{8}',
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
},
'expected_warnings': [
'The URL does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
}, {
'note': 'Topic without a UU playlist',
'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
'info_dict': {
'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
'tags': [],
},
'expected_warnings': [
'the playlist redirect gave error',
],
'playlist_mincount': 9,
}, {
'note': 'Youtube music Album',
'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
'info_dict': {
'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
'tags': [],
'view_count': int,
'description': '',
'availability': 'unlisted',
'modified_date': r're:\d{8}',
},
'playlist_count': 50,
}, {
'note': 'unlisted single video playlist',
'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'info_dict': {
'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'uploader': 'colethedj',
'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'title': 'yt-dlp unlisted playlist test',
'availability': 'unlisted',
'tags': [],
'modified_date': '20211208',
'channel': 'colethedj',
'view_count': int,
'description': '',
'uploader_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
},
'playlist_count': 1,
}, {
'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
'url': 'https://www.youtube.com/feed/recommended',
'info_dict': {
'id': 'recommended',
'title': 'recommended',
'tags': [],
},
'playlist_mincount': 50,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: /videos tab, sorted by oldest first',
'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
'info_dict': {
'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'title': 'Cody\'sLab - Videos',
'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
'uploader': 'Cody\'sLab',
'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel': 'Cody\'sLab',
'channel_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'uploader_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel_follower_count': int
},
'playlist_mincount': 650,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'modified_date': r're:\d{8}',
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'tags': [],
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
},
'expected_warnings': [
'does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'non-standard redirect to regional channel',
'url': 'https://www.youtube.com/channel/UCwVVpHQ2Cs9iGJfpdFngePQ',
'only_matching': True
}, {
'note': 'collaborative playlist (uploader name in the form "by <uploader> and x other(s)")',
'url': 'https://www.youtube.com/playlist?list=PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'info_dict': {
'id': 'PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'modified_date': '20220407',
'channel_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
'tags': [],
'uploader_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'uploader': 'pukkandan',
'availability': 'unlisted',
'channel_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'channel': 'pukkandan',
'description': 'Test for collaborative playlist',
'title': 'yt-dlp test - collaborative playlist',
'uploader_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
},
'playlist_mincount': 2
}]
@classmethod
def suitable(cls, url):
return False if YoutubeIE.suitable(url) else super().suitable(url)
_URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
@YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
def _real_extract(self, url, smuggled_data):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
compat_opts = self.get_param('compat_opts', [])
def get_mobj(url):
mobj = self._URL_RE.match(url).groupdict()
mobj.update((k, '') for k, v in mobj.items() if v is None)
return mobj
mobj, redirect_warning = get_mobj(url), None
# Youtube returns incomplete data if tabname is not lower case
pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
if is_channel:
if smuggled_data.get('is_music_url'):
if item_id[:2] == 'VL': # Youtube music VL channels have an equivalent playlist
item_id = item_id[2:]
pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
elif item_id[:2] == 'MP': # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
mdata = self._extract_tab_endpoint(
f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
get_all=False, expected_type=compat_str)
if not murl:
raise ExtractorError('Failed to resolve album to playlist')
return self.url_result(murl, ie=YoutubeTabIE.ie_key())
elif mobj['channel_type'] == 'browse': # Youtube music /browse/ should be changed to /channel/
pre = f'https://www.youtube.com/channel/{item_id}'
original_tab_name = tab
if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
# Home URLs should redirect to /videos/
redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
tab = '/videos'
url = ''.join((pre, tab, post))
mobj = get_mobj(url)
# Handle both video/playlist URLs
qs = parse_qs(url)
video_id, playlist_id = (qs.get(key, [None])[0] for key in ('v', 'list'))
if not video_id and mobj['not_channel'].startswith('watch'):
if not playlist_id:
# If there is neither a video nor a playlist ID, YouTube redirects to the home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
# Common mistake: https://www.youtube.com/watch?list=playlist_id
self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
url = f'https://www.youtube.com/playlist?list={playlist_id}'
mobj = get_mobj(url)
if video_id and playlist_id:
if self.get_param('noplaylist'):
self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
data, ytcfg = self._extract_data(url, item_id)
# YouTube may provide a non-standard redirect to the regional channel
# See: https://github.com/yt-dlp/yt-dlp/issues/2694
redirect_url = traverse_obj(
data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
redirect_url = ''.join((
urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
selected_tab = self._extract_selected_tab(tabs)
selected_tab_name = selected_tab.get('title', '').lower()
if selected_tab_name == 'home':
selected_tab_name = 'featured'
requested_tab_name = mobj['tab'][1:]
if 'no-youtube-channel-redirect' not in compat_opts:
if requested_tab_name == 'live':
# Live tab should have redirected to the video
raise ExtractorError('The channel is not currently live', expected=True)
if requested_tab_name not in ('', selected_tab_name):
redirect_warning = f'The channel does not have a {requested_tab_name} tab'
if not original_tab_name:
if item_id[:2] == 'UC':
# Topic channels don't have /videos. Use the equivalent playlist instead
pl_id = f'UU{item_id[2:]}'
pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
try:
data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
except ExtractorError:
redirect_warning += ' and the playlist redirect gave error'
else:
item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
redirect_warning += f'. Redirecting to playlist {pl_id} instead'
if selected_tab_name and selected_tab_name != requested_tab_name:
redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
else:
raise ExtractorError(redirect_warning, expected=True)
if redirect_warning:
self.to_screen(redirect_warning)
self.write_debug(f'Final URL: {url}')
# YouTube sometimes provides a button to reload playlist with unavailable videos.
if 'no-youtube-unavailable-videos' not in compat_opts:
data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
self._extract_and_report_alerts(data, only_once=True)
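# Dispatch on what the page contains: channel/playlist tabs, an inline watch-page playlist, or a bare video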
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
return self._extract_from_tabs(item_id, ytcfg, data, tabs)
playlist = traverse_obj(
data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
if playlist:
return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
video_id = traverse_obj(
data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
if video_id:
if mobj['tab'] != '/live': # live tab is expected to redirect to video
self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube playlists'
_VALID_URL = r'''(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
%(invidious)s
)
/.*?\?.*?\blist=
)?
(?P<id>%(playlist_id)s)
)''' % {
'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickman',
'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
'view_count': int,
'uploader_url': 'https://www.youtube.com/user/Wickydoo',
'modified_date': r're:\d{8}',
'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'channel': 'Wickman',
'tags': [],
'channel_url': 'https://www.youtube.com/user/Wickydoo',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
'tags': [],
'modified_date': '20140919',
'view_count': int,
'channel': 'milan',
'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 654,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'description': 'md5:da521864744d60a198e3a88af4db0d9d',
'channel': 'LBK',
'view_count': int,
'channel_url': 'https://www.youtube.com/c/愛低音的國王',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/愛低音的國王',
'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'modified_date': r're:\d{8}',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
if YoutubeTabIE.suitable(url):
return False
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('v', [None])[0]:
return False
return super().suitable(url)
def _real_extract(self, url):
playlist_id = self._match_id(url)
is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
url = update_url_query(
'https://www.youtube.com/playlist',
parse_qs(url) or {'list': playlist_id})
if is_music_url:
url = smuggle_url(url, {'is_music_url': True})
return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
IE_DESC = 'youtu.be'
_VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TESTS = [{
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'age_limit': 0,
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi_webp/yeWKywCrFtk/maxresdefault.webp',
'channel': 'Backus-Page House Museum',
'channel_id': 'UCEfMCQ9bs3tjvjy1s451zaw',
'live_status': 'not_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCEfMCQ9bs3tjvjy1s451zaw',
'availability': 'public',
'duration': 59,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
playlist_id = mobj.group('playlist_id')
return self.url_result(
update_url_query('https://www.youtube.com/watch', {
'v': video_id,
'list': playlist_id,
'feature': 'youtu.be',
}), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeLivestreamEmbedIE(InfoExtractor):
IE_DESC = 'YouTube livestream embeds'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
_TESTS = [{
'url': 'https://www.youtube.com/embed/live_stream?channel=UC2_KI6RB__jGdlnK6dvFEZA',
'only_matching': True,
}]
def _real_extract(self, url):
channel_id = self._match_id(url)
return self.url_result(
f'https://www.youtube.com/channel/{channel_id}/live',
ie=YoutubeTabIE.ie_key(), video_id=channel_id)
class YoutubeYtUserIE(InfoExtractor):
IE_DESC = 'YouTube user videos; "ytuser:" prefix'
IE_NAME = 'youtube:user'
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s/videos' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
_VALID_URL = r':ytfav(?:ou?rite)?s?'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': ':ytfav',
'only_matching': True,
}, {
'url': ':ytfavorites',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=LL',
ie=YoutubeTabIE.ie_key())
class YoutubeNotificationsIE(YoutubeTabBaseInfoExtractor):
IE_NAME = 'youtube:notif'
IE_DESC = 'YouTube notifications; ":ytnotif" keyword (requires cookies)'
_VALID_URL = r':ytnotif(?:ication)?s?'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': ':ytnotif',
'only_matching': True,
}, {
'url': ':ytnotifications',
'only_matching': True,
}]
def _extract_notification_menu(self, response, continuation_list):
notification_list = traverse_obj(
response,
('actions', 0, 'openPopupAction', 'popup', 'multiPageMenuRenderer', 'sections', 0, 'multiPageMenuNotificationSectionRenderer', 'items'),
('actions', 0, 'appendContinuationItemsAction', 'continuationItems'),
expected_type=list) or []
continuation_list[0] = None
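# Yield an entry per notification and remember the continuation renderer (if any) for the next page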
for item in notification_list:
entry = self._extract_notification_renderer(item.get('notificationRenderer'))
if entry:
yield entry
continuation = item.get('continuationItemRenderer')
if continuation:
continuation_list[0] = continuation
def _extract_notification_renderer(self, notification):
video_id = traverse_obj(
notification, ('navigationEndpoint', 'watchEndpoint', 'videoId'), expected_type=str)
url = f'https://www.youtube.com/watch?v={video_id}'
channel_id = None
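# Notifications without a videoId point to community posts; build the post URL from the browse endpoint instead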
if not video_id:
browse_ep = traverse_obj(
notification, ('navigationEndpoint', 'browseEndpoint'), expected_type=dict)
channel_id = traverse_obj(browse_ep, 'browseId', expected_type=str)
post_id = self._search_regex(
r'/post/(.+)', traverse_obj(browse_ep, 'canonicalBaseUrl', expected_type=str),
'post id', default=None)
if not channel_id or not post_id:
return
# The direct /post url redirects to this in the browser
url = f'https://www.youtube.com/channel/{channel_id}/community?lb={post_id}'
channel = traverse_obj(
notification, ('contextualMenu', 'menuRenderer', 'items', 1, 'menuServiceItemRenderer', 'text', 'runs', 1, 'text'),
expected_type=str)
title = self._search_regex(
rf'{re.escape(channel)} [^:]+: (.+)', self._get_text(notification, 'shortMessage'),
'video title', default=None)
if title:
title = title.replace('\xad', '') # remove soft hyphens
upload_date = (strftime_or_none(self._extract_time_text(notification, 'sentTimeText')[0], '%Y%m%d')
if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE.ie_key())
else None)
return {
'_type': 'url',
'url': url,
'ie_key': (YoutubeIE if video_id else YoutubeTabIE).ie_key(),
'video_id': video_id,
'title': title,
'channel_id': channel_id,
'channel': channel,
'thumbnails': self._extract_thumbnails(notification, 'videoThumbnail'),
'upload_date': upload_date,
}
def _notification_menu_entries(self, ytcfg):
continuation_list = [None]
response = None
for page in itertools.count(1):
ctoken = traverse_obj(
continuation_list, (0, 'continuationEndpoint', 'getNotificationMenuEndpoint', 'ctoken'), expected_type=str)
response = self._extract_response(
item_id=f'page {page}', query={'ctoken': ctoken} if ctoken else {}, ytcfg=ytcfg,
ep='notification/get_notification_menu', check_get_keys='actions',
headers=self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response)))
yield from self._extract_notification_menu(response, continuation_list)
if not continuation_list[0]:
break
def _real_extract(self, url):
display_id = 'notifications'
ytcfg = self._download_ytcfg('web', display_id) if not self.skip_webpage else {}
self._report_playlist_authcheck(ytcfg)
return self.playlist_result(self._notification_menu_entries(ytcfg), display_id, display_id)
class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_DESC = 'YouTube search'
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
_TESTS = [{
'url': 'ytsearch5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube search, newest videos first'
_SEARCH_PARAMS = 'CAISAhAB' # Videos only, sorted by date
_TESTS = [{
'url': 'ytsearchdate5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube search URLs with sorting and filter support'
IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:results|search)\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?search_query=python&sp=EgIQAg%253D%253D',
'playlist_mincount': 5,
'info_dict': {
'id': 'python',
'title': 'python',
}
}, {
'url': 'https://www.youtube.com/results?search_query=%23cats',
'playlist_mincount': 1,
'info_dict': {
'id': '#cats',
'title': '#cats',
'entries': [{
'url': r're:https://(www\.)?youtube\.com/hashtag/cats',
'title': '#cats',
}],
},
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
return self.playlist_result(self._search_results(query, qs.get('sp', (None,))[0]), query, query)
class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube music search URLs with selectable sections (e.g. #songs)'
IE_NAME = 'youtube:music:search_url'
_VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://music.youtube.com/search?q=royalty+free+music',
'playlist_count': 16,
'info_dict': {
'id': 'royalty free music',
'title': 'royalty free music',
}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music&sp=EgWKAQIIAWoKEAoQAxAEEAkQBQ%3D%3D',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - songs',
'title': 'royalty free music - songs',
},
'params': {'extract_flat': 'in_playlist'}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music#community+playlists',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - community playlists',
'title': 'royalty free music - community playlists',
},
'params': {'extract_flat': 'in_playlist'}
}]
_SECTIONS = {
'albums': 'EgWKAQIYAWoKEAoQAxAEEAkQBQ==',
'artists': 'EgWKAQIgAWoKEAoQAxAEEAkQBQ==',
'community playlists': 'EgeKAQQoAEABagoQChADEAQQCRAF',
'featured playlists': 'EgeKAQQoADgBagwQAxAJEAQQDhAKEAU==',
'songs': 'EgWKAQIIAWoKEAoQAxAEEAkQBQ==',
'videos': 'EgWKAQIQAWoKEAoQAxAEEAkQBQ==',
}
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
params = qs.get('sp', (None,))[0]
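# Prefer the 'sp' query param to pick a section; otherwise fall back to the URL fragment (e.g. #songs)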
if params:
section = next((k for k, v in self._SECTIONS.items() if v == params), params)
else:
section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
params = self._SECTIONS.get(section)
if not params:
section = None
title = join_nonempty(query, section, delim=' - ')
return self.playlist_result(self._search_results(query, params, default_client='web_music'), title, title)
class YoutubeFeedsInfoExtractor(InfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True
def _real_initialize(self):
YoutubeBaseInfoExtractor._check_login_required(self)
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_extract(self, url):
return self.url_result(
f'https://www.youtube.com/feed/{self._FEED_NAME}', ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'YouTube watch later list; ":ytwatchlater" keyword (requires cookies)'
_VALID_URL = r':ytwatchlater'
_TESTS = [{
'url': ':ytwatchlater',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_LOGIN_REQUIRED = False
_TESTS = [{
'url': ':ytrec',
'only_matching': True,
}, {
'url': ':ytrecommended',
'only_matching': True,
}, {
'url': 'https://youtube.com',
'only_matching': True,
}]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
_VALID_URL = r':ytsub(?:scription)?s?'
_FEED_NAME = 'subscriptions'
_TESTS = [{
'url': ':ytsubs',
'only_matching': True,
}, {
'url': ':ytsubscriptions',
'only_matching': True,
}]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube watch history; ":ythis" keyword (requires cookies)'
_VALID_URL = r':ythis(?:tory)?'
_FEED_NAME = 'history'
_TESTS = [{
'url': ':ythistory',
'only_matching': True,
}]
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeClipIE(InfoExtractor):
IE_NAME = 'youtube:clip'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
def _real_extract(self, url):
self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
return self.url_result(url, 'Generic')
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
f'Incomplete YouTube ID {video_id}. URL {url} looks truncated.',
expected=True)
import calendar
import copy
import datetime
import functools
import hashlib
import itertools
import json
import math
import os.path
import random
import re
import sys
import threading
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_chr,
compat_HTTPError,
compat_parse_qs,
compat_str,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..jsinterp import JSInterpreter
from ..utils import (
NO_DEFAULT,
ExtractorError,
bug_reports_message,
clean_html,
datetime_from_str,
dict_get,
error_to_compat_str,
float_or_none,
format_field,
get_first,
int_or_none,
is_html,
join_nonempty,
js_to_json,
mimetype2ext,
network_exceptions,
orderedSet,
parse_codecs,
parse_count,
parse_duration,
parse_iso8601,
parse_qs,
qualities,
remove_end,
remove_start,
smuggle_url,
str_or_none,
str_to_int,
strftime_or_none,
traverse_obj,
try_get,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
update_url_query,
url_or_none,
urljoin,
variadic,
)
# any clients starting with _ cannot be explicitly requested by the user
INNERTUBE_CLIENTS = {
'web': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20211221.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 1
},
'web_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_EMBEDDED_PLAYER',
'clientVersion': '1.20211215.00.01',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 56
},
'web_music': {
'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
'INNERTUBE_HOST': 'music.youtube.com',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_REMIX',
'clientVersion': '1.20211213.00.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
},
'web_creator': {
'INNERTUBE_API_KEY': 'AIzaSyBUPetSUmoZL-OhlxA7wSac5XinrygCqMo',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB_CREATOR',
'clientVersion': '1.20211220.02.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
},
'android': {
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID',
'clientVersion': '16.49',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
'REQUIRE_JS_PLAYER': False
},
'android_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyCjc_pVEDi4qsv5MtC2dMXzpIaDoRFLsxw',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_EMBEDDED_PLAYER',
'clientVersion': '16.49',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
'REQUIRE_JS_PLAYER': False
},
'android_music': {
'INNERTUBE_API_KEY': 'AIzaSyAOghZGza2MQSZkY_zfZ370N-PUdXEo8AI',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_MUSIC',
'clientVersion': '4.57',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
'REQUIRE_JS_PLAYER': False
},
'android_creator': {
'INNERTUBE_API_KEY': 'AIzaSyD_qjV8zaaUMehtLkrKFgVeSX_Iqbtyws8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'ANDROID_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
'REQUIRE_JS_PLAYER': False
},
# iOS clients have HLS live streams. Setting device model to get 60fps formats.
# See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680#issuecomment-1002724558
'ios': {
'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
'REQUIRE_JS_PLAYER': False
},
'ios_embedded': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MESSAGES_EXTENSION',
'clientVersion': '16.46',
'deviceModel': 'iPhone14,3',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
'REQUIRE_JS_PLAYER': False
},
'ios_music': {
'INNERTUBE_API_KEY': 'AIzaSyBAETezhkwP0ZWA02RsqT1zu78Fpt0bC_s',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_MUSIC',
'clientVersion': '4.57',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
'REQUIRE_JS_PLAYER': False
},
'ios_creator': {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS_CREATOR',
'clientVersion': '21.47',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
'REQUIRE_JS_PLAYER': False
},
# mweb has 'ultralow' formats
# See: https://github.com/yt-dlp/yt-dlp/pull/557
'mweb': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20211221.01.00',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 2
},
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
'tv_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
'clientVersion': '2.0',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 85
},
}
def _split_innertube_client(client_name):
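    # Returns (full client name, base client, variant). Illustrative examples,
    # derived from the splitting logic below:
    #   'android_music' -> ('android_music', 'android', 'music')
    #   'web'           -> ('web', 'web', None)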
variant, *base = client_name.rsplit('.', 1)
if base:
return variant, base[0], variant
base, *variant = client_name.split('_', 1)
return client_name, base, variant[0] if variant else None
def build_innertube_clients():
THIRD_PARTY = {
'embedUrl': 'https://www.youtube.com/', # Can be any valid URL
}
BASE_CLIENTS = ('android', 'web', 'tv', 'ios', 'mweb')
priority = qualities(BASE_CLIENTS[::-1])
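    # qualities() ranks names appearing later in the sequence higher, so the
    # reversed BASE_CLIENTS order makes 'android' the highest-priority base client
    # and 'mweb' the lowest; variants then receive small penalties below.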
for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
_, base_client, variant = _split_innertube_client(client)
ytcfg['priority'] = 10 * priority(base_client)
if not variant:
INNERTUBE_CLIENTS[f'{client}_embedscreen'] = embedscreen = copy.deepcopy(ytcfg)
embedscreen['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
embedscreen['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
embedscreen['priority'] -= 3
elif variant == 'embedded':
ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = THIRD_PARTY
ytcfg['priority'] -= 2
else:
ytcfg['priority'] -= 3
build_innertube_clients()
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_RESERVED_NAMES = (
r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
r'browse|oembed|get_video_info|iframe_api|s/player|'
r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
# _NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_INVIDIOUS_SITES = (
# invidious-redirect websites
r'(?:www\.)?redirect\.invidious\.io',
r'(?:(?:www|dev)\.)?invidio\.us',
# Invidious instances taken from https://github.com/iv-org/documentation/blob/master/docs/instances.md
r'(?:www\.)?invidious\.pussthecat\.org',
r'(?:www\.)?invidious\.zee\.li',
r'(?:www\.)?invidious\.ethibox\.fr',
r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
# youtube-dl invidious instances list
r'(?:(?:www|no)\.)?invidiou\.sh',
r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
r'(?:www\.)?invidious\.kabi\.tk',
r'(?:www\.)?invidious\.mastodon\.host',
r'(?:www\.)?invidious\.zapashcanon\.fr',
r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
r'(?:www\.)?invidious\.tinfoil-hat\.net',
r'(?:www\.)?invidious\.himiko\.cloud',
r'(?:www\.)?invidious\.reallyancient\.tech',
r'(?:www\.)?invidious\.tube',
r'(?:www\.)?invidiou\.site',
r'(?:www\.)?invidious\.site',
r'(?:www\.)?invidious\.xyz',
r'(?:www\.)?invidious\.nixnet\.xyz',
r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.drycat\.fr',
r'(?:www\.)?inv\.skyn3t\.in',
r'(?:www\.)?tube\.poal\.co',
r'(?:www\.)?tube\.connect\.cafe',
r'(?:www\.)?vid\.wxzm\.sx',
r'(?:www\.)?vid\.mint\.lgbt',
r'(?:www\.)?vid\.puffyan\.us',
r'(?:www\.)?yewtu\.be',
r'(?:www\.)?yt\.elukerio\.org',
r'(?:www\.)?yt\.lelux\.fi',
r'(?:www\.)?invidious\.ggc-project\.de',
r'(?:www\.)?yt\.maisputain\.ovh',
r'(?:www\.)?ytprivate\.com',
r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.toot\.koeln',
r'(?:www\.)?invidious\.fdn\.fr',
r'(?:www\.)?watch\.nettohikari\.com',
r'(?:www\.)?invidious\.namazso\.eu',
r'(?:www\.)?invidious\.silkky\.cloud',
r'(?:www\.)?invidious\.exonip\.de',
r'(?:www\.)?invidious\.riverside\.rocks',
r'(?:www\.)?invidious\.blamefran\.net',
r'(?:www\.)?invidious\.moomoo\.de',
r'(?:www\.)?ytb\.trom\.tf',
r'(?:www\.)?yt\.cyberhost\.uk',
r'(?:www\.)?kgg2m7yk5aybusll\.onion',
r'(?:www\.)?qklhadlycap4cnod\.onion',
r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
)
def _initialize_consent(self):
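        # Best-effort bypass of the cookie-consent interstitial: when no login
        # cookie is present and consent is still pending, accept it ourselves by
        # setting a CONSENT=YES+... cookie.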
cookies = self._get_cookies('https://www.youtube.com/')
if cookies.get('__Secure-3PSID'):
return
consent_id = None
consent = cookies.get('CONSENT')
if consent:
if 'YES' in consent.value:
return
consent_id = self._search_regex(
r'PENDING\+(\d+)', consent.value, 'consent', default=None)
if not consent_id:
consent_id = random.randint(100, 999)
self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
def _initialize_pref(self):
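        # Force English interface language and UTC timezone via the PREF cookie so
        # that extracted strings (dates, counts, labels) are locale-independent.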
cookies = self._get_cookies('https://www.youtube.com/')
pref_cookie = cookies.get('PREF')
pref = {}
if pref_cookie:
try:
pref = dict(compat_urlparse.parse_qsl(pref_cookie.value))
except ValueError:
self.report_warning('Failed to parse user PREF cookie' + bug_reports_message())
pref.update({'hl': 'en', 'tz': 'UTC'})
self._set_cookie('.youtube.com', name='PREF', value=compat_urllib_parse_urlencode(pref))
def _real_initialize(self):
self._initialize_pref()
self._initialize_consent()
self._check_login_required()
def _check_login_required(self):
if (self._LOGIN_REQUIRED
and self.get_param('cookiefile') is None
and self.get_param('cookiesfrombrowser') is None):
self.raise_login_required('Login details are needed to download this content', method='cookies')
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
def _get_default_ytcfg(self, client='web'):
return copy.deepcopy(INNERTUBE_CLIENTS[client])
def _get_innertube_host(self, client='web'):
return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']
def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
# try_get but with fallback to default ytcfg client values when present
_func = lambda y: try_get(y, getter, expected_type)
return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))
def _extract_client_name(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
def _extract_client_version(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)
def _extract_api_key(self, ytcfg=None, default_client='web'):
return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)
def _extract_context(self, ytcfg=None, default_client='web'):
context = get_first(
(ytcfg, self._get_default_ytcfg(default_client)), 'INNERTUBE_CONTEXT', expected_type=dict)
# Enforce language and tz for extraction
client_context = traverse_obj(context, 'client', expected_type=dict, default={})
client_context.update({'hl': 'en', 'timeZone': 'UTC', 'utcOffsetMinutes': 0})
return context
_SAPISID = None
def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
time_now = round(time.time())
if self._SAPISID is None:
yt_cookies = self._get_cookies('https://www.youtube.com')
# Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
# See: https://github.com/yt-dlp/yt-dlp/issues/393
sapisid_cookie = dict_get(
yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
if sapisid_cookie and sapisid_cookie.value:
self._SAPISID = sapisid_cookie.value
self.write_debug('Extracted SAPISID cookie')
                # The SAPISID cookie is required; copy it from __Secure-3PAPISID if not already present
if not yt_cookies.get('SAPISID'):
self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
self._set_cookie(
'.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
else:
self._SAPISID = False
if not self._SAPISID:
return None
# SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
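        # Header format produced below: 'SAPISIDHASH <unix_time>_<sha1 hex digest>',
        # where the digest is taken over '<unix_time> <SAPISID> <origin>'.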
sapisidhash = hashlib.sha1(
f'{time_now} {self._SAPISID} {origin}'.encode()).hexdigest()
return f'SAPISIDHASH {time_now}_{sapisidhash}'
def _call_api(self, ep, query, video_id, fatal=True, headers=None,
note='Downloading API JSON', errnote='Unable to download API page',
context=None, api_key=None, api_hostname=None, default_client='web'):
data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
data.update(query)
real_headers = self.generate_api_headers(default_client=default_client)
real_headers.update({'content-type': 'application/json'})
if headers:
real_headers.update(headers)
return self._download_json(
f'https://{api_hostname or self._get_innertube_host(default_client)}/youtubei/v1/{ep}',
video_id=video_id, fatal=fatal, note=note, errnote=errnote,
data=json.dumps(data).encode('utf8'), headers=real_headers,
query={'key': api_key or self._extract_api_key(), 'prettyPrint': 'false'})
def extract_yt_initial_data(self, item_id, webpage, fatal=True):
data = self._search_regex(
(fr'{self._YT_INITIAL_DATA_RE}\s*{self._YT_INITIAL_BOUNDARY_RE}',
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
if data:
return self._parse_json(data, item_id, fatal=fatal)
@staticmethod
def _extract_session_index(*data):
"""
Index of current account in account list.
See: https://github.com/yt-dlp/yt-dlp/pull/519
"""
for ytcfg in data:
session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
if session_index is not None:
return session_index
# Deprecated?
def _extract_identity_token(self, ytcfg=None, webpage=None):
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
if token:
return token
if webpage:
return self._search_regex(
r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
'identity token', default=None, fatal=False)
@staticmethod
def _extract_account_syncid(*args):
"""
Extract syncId required to download private playlists of secondary channels
@params response and/or ytcfg
"""
for data in args:
# ytcfg includes channel_syncid if on secondary channel
delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
if delegated_sid:
return delegated_sid
sync_ids = (try_get(
data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
if len(sync_ids) >= 2 and sync_ids[1]:
# datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
# and just "user_syncid||" for primary channel. We only want the channel_syncid
return sync_ids[0]
@staticmethod
def _extract_visitor_data(*args):
"""
Extracts visitorData from an API response or ytcfg
Appears to be used to track session state
"""
return get_first(
args, [('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))],
expected_type=str)
@property
def is_authenticated(self):
return bool(self._generate_sapisidhash_header())
def extract_ytcfg(self, video_id, webpage):
if not webpage:
return {}
return self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
default='{}'), video_id, fatal=False) or {}
def generate_api_headers(
self, *, ytcfg=None, account_syncid=None, session_index=None,
visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
headers = {
'X-YouTube-Client-Name': compat_str(
self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
'Origin': origin,
'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
}
if session_index is None:
session_index = self._extract_session_index(ytcfg)
if account_syncid or session_index is not None:
headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
auth = self._generate_sapisidhash_header(origin)
if auth is not None:
headers['Authorization'] = auth
headers['X-Origin'] = origin
return {h: v for h, v in headers.items() if v is not None}
def _download_ytcfg(self, client, video_id):
url = {
'web': 'https://www.youtube.com',
'web_music': 'https://music.youtube.com',
'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1'
}.get(client)
if not url:
return {}
webpage = self._download_webpage(
url, video_id, fatal=False, note=f'Downloading {client.replace("_", " ").strip()} client config')
return self.extract_ytcfg(video_id, webpage) or {}
@staticmethod
def _build_api_continuation_query(continuation, ctp=None):
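        # Builds the continuation query sent to the API, e.g.
        # {'continuation': <token>, 'clickTracking': {'clickTrackingParams': <ctp>}}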
query = {
'continuation': continuation
}
# TODO: Inconsistency with clickTrackingParams.
# Currently we have a fixed ctp contained within context (from ytcfg)
# and a ctp in root query for continuation.
if ctp:
query['clickTracking'] = {'clickTrackingParams': ctp}
return query
@classmethod
def _extract_next_continuation_data(cls, renderer):
next_continuation = try_get(
renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
lambda x: x['continuation']['reloadContinuationData']), dict)
if not next_continuation:
return
continuation = next_continuation.get('continuation')
if not continuation:
return
ctp = next_continuation.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation_ep_data(cls, continuation_ep: dict):
if isinstance(continuation_ep, dict):
continuation = try_get(
continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
if not continuation:
return
ctp = continuation_ep.get('clickTrackingParams')
return cls._build_api_continuation_query(continuation, ctp)
@classmethod
def _extract_continuation(cls, renderer):
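        # Prefer the legacy nextContinuationData if present; otherwise search the
        # renderer's 'contents'/'items' for a continuationItemRenderer endpoint.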
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
contents = []
for key in ('contents', 'items'):
contents.extend(try_get(renderer, lambda x: x[key], list) or [])
for content in contents:
if not isinstance(content, dict):
continue
continuation_ep = try_get(
content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
dict)
continuation = cls._extract_continuation_ep_data(continuation_ep)
if continuation:
return continuation
@classmethod
def _extract_alerts(cls, data):
for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
if not isinstance(alert_dict, dict):
continue
for alert in alert_dict.values():
alert_type = alert.get('type')
if not alert_type:
continue
message = cls._get_text(alert, 'text')
if message:
yield alert_type, message
def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
errors = []
warnings = []
for alert_type, alert_message in alerts:
if alert_type.lower() == 'error' and fatal:
errors.append([alert_type, alert_message])
else:
warnings.append([alert_type, alert_message])
for alert_type, alert_message in (warnings + errors[:-1]):
self.report_warning(f'YouTube said: {alert_type} - {alert_message}', only_once=only_once)
if errors:
raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
def _extract_and_report_alerts(self, data, *args, **kwargs):
return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
def _extract_badges(self, renderer: dict):
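        # Collect lowercase badge labels (e.g. 'members only', 'premium', 'live now'),
        # which are later used for availability and live-status detection.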
badges = set()
for badge in try_get(renderer, lambda x: x['badges'], list) or []:
label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
if label:
badges.add(label.lower())
return badges
@staticmethod
def _get_text(data, *path_list, max_runs=None):
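        # Returns the first non-empty text found along path_list: either a
        # 'simpleText' value or the joined 'text' of 'runs' (limited to max_runs).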
for path in path_list or [None]:
if path is None:
obj = [data]
else:
obj = traverse_obj(data, path, default=[])
if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
obj = [obj]
for item in obj:
text = try_get(item, lambda x: x['simpleText'], compat_str)
if text:
return text
runs = try_get(item, lambda x: x['runs'], list) or []
if not runs and isinstance(item, list):
runs = item
runs = runs[:min(len(runs), max_runs or len(runs))]
text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
if text:
return text
def _get_count(self, data, *path_list):
count_text = self._get_text(data, *path_list) or ''
count = parse_count(count_text)
if count is None:
count = str_to_int(
self._search_regex(r'^([\d,]+)', re.sub(r'\s', '', count_text), 'count', default=None))
return count
@staticmethod
def _extract_thumbnails(data, *path_list):
"""
Extract thumbnails from thumbnails dict
@param path_list: path list to level that contains 'thumbnails' key
"""
thumbnails = []
for path in path_list or [()]:
for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
# Sometimes youtube gives a wrong thumbnail URL. See:
# https://github.com/yt-dlp/yt-dlp/issues/233
# https://github.com/ytdl-org/youtube-dl/issues/28023
if 'maxresdefault' in thumbnail_url:
thumbnail_url = thumbnail_url.split('?')[0]
thumbnails.append({
'url': thumbnail_url,
'height': int_or_none(thumbnail.get('height')),
'width': int_or_none(thumbnail.get('width')),
})
return thumbnails
@staticmethod
def extract_relative_time(relative_time_text):
"""
Extracts a relative time from string and converts to dt object
e.g. 'streamed 6 days ago', '5 seconds ago (edited)', 'updated today'
"""
mobj = re.search(r'(?P<start>today|yesterday|now)|(?P<time>\d+)\s*(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?\s*ago', relative_time_text)
if mobj:
start = mobj.group('start')
if start:
return datetime_from_str(start)
try:
return datetime_from_str('now-%s%s' % (mobj.group('time'), mobj.group('unit')))
except ValueError:
return None
def _extract_time_text(self, renderer, *path_list):
"""@returns (timestamp, time_text)"""
text = self._get_text(renderer, *path_list) or ''
dt = self.extract_relative_time(text)
timestamp = None
if isinstance(dt, datetime.datetime):
timestamp = calendar.timegm(dt.timetuple())
if timestamp is None:
timestamp = (
unified_timestamp(text) or unified_timestamp(
self._search_regex(
(r'([a-z]+\s*\d{1,2},?\s*20\d{2})', r'(?:.+|^)(?:live|premieres|ed|ing)(?:\s*(?:on|for))?\s*(.+\d)'),
text.lower(), 'time text', default=None)))
if text and timestamp is None:
self.report_warning(f"Cannot parse localized time text '{text}'" + bug_reports_message(), only_once=True)
return timestamp, text
def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
default_client='web'):
response = None
last_error = None
count = -1
retries = self.get_param('extractor_retries', 3)
if check_get_keys is None:
check_get_keys = []
while count < retries:
count += 1
if last_error:
self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
try:
response = self._call_api(
ep=ep, fatal=True, headers=headers,
video_id=item_id, query=query,
context=self._extract_context(ytcfg, default_client),
api_key=self._extract_api_key(ytcfg, default_client),
api_hostname=api_hostname, default_client=default_client,
note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if isinstance(e.cause, compat_HTTPError):
first_bytes = e.cause.read(512)
if not is_html(first_bytes):
yt_error = try_get(
self._parse_json(
self._webpage_read_content(e.cause, None, item_id, prefix=first_bytes) or '{}', item_id, fatal=False),
lambda x: x['error']['message'], compat_str)
if yt_error:
self._report_alerts([('ERROR', yt_error)], fatal=False)
# Downloading page may result in intermittent 5xx HTTP error
                    # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
# We also want to catch all other network exceptions since errors in later pages can be troublesome
# See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
else:
self.report_warning(error_to_compat_str(e))
return
else:
try:
self._extract_and_report_alerts(response, only_once=True)
except ExtractorError as e:
# YouTube servers may return errors we want to retry on in a 200 OK response
# See: https://github.com/yt-dlp/yt-dlp/issues/839
if 'unknown error' in e.msg.lower():
last_error = e.msg
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
return
if not check_get_keys or dict_get(response, check_get_keys):
break
# Youtube sometimes sends incomplete data
# See: https://github.com/ytdl-org/youtube-dl/issues/28194
last_error = 'Incomplete data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
else:
self.report_warning(last_error)
return
return response
@staticmethod
def is_music_url(url):
return re.match(r'https?://music\.youtube\.com/', url) is not None
def _extract_video(self, renderer):
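        # Build a flat 'url' result for YoutubeIE from a videoRenderer-like dict;
        # the metadata fields below are best-effort extractions from the renderer.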
video_id = renderer.get('videoId')
title = self._get_text(renderer, 'title')
description = self._get_text(renderer, 'descriptionSnippet')
duration = parse_duration(self._get_text(
renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
if duration is None:
duration = parse_duration(self._search_regex(
r'(?i)(ago)(?!.*\1)\s+(?P<duration>[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$',
traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str),
video_id, default=None, group='duration'))
view_count = self._get_count(renderer, 'viewCountText')
uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
channel_id = traverse_obj(
renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
expected_type=str, get_all=False)
timestamp, time_text = self._extract_time_text(renderer, 'publishedTimeText')
scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
overlay_style = traverse_obj(
renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
get_all=False, expected_type=str)
badges = self._extract_badges(renderer)
thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
expected_type=str)) or ''
url = f'https://www.youtube.com/watch?v={video_id}'
if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
url = f'https://www.youtube.com/shorts/{video_id}'
return {
'_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': url,
'title': title,
'description': description,
'duration': duration,
'view_count': view_count,
'uploader': uploader,
'channel_id': channel_id,
'thumbnails': thumbnails,
'upload_date': (strftime_or_none(timestamp, '%Y%m%d')
if self._configuration_arg('approximate_date', ie_key='youtubetab')
else None),
'live_status': ('is_upcoming' if scheduled_timestamp is not None
else 'was_live' if 'streamed' in time_text.lower()
else 'is_live' if overlay_style is not None and overlay_style == 'LIVE' or 'live now' in badges
else None),
'release_timestamp': scheduled_timestamp,
'availability': self._availability(needs_premium='premium' in badges, needs_subscription='members only' in badges)
}
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
(?:www\.)?deturl\.com/www\.youtube\.com|
(?:www\.)?pwnyoutube\.com|
(?:www\.)?hooktube\.com|
(?:www\.)?yourepeat\.com|
tube\.majestyc\.net|
%(invidious)s|
youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
%(invidious)s
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
                         )?                                                   # everything up to here is optional -> you can pass the naked ID
                         (?P<id>[0-9A-Za-z_-]{11})                            # here it is! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
(?:\#|$)""" % {
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# Dash webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
'396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
'397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
'398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
'399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
'400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
'401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
}
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'start_time': 1,
'end_time': 9,
'channel_follower_count': int
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'age_limit': 18,
},
'skip': 'Private video',
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'channel': 'Philipp Hagemeister',
'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
'upload_date': '20121002',
'description': 'md5:8fb536f4877b8a7455c2ec23794dbc22',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'view_count': int,
'like_count': int,
'availability': 'public',
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi/BaW_jenozKc/maxresdefault.jpg',
'live_status': 'not_live',
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'abr': 129.495,
'like_count': int,
'channel_id': 'UChuZAo1RKL85gev3Eal9_zg',
'playable_in_embed': True,
'channel_url': 'https://www.youtube.com/channel/UChuZAo1RKL85gev3Eal9_zg',
'view_count': int,
'track': 'The Spark',
'live_status': 'not_live',
'thumbnail': 'https://i.ytimg.com/vi_webp/IB3lcPjvWLA/maxresdefault.webp',
'channel': 'Afrojack',
'uploader_url': 'http://www.youtube.com/user/AfrojackVEVO',
'tags': 'count:19',
'availability': 'public',
'categories': ['Music'],
'age_limit': 0,
'alt_title': 'The Spark',
'channel_follower_count': int
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
{
'note': 'Embed allowed age-gate video',
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
'categories': ['Gaming'],
'thumbnail': 'https://i.ytimg.com/vi_webp/HtVdAasjOgU/maxresdefault.webp',
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UCzybXLxv08IApdjdN0mJhEg',
'like_count': int,
'channel': 'The Witcher',
'live_status': 'not_live',
'tags': 'count:17',
'channel_id': 'UCzybXLxv08IApdjdN0mJhEg',
'playable_in_embed': True,
'view_count': int,
'channel_follower_count': int
},
},
{
'note': 'Age-gate video with embed allowed in public site',
'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
'info_dict': {
'id': 'HsUATh_Nc2U',
'ext': 'mp4',
'title': 'Godzilla 2 (Official Video)',
'description': 'md5:bf77e03fcae5529475e500129b05668a',
'upload_date': '20200408',
'uploader_id': 'FlyingKitty900',
'uploader': 'FlyingKitty',
'age_limit': 18,
'availability': 'needs_auth',
'channel_id': 'UCYQT13AtrJC0gsM1far_zJg',
'uploader_url': 'http://www.youtube.com/user/FlyingKitty900',
'channel': 'FlyingKitty',
'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg',
'view_count': int,
'categories': ['Entertainment'],
'live_status': 'not_live',
'tags': ['Flyingkitty', 'godzilla 2'],
'thumbnail': 'https://i.ytimg.com/vi/HsUATh_Nc2U/maxresdefault.jpg',
'like_count': int,
'duration': 177,
'playable_in_embed': True,
'channel_follower_count': int
},
},
{
            'note': 'Age-gate video embeddable only with clientScreen=EMBED',
'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
'info_dict': {
'id': 'Tq92D6wQ1mg',
'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
'ext': 'mp4',
'upload_date': '20191228',
'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'uploader': 'Projekt Melody',
'description': 'md5:17eccca93a786d51bc67646756894066',
'age_limit': 18,
'like_count': int,
'availability': 'needs_auth',
'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/Tq92D6wQ1mg/sddefault.webp',
'channel': 'Projekt Melody',
'live_status': 'not_live',
'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
'playable_in_embed': True,
'categories': ['Entertainment'],
'duration': 106,
'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
'channel_follower_count': int
},
},
{
            'note': 'Non-age-gated non-embeddable video',
'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
'info_dict': {
'id': 'MeJVWBSsPAY',
'ext': 'mp4',
'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
'uploader': 'Herr Lurik',
'uploader_id': 'st3in234',
'description': 'Fan Video. Music & Lyrics by OOMPH!.',
'upload_date': '20130730',
'track': 'Such mich find mich',
'age_limit': 0,
'tags': ['oomph', 'such mich find mich', 'lyrics', 'german industrial', 'musica industrial'],
'like_count': int,
'playable_in_embed': False,
'creator': 'OOMPH!',
'thumbnail': 'https://i.ytimg.com/vi/MeJVWBSsPAY/sddefault.jpg',
'view_count': int,
'alt_title': 'Such mich find mich',
'duration': 210,
'channel': 'Herr Lurik',
'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA',
'categories': ['Music'],
'availability': 'public',
'uploader_url': 'http://www.youtube.com/user/st3in234',
'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA',
'live_status': 'not_live',
'artist': 'OOMPH!',
'channel_follower_count': int
},
},
{
'note': 'Non-bypassable age-gated video',
'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
'only_matching': True,
},
# video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
'availability': 'public',
'tags': 'count:14',
'channel_id': 'UCYEK6xds6eo-3tr4xRdflmQ',
'view_count': int,
'live_status': 'not_live',
'channel': 'deadmau5',
'thumbnail': 'https://i.ytimg.com/vi_webp/__2ABJjxzNo/maxresdefault.webp',
'like_count': int,
'track': 'Some Chords',
'artist': 'deadmau5',
'playable_in_embed': True,
'age_limit': 0,
'channel_url': 'https://www.youtube.com/channel/UCYEK6xds6eo-3tr4xRdflmQ',
'categories': ['Music'],
'album': 'Some Chords',
'channel_follower_count': int
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
'like_count': int,
'release_timestamp': 1343767800,
'playable_in_embed': True,
'categories': ['Sports'],
'release_date': '20120731',
'channel': 'Olympics',
'tags': ['Hockey', '2012-07-31', '31 July 2012', 'Riverbank Arena', 'Session', 'Olympics', 'Olympic Games', 'London 2012', '2012 Summer Olympics', 'Summer Games'],
'channel_id': 'UCTl3QQTvqHFjurroKxexy2Q',
'thumbnail': 'https://i.ytimg.com/vi/lqQg6PlCWgI/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'live_status': 'was_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q',
'channel_follower_count': int
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
'playable_in_embed': True,
'channel': '孫ᄋᄅ',
'age_limit': 0,
'tags': 'count:11',
'channel_url': 'https://www.youtube.com/channel/UCS-xxCmRaA6BFdmgDPA_BIw',
'channel_id': 'UCS-xxCmRaA6BFdmgDPA_BIw',
'thumbnail': 'https://i.ytimg.com/vi/_b-2C3KPAM0/maxresdefault.jpg',
'view_count': int,
'categories': ['People & Blogs'],
'like_count': int,
'live_status': 'not_live',
'availability': 'unlisted',
'channel_follower_count': int
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:31',
},
            'skip': 'No longer applicable',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
'info_dict': {
'id': 'jvGDaLqkpTg',
'title': 'Tom Clancy Free Weekend Rainbow Whatever',
'description': 'md5:e03b909557865076822aa169218d6a5d',
},
'playlist': [{
'info_dict': {
'id': 'jvGDaLqkpTg',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10643,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '3AKt1R1aDnw',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10991,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': 'RtAMM00gpVc',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10995,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}, {
'info_dict': {
'id': '6N2fdlP3C5U',
'ext': 'mp4',
'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
'description': 'md5:e03b909557865076822aa169218d6a5d',
'duration': 10990,
'upload_date': '20161111',
'uploader': 'Team PGP',
'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
},
}],
'params': {
'skip_download': True,
},
'skip': 'Not multifeed anymore',
},
{
# Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/ytdl-org/youtube-dl/issues/1892,
# https://github.com/ytdl-org/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk',
'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
'thumbnail': 'https://i.ytimg.com/vi_webp/lsguqyKfVQg/maxresdefault.webp',
'categories': ['Film & Animation'],
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCTSRgz5jylBvFt_S7wnsqLQ',
'channel_id': 'UCTSRgz5jylBvFt_S7wnsqLQ',
'tags': 'count:13',
'availability': 'public',
'channel': 'IronSoulElf',
'playable_in_embed': True,
'like_count': int,
'age_limit': 0,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video with incomplete 'yt:stretch=16:'
'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
'only_matching': True,
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150128',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
'channel_id': 'UCuLGmD72gJDBwmLw06X58SA',
'channel_url': 'https://www.youtube.com/channel/UCuLGmD72gJDBwmLw06X58SA',
'like_count': int,
'age_limit': 0,
'tags': ['Copyright (Legal Subject)', 'Law (Industry)', 'William W. Fisher (Author)'],
'channel': 'The Berkman Klein Center for Internet & Society',
'availability': 'public',
'view_count': int,
'categories': ['Education'],
'thumbnail': 'https://i.ytimg.com/vi_webp/M4gD1WSo5mA/maxresdefault.webp',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
'duration': 4060,
'upload_date': '20151120',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
'playable_in_embed': True,
'tags': 'count:12',
'like_count': int,
'channel_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'age_limit': 0,
'availability': 'public',
'categories': ['News & Politics'],
'channel': 'Bernie Sanders',
'thumbnail': 'https://i.ytimg.com/vi_webp/eQcmzGIKrzg/maxresdefault.webp',
'view_count': int,
'live_status': 'not_live',
'channel_url': 'https://www.youtube.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
'thumbnail': 'https://i.ytimg.com/vi_webp/iqKdEhx-dD4/maxresdefault.webp',
'tags': 'count:12',
'view_count': int,
'availability': 'public',
'age_limit': 0,
'channel': 'Vsauce',
'episode': 'Episode 1',
'categories': ['Entertainment'],
'season': 'Season 1',
'channel_id': 'UC6nSFpj9HTCZ5t-N3Rm3-HA',
'channel_url': 'https://www.youtube.com/channel/UC6nSFpj9HTCZ5t-N3Rm3-HA',
'like_count': int,
'playable_in_embed': True,
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
},
'params': {
'skip_download': True,
},
'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://invidio.us/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
'only_matching': True,
},
{
# from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
'only_matching': True,
},
{
# DRM protected
'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
'only_matching': True,
},
{
# Video with unsupported adaptive stream type formats
'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
'info_dict': {
'id': 'Z4Vy8R84T1U',
'ext': 'mp4',
'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'duration': 433,
'upload_date': '20130923',
'uploader': 'Amelia Putri Harwita',
'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
'formats': 'maxcount:10',
},
'params': {
'skip_download': True,
'youtube_include_dash_manifest': False,
},
        'skip': 'No longer relevant',
},
{
# Youtube Music Auto-generated description
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'title': 'Voyeur Girl',
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
'upload_date': '20190312',
'uploader': 'Stephen - Topic',
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'track': 'Voyeur Girl',
'album': 'it\'s too much love to know my dear',
'release_date': '20190313',
'release_year': 2019,
'alt_title': 'Voyeur Girl',
'view_count': int,
'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'playable_in_embed': True,
'like_count': int,
'categories': ['Music'],
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'channel': 'Stephen',
'availability': 'public',
'creator': 'Stephen',
'duration': 169,
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
'age_limit': 0,
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'tags': 'count:11',
'live_status': 'not_live',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
'only_matching': True,
},
{
# invalid -> valid video id redirection
'url': 'DJztXj2GPfl',
'info_dict': {
'id': 'DJztXj2GPfk',
'ext': 'mp4',
'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
'description': 'md5:bf577a41da97918e94fa9798d9228825',
'upload_date': '20090125',
'uploader': 'Prochorowka',
'uploader_id': 'Prochorowka',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
'artist': 'Panjabi MC',
'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
'album': 'Beware of the Boys (Mundian To Bach Ke)',
},
'params': {
'skip_download': True,
},
'skip': 'Video unavailable',
},
{
# empty description results in an empty string
'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
'info_dict': {
'id': 'x41yOUIvK2k',
'ext': 'mp4',
'title': 'IMG 3456',
'description': '',
'upload_date': '20170613',
'uploader_id': 'ElevageOrVert',
'uploader': 'ElevageOrVert',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi_webp/x41yOUIvK2k/maxresdefault.webp',
'uploader_url': 'http://www.youtube.com/user/ElevageOrVert',
'like_count': int,
'channel_id': 'UCo03ZQPBW5U4UC3regpt1nw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCo03ZQPBW5U4UC3regpt1nw',
'availability': 'public',
'age_limit': 0,
'categories': ['Pets & Animals'],
'duration': 7,
'playable_in_embed': True,
'live_status': 'not_live',
'channel': 'ElevageOrVert',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# with '};' inside yt initial data (see [1])
# see [2] for an example with '};' inside ytInitialPlayerResponse
# 1. https://github.com/ytdl-org/youtube-dl/issues/27093
# 2. https://github.com/ytdl-org/youtube-dl/issues/27216
'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
'info_dict': {
'id': 'CHqg6qOn4no',
'ext': 'mp4',
'title': 'Part 77 Sort a list of simple types in c#',
'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
'upload_date': '20130831',
'uploader_id': 'kudvenkat',
'uploader': 'kudvenkat',
'channel_id': 'UCCTVrRB5KpIiK6V2GGVsR1Q',
'like_count': int,
'uploader_url': 'http://www.youtube.com/user/kudvenkat',
'channel_url': 'https://www.youtube.com/channel/UCCTVrRB5KpIiK6V2GGVsR1Q',
'live_status': 'not_live',
'categories': ['Education'],
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/CHqg6qOn4no/sddefault.jpg',
'tags': 'count:12',
'playable_in_embed': True,
'age_limit': 0,
'view_count': int,
'duration': 522,
'channel': 'kudvenkat',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# another example of '};' in ytInitialData
'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
'only_matching': True,
},
{
# https://github.com/ytdl-org/youtube-dl/pull/28094
'url': 'OtqTfy26tG0',
'info_dict': {
'id': 'OtqTfy26tG0',
'ext': 'mp4',
'title': 'Burn Out',
'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
'upload_date': '20141120',
'uploader': 'The Cinematic Orchestra - Topic',
'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'artist': 'The Cinematic Orchestra',
'track': 'Burn Out',
'album': 'Every Day',
'like_count': int,
'live_status': 'not_live',
'alt_title': 'Burn Out',
'duration': 614,
'age_limit': 0,
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
'creator': 'The Cinematic Orchestra',
'channel': 'The Cinematic Orchestra',
'tags': ['The Cinematic Orchestra', 'Every Day', 'Burn Out'],
'channel_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
'availability': 'public',
'thumbnail': 'https://i.ytimg.com/vi/OtqTfy26tG0/maxresdefault.jpg',
'categories': ['Music'],
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
},
{
# controversial video, only works with bpctr when authenticated with cookies
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
{
# controversial video, requires bpctr/contentCheckOk
'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
'info_dict': {
'id': 'SZJvDhaSDnc',
'ext': 'mp4',
'title': 'San Diego teen commits suicide after bullying over embarrassing video',
'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
'uploader': 'CBS Mornings',
'uploader_id': 'CBSThisMorning',
'upload_date': '20140716',
'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7',
'duration': 170,
'categories': ['News & Politics'],
'uploader_url': 'http://www.youtube.com/user/CBSThisMorning',
'view_count': int,
'channel': 'CBS Mornings',
'tags': ['suicide', 'bullying', 'video', 'cbs', 'news'],
'thumbnail': 'https://i.ytimg.com/vi/SZJvDhaSDnc/hqdefault.jpg',
'age_limit': 18,
'availability': 'needs_auth',
'channel_url': 'https://www.youtube.com/channel/UC-SJ6nODDmufqBzPBwCvYvQ',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
}
},
{
# restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
'url': 'cBvYw8_A0vQ',
'info_dict': {
'id': 'cBvYw8_A0vQ',
'ext': 'mp4',
'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
'upload_date': '20201120',
'uploader': 'Walk around Japan',
'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'duration': 1456,
'categories': ['Travel & Events'],
'channel_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
'view_count': int,
'channel': 'Walk around Japan',
'tags': ['Ueno Tokyo', 'Okachimachi Tokyo', 'Ameyoko Street', 'Tokyo attraction', 'Travel in Tokyo'],
'thumbnail': 'https://i.ytimg.com/vi_webp/cBvYw8_A0vQ/hqdefault.webp',
'age_limit': 0,
'availability': 'public',
'channel_url': 'https://www.youtube.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
}, {
# Has multiple audio streams
'url': 'WaOKSUlf4TM',
'only_matching': True
}, {
# Requires Premium: has format 141 when requested using YTM url
'url': 'https://music.youtube.com/watch?v=XclachpHxis',
'only_matching': True
}, {
# multiple subtitles with same lang_code
'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
'only_matching': True,
}, {
        # Force use of the android client fallback
'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
'info_dict': {
'id': 'YOelRv7fMxY',
'title': 'DIGGING A SECRET TUNNEL Part 1',
'ext': '3gp',
'upload_date': '20210624',
'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
'uploader': 'colinfurze',
'uploader_id': 'colinfurze',
'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
'description': 'md5:5d5991195d599b56cd0c4148907eec50',
'duration': 596,
'categories': ['Entertainment'],
'uploader_url': 'http://www.youtube.com/user/colinfurze',
'view_count': int,
'channel': 'colinfurze',
'tags': ['Colin', 'furze', 'Terry', 'tunnel', 'underground', 'bunker'],
'thumbnail': 'https://i.ytimg.com/vi/YOelRv7fMxY/maxresdefault.jpg',
'age_limit': 0,
'availability': 'public',
'like_count': int,
'live_status': 'not_live',
'playable_in_embed': True,
'channel_follower_count': int
},
'params': {
'format': '17', # 3gp format available on android
'extractor_args': {'youtube': {'player_client': ['android']}},
},
},
{
# Skip download of additional client configs (remix client config in this case)
'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
'only_matching': True,
'params': {
'extractor_args': {'youtube': {'player_skip': ['configs']}},
},
}, {
# shorts
'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
'only_matching': True,
}, {
'note': 'Storyboards',
'url': 'https://www.youtube.com/watch?v=5KLPxDtMqe8',
'info_dict': {
'id': '5KLPxDtMqe8',
'ext': 'mhtml',
'format_id': 'sb0',
'title': 'Your Brain is Plastic',
'uploader_id': 'scishow',
'description': 'md5:89cd86034bdb5466cd87c6ba206cd2bc',
'upload_date': '20140324',
'uploader': 'SciShow',
'like_count': int,
'channel_id': 'UCZYTClx2T1of7BRZ86-8fow',
'channel_url': 'https://www.youtube.com/channel/UCZYTClx2T1of7BRZ86-8fow',
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/5KLPxDtMqe8/maxresdefault.jpg',
'playable_in_embed': True,
'tags': 'count:12',
'uploader_url': 'http://www.youtube.com/user/scishow',
'availability': 'public',
'channel': 'SciShow',
'live_status': 'not_live',
'duration': 248,
'categories': ['Education'],
'age_limit': 0,
'channel_follower_count': int
}, 'params': {'format': 'mhtml', 'skip_download': True}
}, {
# Ensure video upload_date is in UTC timezone (video was uploaded 1641170939)
'url': 'https://www.youtube.com/watch?v=2NUZ8W2llS4',
'info_dict': {
'id': '2NUZ8W2llS4',
'ext': 'mp4',
'title': 'The NP that test your phone performance 🙂',
'description': 'md5:144494b24d4f9dfacb97c1bbef5de84d',
'uploader': 'Leon Nguyen',
'uploader_id': 'VNSXIII',
'uploader_url': 'http://www.youtube.com/user/VNSXIII',
'channel_id': 'UCRqNBSOHgilHfAczlUmlWHA',
'channel_url': 'https://www.youtube.com/channel/UCRqNBSOHgilHfAczlUmlWHA',
'duration': 21,
'view_count': int,
'age_limit': 0,
'categories': ['Gaming'],
'tags': 'count:23',
'playable_in_embed': True,
'live_status': 'not_live',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Leon Nguyen',
'thumbnail': 'https://i.ytimg.com/vi_webp/2NUZ8W2llS4/maxresdefault.webp',
'channel_follower_count': int
}
}, {
        # Premiered video, so the date text is the premiere date; ensure upload date is in UTC (published 1641172509)
'url': 'https://www.youtube.com/watch?v=mzZzzBU6lrM',
'info_dict': {
'id': 'mzZzzBU6lrM',
'ext': 'mp4',
'title': 'I Met GeorgeNotFound In Real Life...',
'description': 'md5:cca98a355c7184e750f711f3a1b22c84',
'uploader': 'Quackity',
'uploader_id': 'QuackityHQ',
'uploader_url': 'http://www.youtube.com/user/QuackityHQ',
'channel_id': 'UC_8NknAFiyhOUaZqHR3lq3Q',
'channel_url': 'https://www.youtube.com/channel/UC_8NknAFiyhOUaZqHR3lq3Q',
'duration': 955,
'view_count': int,
'age_limit': 0,
'categories': ['Entertainment'],
'tags': 'count:26',
'playable_in_embed': True,
'live_status': 'not_live',
'release_timestamp': 1641172509,
'release_date': '20220103',
'upload_date': '20220103',
'like_count': int,
'availability': 'public',
'channel': 'Quackity',
'thumbnail': 'https://i.ytimg.com/vi/mzZzzBU6lrM/maxresdefault.jpg',
'channel_follower_count': int
}
},
{ # continuous livestream. Microformat upload date should be preferred.
# Upload date was 2021-06-19 (not UTC), while stream start is 2021-11-27
'url': 'https://www.youtube.com/watch?v=kgx4WGK0oNU',
'info_dict': {
'id': 'kgx4WGK0oNU',
'title': r're:jazz\/lofi hip hop radio🌱chill beats to relax\/study to \[LIVE 24\/7\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
'ext': 'mp4',
'channel_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'availability': 'public',
'age_limit': 0,
'release_timestamp': 1637975704,
'upload_date': '20210619',
'channel_url': 'https://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'live_status': 'is_live',
'thumbnail': 'https://i.ytimg.com/vi/kgx4WGK0oNU/maxresdefault.jpg',
'uploader': '阿鲍Abao',
'uploader_url': 'http://www.youtube.com/channel/UC84whx2xxsiA1gXHXXqKGOA',
'channel': 'Abao in Tokyo',
'channel_follower_count': int,
'release_date': '20211127',
'tags': 'count:39',
'categories': ['People & Blogs'],
'like_count': int,
'uploader_id': 'UC84whx2xxsiA1gXHXXqKGOA',
'view_count': int,
'playable_in_embed': True,
'description': 'md5:2ef1d002cad520f65825346e2084e49d',
},
'params': {'skip_download': True}
},
]
@classmethod
def suitable(cls, url):
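        # URLs carrying a playlist ('list') parameter are declined here so the
        # playlist/tab extractor can handle them instead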
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('list', [None])[0]:
return False
return super().suitable(url)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._code_cache = {}
self._player_cache = {}
def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data):
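        # Rewires the 'is_from_start' formats so their fragments are produced lazily by
        # _live_dash_fragments, refetching the DASH manifest as the live stream grows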
lock = threading.Lock()
is_live = True
start_time = time.time()
formats = [f for f in formats if f.get('is_from_start')]
def refetch_manifest(format_id, delay):
nonlocal formats, start_time, is_live
if time.time() <= start_time + delay:
return
_, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
video_details = traverse_obj(
prs, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
prs, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
_, is_live, _, formats = self._list_formats(video_id, microformats, video_details, prs, player_url)
start_time = time.time()
def mpd_feed(format_id, delay):
"""
@returns (manifest_url, manifest_stream_number, is_live) or None
"""
with lock:
refetch_manifest(format_id, delay)
f = next((f for f in formats if f['format_id'] == format_id), None)
if not f:
if not is_live:
self.to_screen(f'{video_id}: Video is no longer live')
else:
self.report_warning(
f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}')
return None
return f['manifest_url'], f['manifest_stream_number'], is_live
for f in formats:
f['is_live'] = True
f['protocol'] = 'http_dash_segments_generator'
f['fragments'] = functools.partial(
self._live_dash_fragments, f['format_id'], live_start_time, mpd_feed)
def _live_dash_fragments(self, format_id, live_start_time, mpd_feed, ctx):
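        # Generator of fragment dicts for a live DASH stream, starting from its beginning.
        # FETCH_SPAN throttles the polling loop (seconds per iteration); MAX_DURATION is the
        # 120 hours (432000 s) of history YouTube keeps for live streams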
FETCH_SPAN, MAX_DURATION = 5, 432000
mpd_url, stream_number, is_live = None, None, True
begin_index = 0
download_start_time = ctx.get('start') or time.time()
lack_early_segments = download_start_time - (live_start_time or download_start_time) > MAX_DURATION
if lack_early_segments:
self.report_warning(bug_reports_message(
'Starting download from the last 120 hours of the live stream since '
'YouTube does not have data before that. If you think this is wrong,'), only_once=True)
lack_early_segments = True
known_idx, no_fragment_score, last_segment_url = begin_index, 0, None
fragments, fragment_base_url = None, None
def _extract_sequence_from_mpd(refresh_sequence, immediate):
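            # Refreshes the MPD (when needed) and returns (should_continue, last_seq),
            # where last_seq is the highest sequence number found in the manifest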
nonlocal mpd_url, stream_number, is_live, no_fragment_score, fragments, fragment_base_url
# Obtain from MPD's maximum seq value
old_mpd_url = mpd_url
last_error = ctx.pop('last_error', None)
expire_fast = immediate or last_error and isinstance(last_error, compat_HTTPError) and last_error.code == 403
mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
or (mpd_url, stream_number, False))
if not refresh_sequence:
if expire_fast and not is_live:
return False, last_seq
elif old_mpd_url == mpd_url:
return True, last_seq
try:
fmts, _ = self._extract_mpd_formats_and_subtitles(
mpd_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
fmts = None
if not fmts:
no_fragment_score += 2
return False, last_seq
fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number)
fragments = fmt_info['fragments']
fragment_base_url = fmt_info['fragment_base_url']
assert fragment_base_url
_last_seq = int(re.search(r'(?:/|^)sq/(\d+)', fragments[-1]['path']).group(1))
return True, _last_seq
while is_live:
fetch_time = time.time()
if no_fragment_score > 30:
return
if last_segment_url:
                # Obtain the last sequence number from the "X-Head-Seqnum" header of the most recent segment
try:
urlh = self._request_webpage(
last_segment_url, None, note=False, errnote=False, fatal=False)
except ExtractorError:
urlh = None
last_seq = try_get(urlh, lambda x: int_or_none(x.headers['X-Head-Seqnum']))
if last_seq is None:
no_fragment_score += 2
last_segment_url = None
continue
else:
should_continue, last_seq = _extract_sequence_from_mpd(True, no_fragment_score > 15)
no_fragment_score += 2
if not should_continue:
continue
if known_idx > last_seq:
last_segment_url = None
continue
last_seq += 1
if begin_index < 0 and known_idx < 0:
                # a negative begin_index counts back from the live edge, so skip ahead to that offset
known_idx = last_seq + begin_index
if lack_early_segments:
known_idx = max(known_idx, last_seq - int(MAX_DURATION // fragments[-1]['duration']))
try:
for idx in range(known_idx, last_seq):
                    # do not update the sequence here, or part of the stream may be skipped
should_continue, _ = _extract_sequence_from_mpd(False, False)
if not should_continue:
known_idx = idx - 1
raise ExtractorError('breaking out of outer loop')
last_segment_url = urljoin(fragment_base_url, 'sq/%d' % idx)
yield {
'url': last_segment_url,
}
if known_idx == last_seq:
no_fragment_score += 5
else:
no_fragment_score = 0
known_idx = last_seq
except ExtractorError:
continue
time.sleep(max(0, FETCH_SPAN + fetch_time - time.time()))
def _extract_player_url(self, *ytcfgs, webpage=None):
player_url = traverse_obj(
ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
get_all=False, expected_type=compat_str)
if not player_url:
return
return urljoin('https://www.youtube.com', player_url)
def _download_player_url(self, video_id, fatal=False):
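        # Derives the base.js URL by scraping the player version from the iframe API JS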
res = self._download_webpage(
'https://www.youtube.com/iframe_api',
note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
if res:
player_version = self._search_regex(
r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
if player_version:
return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _load_player(self, video_id, player_url, fatal=True):
player_id = self._extract_player_info(player_url)
if player_id not in self._code_cache:
code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote='Download of %s failed' % player_url)
if code:
self._code_cache[player_id] = code
return self._code_cache.get(player_id)
def _extract_signature_function(self, video_id, player_url, example_sig):
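        # The signature transform only rearranges/selects characters of its input, so it is
        # cached on disk as a list of character indices, keyed by player id and the
        # "shape" (dot-separated part lengths) of the example signature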
player_id = self._extract_player_info(player_url)
# Read from filesystem cache
func_id = f'js_{player_id}_{self._signature_cache_id(example_sig)}'
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
code = self._load_player(video_id, player_url)
if code:
res = self._parse_sig_js(code)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
if not self.get_param('youtube_print_sig_code'):
return
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return f's[{starts}{ends}{steps}]'
step = None
            # Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
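        # Finds the name of the signature-scrambling function in the player JS using the
        # patterns below, then wraps it in a JSInterpreter-backed callable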
funcname = self._search_regex(
(r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
# Obsolete patterns
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
self._print_sig_code(func, s)
return func(s)
except Exception as e:
raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
def _decrypt_nsig(self, s, video_id, player_url):
"""Turn the encrypted n field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt nsig without player_url')
player_url = urljoin('https://www.youtube.com', player_url)
sig_id = ('nsig_value', s)
if sig_id in self._player_cache:
return self._player_cache[sig_id]
try:
player_id = ('nsig', player_url)
if player_id not in self._player_cache:
self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
func = self._player_cache[player_id]
self._player_cache[sig_id] = func(s)
self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
return self._player_cache[sig_id]
except Exception as e:
raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id)
def _extract_n_function_name(self, jscode):
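        # The n-parameter transform is referenced either directly or via an index into an
        # array of functions; resolve the array lookup when an index is present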
nfunc, idx = self._search_regex(
r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]+)(?:\[(?P<idx>\d+)\])?\([a-zA-Z0-9]\)',
jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
if not idx:
return nfunc
return json.loads(js_to_json(self._search_regex(
rf'var {re.escape(nfunc)}\s*=\s*(\[.+?\]);', jscode,
f'Initial JS player n function list ({nfunc}.{idx})')))[int(idx)]
def _extract_n_function(self, video_id, player_url):
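        # Extracted n-function code is cached per player id so the player JS only needs to
        # be parsed once per player version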
player_id = self._extract_player_info(player_url)
func_code = self._downloader.cache.load('youtube-nsig', player_id)
if func_code:
jsi = JSInterpreter(func_code)
else:
jscode = self._load_player(video_id, player_url)
funcname = self._extract_n_function_name(jscode)
jsi = JSInterpreter(jscode)
func_code = jsi.extract_function_code(funcname)
self._downloader.cache.store('youtube-nsig', player_id, func_code)
if self.get_param('youtube_print_sig_code'):
self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')
return lambda s: jsi.extract_function_from_code(*func_code)([s])
def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
"""
Extract signatureTimestamp (sts)
Required to tell API what sig/player version is in use.
"""
sts = None
if isinstance(ytcfg, dict):
sts = int_or_none(ytcfg.get('STS'))
if not sts:
# Attempt to extract from player
if player_url is None:
error_msg = 'Cannot extract signature timestamp without player_url.'
if fatal:
raise ExtractorError(error_msg)
self.report_warning(error_msg)
return
code = self._load_player(video_id, player_url, fatal=fatal)
if code:
sts = int_or_none(self._search_regex(
r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
'JS player signature timestamp', group='sts', fatal=fatal))
return sts
def _mark_watched(self, video_id, player_responses):
playback_url = get_first(
player_responses, ('playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
expected_type=url_or_none)
if not playback_url:
self.report_warning('Unable to mark watched')
return
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
        # The cpn generation algorithm is reverse engineered from base.js;
        # in fact, it works even with a dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
return mobj.group('id')
def _extract_chapters_from_json(self, data, duration):
chapter_list = traverse_obj(
data, (
'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
), expected_type=list)
return self._extract_chapters(
chapter_list,
chapter_time=lambda chapter: float_or_none(
traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
chapter_title=lambda chapter: traverse_obj(
chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
duration=duration)
def _extract_chapters_from_engagement_panel(self, data, duration):
content_list = traverse_obj(
data,
('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
expected_type=list, default=[])
chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
chapter_title = lambda chapter: self._get_text(chapter, 'title')
return next((
filter(None, (
self._extract_chapters(
traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
chapter_time, chapter_title, duration)
for contents in content_list
))), [])
def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
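        # Builds the chapter list: each chapter ends where the next one starts, the final
        # chapter ends at the video duration, and chapters with out-of-order start times
        # are dropped with a warning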
chapters = []
last_chapter = {'start_time': 0}
for idx, chapter in enumerate(chapter_list or []):
title = chapter_title(chapter)
start_time = chapter_time(chapter)
if start_time is None:
continue
last_chapter['end_time'] = start_time
if start_time < last_chapter['start_time']:
if idx == 1:
chapters.pop()
self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
else:
self.report_warning(f'Invalid start time for chapter "{title}"')
continue
last_chapter = {'start_time': start_time, 'title': title}
chapters.append(last_chapter)
last_chapter['end_time'] = duration
return chapters
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(fr'{regex}\s*{self._YT_INITIAL_BOUNDARY_RE}',
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_comment(self, comment_renderer, parent=None):
comment_id = comment_renderer.get('commentId')
if not comment_id:
return
text = self._get_text(comment_renderer, 'contentText')
# note: timestamp is an estimate calculated from the current time and time_text
timestamp, time_text = self._extract_time_text(comment_renderer, 'publishedTimeText')
author = self._get_text(comment_renderer, 'authorText')
author_id = try_get(comment_renderer,
lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
lambda x: x['likeCount']), compat_str)) or 0
author_thumbnail = try_get(comment_renderer,
lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
is_favorited = 'creatorHeart' in (try_get(
comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
return {
'id': comment_id,
'text': text,
'timestamp': timestamp,
'time_text': time_text,
'like_count': votes,
'is_favorited': is_favorited,
'author': author,
'author_id': author_id,
'author_thumbnail': author_thumbnail,
'author_is_uploader': author_is_uploader,
'parent': parent or 'root'
}
def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, tracker=None):
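        # Generator over comments. Called with parent=None for top-level threads and
        # recursively for their replies; 'tracker' shares running counts across the
        # recursion so the max-comments limits apply globally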
get_single_config_arg = lambda c: self._configuration_arg(c, [''])[0]
def extract_header(contents):
_continuation = None
for content in contents:
comments_header_renderer = traverse_obj(content, 'commentsHeaderRenderer')
expected_comment_count = self._get_count(
comments_header_renderer, 'countText', 'commentsCount')
if expected_comment_count:
tracker['est_total'] = expected_comment_count
self.to_screen(f'Downloading ~{expected_comment_count} comments')
comment_sort_index = int(get_single_config_arg('comment_sort') != 'top') # 1 = new, 0 = top
sort_menu_item = try_get(
comments_header_renderer,
lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
_continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
if not _continuation:
continue
sort_text = str_or_none(sort_menu_item.get('title'))
if not sort_text:
sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
self.to_screen('Sorting comments by %s' % sort_text.lower())
break
return _continuation
def extract_thread(contents):
if not parent:
tracker['current_page_thread'] = 0
for content in contents:
if not parent and tracker['total_parent_comments'] >= max_parents:
yield
comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
comment_renderer = get_first(
(comment_thread_renderer, content), [['commentRenderer', ('comment', 'commentRenderer')]],
expected_type=dict, default={})
comment = self._extract_comment(comment_renderer, parent)
if not comment:
continue
tracker['running_total'] += 1
tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1
yield comment
# Attempt to get the replies
comment_replies_renderer = try_get(
comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
if comment_replies_renderer:
tracker['current_page_thread'] += 1
comment_entries_iter = self._comment_entries(
comment_replies_renderer, ytcfg, video_id,
parent=comment.get('id'), tracker=tracker)
yield from itertools.islice(comment_entries_iter, min(
max_replies_per_thread, max(0, max_replies - tracker['total_reply_comments'])))
# Keeps track of counts across recursive calls
if not tracker:
tracker = dict(
running_total=0,
est_total=0,
current_page_thread=0,
total_parent_comments=0,
total_reply_comments=0)
# TODO: Deprecated
# YouTube comments have a max depth of 2
max_depth = int_or_none(get_single_config_arg('max_comment_depth'))
if max_depth:
self._downloader.deprecation_warning(
'[youtube] max_comment_depth extractor argument is deprecated. Set max replies in the max-comments extractor argument instead.')
if max_depth == 1 and parent:
return
max_comments, max_parents, max_replies, max_replies_per_thread, *_ = map(
lambda p: int_or_none(p, default=sys.maxsize), self._configuration_arg('max_comments', ) + [''] * 4)
continuation = self._extract_continuation(root_continuation_data)
message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
if message and not parent:
self.report_warning(message, video_id=video_id)
response = None
is_first_continuation = parent is None
for page_num in itertools.count(0):
if not continuation:
break
headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response))
comment_prog_str = f"({tracker['running_total']}/{tracker['est_total']})"
if page_num == 0:
if is_first_continuation:
note_prefix = 'Downloading comment section API JSON'
else:
note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
tracker['current_page_thread'], comment_prog_str)
else:
note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
' ' if parent else '', ' replies' if parent else '',
page_num, comment_prog_str)
response = self._extract_response(
item_id=None, query=continuation,
ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
check_get_keys='onResponseReceivedEndpoints')
continuation_contents = traverse_obj(
response, 'onResponseReceivedEndpoints', expected_type=list, default=[])
continuation = None
for continuation_section in continuation_contents:
continuation_items = traverse_obj(
continuation_section,
(('reloadContinuationItemsCommand', 'appendContinuationItemsAction'), 'continuationItems'),
get_all=False, expected_type=list) or []
if is_first_continuation:
continuation = extract_header(continuation_items)
is_first_continuation = False
if continuation:
break
continue
for entry in extract_thread(continuation_items):
if not entry:
return
yield entry
continuation = self._extract_continuation({'contents': continuation_items})
if continuation:
break
def _get_comments(self, ytcfg, video_id, contents, webpage):
"""Entry for comment extraction"""
def _real_comment_extract(contents):
renderer = next((
item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
if item.get('sectionIdentifier') == 'comment-item-section'), None)
yield from self._comment_entries(renderer, ytcfg, video_id)
max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
return itertools.islice(_real_comment_extract(contents), 0, max_comments)
@staticmethod
def _get_checkok_params():
return {'contentCheckOk': True, 'racyCheckOk': True}
@classmethod
def _generate_player_context(cls, sts=None):
context = {
'html5Preference': 'HTML5_PREF_WANTS',
}
if sts is not None:
context['signatureTimestamp'] = sts
return {
'playbackContext': {
'contentPlaybackContext': context
},
**cls._get_checkok_params()
}
@staticmethod
def _is_agegated(player_response):
if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
return True
reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
AGE_GATE_REASONS = (
'confirm your age', 'age-restricted', 'inappropriate', # reason
'age_verification_required', 'age_check_required', # status
)
return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
@staticmethod
def _is_unplayable(player_response):
return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
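        # Performs a single Innertube 'player' API request on behalf of the given client,
        # with headers and signatureTimestamp appropriate for that client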
session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
headers = self.generate_api_headers(
ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
yt_query = {'videoId': video_id}
yt_query.update(self._generate_player_context(sts))
return self._extract_response(
item_id=video_id, ep='player', query=yt_query,
ytcfg=player_ytcfg, headers=headers, fatal=True,
default_client=client,
note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
) or None
def _get_requested_clients(self, url, smuggled_data):
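        # Resolves the 'player_client' extractor argument into an ordered set of Innertube
        # clients (defaulting to android + web); music URLs additionally pull in the
        # corresponding *_music client variants when they exist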
requested_clients = []
default = ['android', 'web']
allowed_clients = sorted(
(client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'),
key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
for client in self._configuration_arg('player_client'):
if client in allowed_clients:
requested_clients.append(client)
elif client == 'default':
requested_clients.extend(default)
elif client == 'all':
requested_clients.extend(allowed_clients)
else:
self.report_warning(f'Skipping unsupported client {client}')
if not requested_clients:
requested_clients = default
if smuggled_data.get('is_music_url') or self.is_music_url(url):
requested_clients.extend(
f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)
return orderedSet(requested_clients)
def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
initial_pr = None
if webpage:
initial_pr = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
video_id, 'initial player response')
all_clients = set(clients)
clients = clients[::-1]
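        # clients are consumed with pop(), so reverse the list to process them in the
        # originally requested order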
prs = []
        def append_client(*client_names):
            """ Append the first given client name that exists and is not already used """
for client_name in client_names:
actual_client = _split_innertube_client(client_name)[0]
if actual_client in INNERTUBE_CLIENTS:
if actual_client not in all_clients:
clients.append(client_name)
all_clients.add(actual_client)
return
        # The android player_response does not have microformats, which are needed for
        # the extraction of some data. So we return the initial_pr with formats
        # stripped out, even if not requested by the user
# See: https://github.com/yt-dlp/yt-dlp/issues/501
if initial_pr:
pr = dict(initial_pr)
pr['streamingData'] = None
prs.append(pr)
last_error = None
tried_iframe_fallback = False
player_url = None
while clients:
client, base_client, variant = _split_innertube_client(clients.pop())
player_ytcfg = master_ytcfg if client == 'web' else {}
if 'configs' not in self._configuration_arg('player_skip') and client != 'web':
player_ytcfg = self._download_ytcfg(client, video_id) or player_ytcfg
player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
if 'js' in self._configuration_arg('player_skip'):
require_js_player = False
player_url = None
if not player_url and not tried_iframe_fallback and require_js_player:
player_url = self._download_player_url(video_id)
tried_iframe_fallback = True
try:
pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
except ExtractorError as e:
if last_error:
self.report_warning(last_error)
last_error = e
continue
if pr:
prs.append(pr)
# creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
if variant == 'embedded' and self._is_unplayable(pr) and self.is_authenticated:
append_client(f'{base_client}_creator')
elif self._is_agegated(pr):
if variant == 'tv_embedded':
append_client(f'{base_client}_embedded')
elif not variant:
append_client(f'tv_embedded.{base_client}', f'{base_client}_embedded')
if last_error:
if not len(prs):
raise last_error
self.report_warning(last_error)
return prs, player_url
def _extract_formats(self, streaming_data, video_id, player_url, is_live, duration):
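        # Generator over all formats from the combined player responses: progressive and
        # adaptive URLs first (decrypting signatureCipher and the throttling n-parameter
        # when a player URL is available), followed by HLS and DASH manifest formats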
itags, stream_ids = {}, []
itag_qualities, res_qualities = {}, {}
q = qualities([
            # Normally 'tiny' is the smallest video-only format, but audio-only formats
            # with unknown quality may also get tagged as 'tiny'
'tiny',
'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
])
streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
for fmt in streaming_formats:
if fmt.get('targetDurationSec'):
continue
itag = str_or_none(fmt.get('itag'))
audio_track = fmt.get('audioTrack') or {}
stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
if stream_id in stream_ids:
continue
quality = fmt.get('quality')
height = int_or_none(fmt.get('height'))
if quality == 'tiny' or not quality:
quality = fmt.get('audioQuality', '').lower() or quality
# The 3gp format (17) in android client has a quality of "small",
# but is actually worse than other formats
if itag == '17':
quality = 'tiny'
if quality:
if itag:
itag_qualities[itag] = quality
if height:
res_qualities[height] = quality
            # FORMAT_STREAM_TYPE_OTF (otf=1) requires downloading the init fragment
            # (adding `&sq=0` to the URL) and parsing the emsg box to determine the
            # number of fragments that would subsequently be requested with `&sq=N`
if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
continue
fmt_url = fmt.get('url')
if not fmt_url:
sc = compat_parse_qs(fmt.get('signatureCipher'))
fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
encrypted_sig = try_get(sc, lambda x: x['s'][0])
if not (sc and fmt_url and encrypted_sig):
continue
if not player_url:
continue
signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
fmt_url += '&' + sp + '=' + signature
query = parse_qs(fmt_url)
throttled = False
if query.get('n'):
try:
fmt_url = update_url_query(fmt_url, {
'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
except ExtractorError as e:
self.report_warning(
'nsig extraction failed: You may experience throttling for some formats\n'
f'n = {query["n"][0]} ; player = {player_url}\n{e}', only_once=True)
throttled = True
if itag:
itags[itag] = 'https'
stream_ids.append(stream_id)
tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
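            # language_preference: default audio track -> 10, 'descriptive' track -> -10, otherwise -1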
language_preference = (
10 if audio_track.get('audioIsDefault') and 10
else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower() and -10
else -1)
            # Some formats may have a much smaller duration than others (possibly damaged during encoding)
# Eg: 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
# Make sure to avoid false positives with small duration differences.
# Eg: __2ABJjxzNo, ySuUZEjARPY
is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500)
if is_damaged:
self.report_warning(f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True)
dct = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': itag,
'format_note': join_nonempty(
'%s%s' % (audio_track.get('displayName') or '',
' (default)' if language_preference > 0 else ''),
fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '),
'source_preference': -10 if throttled else -1,
'fps': int_or_none(fmt.get('fps')) or None,
'height': height,
'quality': q(quality),
'has_drm': bool(fmt.get('drmFamilies')),
'tbr': tbr,
'url': fmt_url,
'width': int_or_none(fmt.get('width')),
'language': join_nonempty(audio_track.get('id', '').split('.')[0],
'desc' if language_preference < -1 else ''),
'language_preference': language_preference,
# Strictly de-prioritize damaged and 3gp formats
'preference': -10 if is_damaged else -2 if itag == '17' else None,
}
mime_mobj = re.match(
r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
if mime_mobj:
dct['ext'] = mimetype2ext(mime_mobj.group(1))
dct.update(parse_codecs(mime_mobj.group(2)))
no_audio = dct.get('acodec') == 'none'
no_video = dct.get('vcodec') == 'none'
if no_audio:
dct['vbr'] = tbr
if no_video:
dct['abr'] = tbr
if no_audio or no_video:
dct['downloader_options'] = {
# Youtube throttles chunks >~10M
'http_chunk_size': 10485760,
}
if dct.get('ext'):
dct['container'] = dct['ext'] + '_dash'
yield dct
live_from_start = is_live and self.get_param('live_from_start')
skip_manifests = self._configuration_arg('skip')
if not self.get_param('youtube_include_hls_manifest', True):
skip_manifests.append('hls')
get_dash = 'dash' not in skip_manifests and (
not is_live or live_from_start or self._configuration_arg('include_live_dash'))
get_hls = not live_from_start and 'hls' not in skip_manifests
def process_manifest_format(f, proto, itag):
if itag in itags:
if itags[itag] == proto or f'{itag}-{proto}' in itags:
return False
itag = f'{itag}-{proto}'
if itag:
f['format_id'] = itag
itags[itag] = proto
f['quality'] = next((
q(qdict[val])
for val, qdict in ((f.get('format_id', '').split('-')[0], itag_qualities), (f.get('height'), res_qualities))
if val in qdict), -1)
return True
for sd in streaming_data:
hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
if hls_manifest_url:
for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
if process_manifest_format(f, 'hls', self._search_regex(
r'/itag/(\d+)', f['url'], 'itag', default=None)):
yield f
dash_manifest_url = get_dash and sd.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
if process_manifest_format(f, 'dash', f['format_id']):
f['filesize'] = int_or_none(self._search_regex(
r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
if live_from_start:
f['is_from_start'] = True
yield f
def _extract_storyboard(self, player_responses, duration):
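        # Parses the '|'-separated storyboard spec into mhtml formats; each level describes
        # a grid of preview frames (width, height, frame count, columns, rows) plus the $N
        # template value and a signature for the thumbnail URL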
spec = get_first(
player_responses, ('storyboards', 'playerStoryboardSpecRenderer', 'spec'), default='').split('|')[::-1]
base_url = url_or_none(urljoin('https://i.ytimg.com/', spec.pop() or None))
if not base_url:
return
L = len(spec) - 1
for i, args in enumerate(spec):
args = args.split('#')
counts = list(map(int_or_none, args[:5]))
if len(args) != 8 or not all(counts):
self.report_warning(f'Malformed storyboard {i}: {"#".join(args)}{bug_reports_message()}')
continue
width, height, frame_count, cols, rows = counts
N, sigh = args[6:]
url = base_url.replace('$L', str(L - i)).replace('$N', N) + f'&sigh={sigh}'
fragment_count = frame_count / (cols * rows)
fragment_duration = duration / fragment_count
yield {
'format_id': f'sb{i}',
'format_note': 'storyboard',
'ext': 'mhtml',
'protocol': 'mhtml',
'acodec': 'none',
'vcodec': 'none',
'url': url,
'width': width,
'height': height,
'fragments': [{
'url': url.replace('$M', str(j)),
'duration': min(fragment_duration, duration - (j * fragment_duration)),
} for j in range(math.ceil(fragment_count))],
}
def _download_player_responses(self, url, smuggled_data, video_id, webpage_url):
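        # Downloads the watch page (unless skipped via player_skip=webpage); the bpctr and
        # has_verified query parameters help bypass content-warning interstitials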
webpage = None
if 'webpage' not in self._configuration_arg('player_skip'):
webpage = self._download_webpage(
webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
player_responses, player_url = self._extract_player_responses(
self._get_requested_clients(url, smuggled_data),
video_id, webpage, master_ytcfg)
return webpage, master_ytcfg, player_responses, player_url
def _list_formats(self, video_id, microformats, video_details, player_responses, player_url, duration=None):
live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
is_live = get_first(video_details, 'isLive')
if is_live is None:
is_live = get_first(live_broadcast_details, 'isLiveNow')
streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live, duration))
return live_broadcast_details, is_live, streaming_data, formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
playability_statuses = traverse_obj(
player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
trailer_video_id = get_first(
playability_statuses,
('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
expected_type=str)
if trailer_video_id:
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else (lambda x: None))
video_details = traverse_obj(
player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
microformats = traverse_obj(
player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
expected_type=dict, default=[])
video_title = (
get_first(video_details, 'title')
or self._get_text(microformats, (..., 'title'))
or search_meta(['og:title', 'twitter:title', 'title']))
video_description = get_first(video_details, 'shortDescription')
multifeed_metadata_list = get_first(
player_responses,
('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
expected_type=str)
if multifeed_metadata_list and not smuggled_data.get('force_singlefeed'):
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
else:
entries = []
feed_ids = []
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain comma as well (see
# https://github.com/ytdl-org/youtube-dl/issues/8536)
feed_data = compat_parse_qs(
compat_urllib_parse_unquote_plus(feed))
def feed_entry(name):
return try_get(
feed_data, lambda x: x[name][0], compat_str)
feed_id = feed_entry('id')
if not feed_id:
continue
feed_title = feed_entry('title')
title = video_title
if feed_title:
title += ' (%s)' % feed_title
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%swatch?v=%s' % (base_url, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': title,
})
feed_ids.append(feed_id)
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(
entries, video_id, video_title, video_description)
duration = int_or_none(
get_first(video_details, 'lengthSeconds')
or get_first(microformats, 'lengthSeconds')
or parse_duration(search_meta('duration'))) or None
live_broadcast_details, is_live, streaming_data, formats = self._list_formats(
video_id, microformats, video_details, player_responses, player_url, duration)
if not formats:
if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
self.report_drm(video_id)
pemr = get_first(
playability_statuses,
('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
subreason = clean_html(self._get_text(pemr, 'subreason') or '')
if subreason:
if subreason == 'The uploader has not made this video available in your country.':
countries = get_first(microformats, 'availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += f'. {subreason}'
if reason:
self.raise_no_formats(reason, expected=True)
keywords = get_first(video_details, 'keywords', expected_type=list) or []
if not keywords and webpage:
keywords = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
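        # 'yt:stretch=W:H' keywords carry the intended display aspect ratio; record it on all
        # video formats as 'stretched_ratio' (e.g. 'yt:stretch=16:9' -> ratio 16/9)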
for keyword in keywords:
if keyword.startswith('yt:stretch='):
mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
if mobj:
# NB: float is intentional for forcing float division
w, h = (float(v) for v in mobj.groups())
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
break
thumbnails = self._extract_thumbnails((video_details, microformats), (..., ..., 'thumbnail'))
thumbnail_url = search_meta(['og:image', 'twitter:image'])
if thumbnail_url:
thumbnails.append({
'url': thumbnail_url,
})
original_thumbnails = thumbnails.copy()
        # The best resolution thumbnails sometimes do not appear in the webpage
# See: https://github.com/yt-dlp/yt-dlp/issues/340
# List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
thumbnail_names = [
            # While the *1,*2,*3 thumbnails are just below their corresponding "*default" variants
            # in resolution, these are not the custom thumbnails. So de-prioritize them
'maxresdefault', 'hq720', 'sddefault', 'hqdefault', '0', 'mqdefault', 'default',
'sd1', 'sd2', 'sd3', 'hq1', 'hq2', 'hq3', 'mq1', 'mq2', 'mq3', '1', '2', '3'
]
n_thumbnail_names = len(thumbnail_names)
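        # Guess additional thumbnail URLs for each known name in both webp and jpg, e.g.
        # (illustrative) https://i.ytimg.com/vi_webp/<video_id>/maxresdefault.webp; the
        # preference computed below favours webp over jpg and earlier (higher-res) names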
thumbnails.extend({
'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
video_id=video_id, name=name, ext=ext,
webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
} for name in thumbnail_names for ext in ('webp', 'jpg'))
for thumb in thumbnails:
i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
self._remove_duplicate_formats(thumbnails)
self._downloader._sort_thumbnails(original_thumbnails)
category = get_first(microformats, 'category') or search_meta('genre')
channel_id = str_or_none(
get_first(video_details, 'channelId')
or get_first(microformats, 'externalChannelId')
or search_meta('channelId'))
owner_profile_url = get_first(microformats, 'ownerProfileUrl')
live_content = get_first(video_details, 'isLiveContent')
is_upcoming = get_first(video_details, 'isUpcoming')
if is_live is None:
if is_upcoming or live_content is False:
is_live = False
if is_upcoming is None and (live_content or is_live):
is_upcoming = False
live_start_time = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
live_end_time = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
if not duration and live_end_time and live_start_time:
duration = live_end_time - live_start_time
if is_live and self.get_param('live_from_start'):
self._prepare_live_from_start_formats(formats, video_id, live_start_time, url, webpage_url, smuggled_data)
formats.extend(self._extract_storyboard(player_responses, duration))
# Source is given priority since formats that throttle are given lower source_preference
# When throttling issue is fully fixed, remove this
self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto'))
info = {
'id': video_id,
'title': video_title,
'formats': formats,
'thumbnails': thumbnails,
            # The best thumbnail that we are sure exists. Prevents unnecessary
            # URL checking if the user doesn't care about getting the best possible thumbnail
'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
'description': video_description,
'uploader': get_first(video_details, 'author'),
'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': format_field(channel_id, template='https://www.youtube.com/channel/%s'),
'duration': duration,
'view_count': int_or_none(
get_first((video_details, microformats), (..., 'viewCount'))
or search_meta('interactionCount')),
'average_rating': float_or_none(get_first(video_details, 'averageRating')),
'age_limit': 18 if (
get_first(microformats, 'isFamilySafe') is False
or search_meta('isFamilyFriendly') == 'false'
or search_meta('og:restrictions:age') == '18+') else 0,
'webpage_url': webpage_url,
'categories': [category] if category else None,
'tags': keywords,
'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
'is_live': is_live,
'was_live': (False if is_live or is_upcoming or live_content is False
else None if is_live is None or is_upcoming is None
else live_content),
'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
'release_timestamp': live_start_time,
}
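        # Captions: manually created tracks go into 'subtitles', while ASR tracks and all
        # translated tracks go into 'automatic_captions'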
pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
if pctr:
def get_lang_code(track):
return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
or track.get('languageCode'))
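            # vssId appears to look like '.en' for manual tracks and 'a.en' for ASR tracks
            # (an assumption based on the 'a-' prefix check further below), giving lang codes
            # such as 'en' and 'a-en'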
# Converted into dicts to remove duplicates
captions = {
get_lang_code(sub): sub
for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
translation_languages = {
lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
})
lang_subs.append({
'ext': fmt,
'url': urljoin('https://www.youtube.com', update_url_query(base_url, query)),
'name': sub_name,
})
subtitles, automatic_captions = {}, {}
for lang_code, caption_track in captions.items():
                base_url = caption_track.get('baseUrl')
                if not base_url:
                    continue
                orig_lang = parse_qs(base_url).get('lang', [None])[-1]
lang_name = self._get_text(caption_track, 'name', max_runs=1)
if caption_track.get('kind') != 'asr':
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, lang_name, {})
if not caption_track.get('isTranslatable'):
continue
for trans_code, trans_name in translation_languages.items():
if not trans_code:
continue
orig_trans_code = trans_code
if caption_track.get('kind') != 'asr':
if 'translated_subs' in self._configuration_arg('skip'):
continue
trans_code += f'-{lang_code}'
trans_name += format_field(lang_name, template=' from %s')
# Add an "-orig" label to the original language so that it can be distinguished.
# The subs are returned without "-orig" as well for compatibility
if lang_code == f'a-{orig_trans_code}':
process_language(
automatic_captions, base_url, f'{trans_code}-orig', f'{trans_name} (Original)', {})
# Setting tlang=lang returns damaged subtitles.
process_language(automatic_captions, base_url, trans_code, trans_name,
{} if orig_lang == orig_trans_code else {'tlang': trans_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
for k, v in query.items():
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
d_k += '_time'
if d_k not in info and k in s_ks:
info[d_k] = parse_duration(query[k][0])
# Youtube Music Auto-generated description
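        # Such descriptions typically look like (illustrative only):
        #   Track Name · Artist Name
        #   Album Name
        #   ℗ 2020 Some Label
        #   Released on: 2020-01-01
        #   Auto-generated by YouTube.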
if video_description:
mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
if mobj:
release_year = mobj.group('release_year')
release_date = mobj.group('release_date')
if release_date:
release_date = release_date.replace('-', '')
if not release_year:
release_year = release_date[:4]
info.update({
                    'album': mobj.group('album').strip(),
'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
'track': mobj.group('track').strip(),
'release_date': release_date,
'release_year': int_or_none(release_year),
})
initial_data = None
if webpage:
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id,
'yt initial data')
if not initial_data:
query = {'videoId': video_id}
query.update(self._get_checkok_params())
initial_data = self._extract_response(
item_id=video_id, ep='next', fatal=False,
ytcfg=master_ytcfg, query=query,
headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')
        try:  # This will error if there is no live chat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
except (KeyError, IndexError, TypeError):
pass
else:
info.setdefault('subtitles', {})['live_chat'] = [{
'url': f'https://www.youtube.com/watch?v={video_id}', # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
}]
if initial_data:
info['chapters'] = (
self._extract_chapters_from_json(initial_data, duration)
or self._extract_chapters_from_engagement_panel(initial_data, duration)
or None)
contents = traverse_obj(
initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents'),
expected_type=list, default=[])
vpir = get_first(contents, 'videoPrimaryInfoRenderer')
if vpir:
stl = vpir.get('superTitleLink')
if stl:
stl = self._get_text(stl)
if try_get(
vpir,
lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
info['location'] = stl
else:
mobj = re.search(r'(.+?)\s*S(\d+)\s*•?\s*E(\d+)', stl)
if mobj:
info.update({
'series': mobj.group(1),
'season_number': int(mobj.group(2)),
'episode_number': int(mobj.group(3)),
})
for tlb in (try_get(
vpir,
lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
list) or []):
tbr = tlb.get('toggleButtonRenderer') or {}
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
lambda x: x['accessibility'],
lambda x: x['accessibilityData']['accessibilityData'],
], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
label = (try_get(tbr, getter, dict) or {}).get('label')
if label:
mobj = re.match(regex, label)
if mobj:
info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
break
sbr_tooltip = try_get(
vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
if sbr_tooltip:
like_count, dislike_count = sbr_tooltip.split(' / ')
info.update({
'like_count': str_to_int(like_count),
'dislike_count': str_to_int(dislike_count),
})
vsir = get_first(contents, 'videoSecondaryInfoRenderer')
if vsir:
vor = traverse_obj(vsir, ('owner', 'videoOwnerRenderer'))
info.update({
'channel': self._get_text(vor, 'title'),
'channel_follower_count': self._get_count(vor, 'subscriberCountText')})
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
list) or []
multiple_songs = False
for row in rows:
if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
multiple_songs = True
break
for row in rows:
mrr = row.get('metadataRowRenderer') or {}
mrr_title = mrr.get('title')
if not mrr_title:
continue
mrr_title = self._get_text(mrr, 'title')
mrr_contents_text = self._get_text(mrr, ('contents', 0))
if mrr_title == 'License':
info['license'] = mrr_contents_text
elif not multiple_songs:
if mrr_title == 'Album':
info['album'] = mrr_contents_text
elif mrr_title == 'Artist':
info['artist'] = mrr_contents_text
elif mrr_title == 'Song':
info['track'] = mrr_contents_text
fallbacks = {
'channel': 'uploader',
'channel_id': 'uploader_id',
'channel_url': 'uploader_url',
}
# The upload date for scheduled, live and past live streams / premieres in microformats
# may be different from the stream date. Although not in UTC, we will prefer it in this case.
# See: https://github.com/yt-dlp/yt-dlp/pull/2223#issuecomment-1008485139
upload_date = (
unified_strdate(get_first(microformats, 'uploadDate'))
or unified_strdate(search_meta('uploadDate')))
if not upload_date or (not info.get('is_live') and not info.get('was_live') and info.get('live_status') != 'is_upcoming'):
upload_date = strftime_or_none(self._extract_time_text(vpir, 'dateText')[0], '%Y%m%d')
info['upload_date'] = upload_date
for to, frm in fallbacks.items():
if not info.get(to):
info[to] = info.get(frm)
for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
v = info.get(s_k)
if v:
info[d_k] = v
is_private = get_first(video_details, 'isPrivate', expected_type=bool)
is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
is_membersonly = None
is_premium = None
if initial_data and is_private is not None:
is_membersonly = False
is_premium = False
contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
badge_labels = set()
for content in contents:
if not isinstance(content, dict):
continue
badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
for badge_label in badge_labels:
if badge_label.lower() == 'members only':
is_membersonly = True
elif badge_label.lower() == 'premium':
is_premium = True
elif badge_label.lower() == 'unlisted':
is_unlisted = True
info['availability'] = self._availability(
is_private=is_private,
needs_premium=is_premium,
needs_subscription=is_membersonly,
needs_auth=info['age_limit'] >= 18,
is_unlisted=None if is_private is None else is_unlisted)
info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
self.mark_watched(video_id, player_responses)
return info
class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
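    # passthrough_smuggled_data (note): wraps a (self, url, smuggled_data) extraction method so
    # that music.youtube.com URLs get 'is_music_url' added to the smuggled data, which is then
    # re-smuggled into the URL of every produced entry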
@staticmethod
def passthrough_smuggled_data(func):
def _smuggle(entries, smuggled_data):
for entry in entries:
# TODO: Convert URL to music.youtube instead.
# Do we need to passthrough any other smuggled_data?
entry['url'] = smuggle_url(entry['url'], smuggled_data)
yield entry
@functools.wraps(func)
def wrapper(self, url):
url, smuggled_data = unsmuggle_url(url, {})
if self.is_music_url(url):
smuggled_data['is_music_url'] = True
info_dict = func(self, url, smuggled_data)
if smuggled_data and info_dict.get('entries'):
info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data)
return info_dict
return wrapper
def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
if channel_id:
return channel_id
channel_url = self._html_search_meta(
('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
'twitter:app:url:googleplay'), webpage, 'channel url')
return self._search_regex(
            r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
channel_url, 'channel id')
@staticmethod
def _extract_basic_item_renderer(item):
# Modified from _extract_grid_item_renderer
known_basic_renderers = (
'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer', 'reelItemRenderer'
)
for key, renderer in item.items():
if not isinstance(renderer, dict):
continue
elif key in known_basic_renderers:
return renderer
elif key.startswith('grid') and key.endswith('Renderer'):
return renderer
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
renderer = self._extract_basic_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = self._get_text(renderer, 'title')
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
continue
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
continue
# channel
channel_id = renderer.get('channelId')
if channel_id:
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
continue
# generic endpoint URL support
ep_url = urljoin('https://www.youtube.com/', try_get(
renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if ep_url:
for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
if ie.suitable(ep_url):
yield self.url_result(
ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
break
def _music_reponsive_list_entry(self, renderer):
video_id = traverse_obj(renderer, ('playlistItemData', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
playlist_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'playlistId'))
if playlist_id:
video_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'videoId'))
if video_id:
return self.url_result(f'https://music.youtube.com/watch?v={video_id}&list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
return self.url_result(f'https://music.youtube.com/playlist?list={playlist_id}',
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
browse_id = traverse_obj(renderer, ('navigationEndpoint', 'browseEndpoint', 'browseId'))
if browse_id:
return self.url_result(f'https://music.youtube.com/browse/{browse_id}',
ie=YoutubeTabIE.ie_key(), video_id=browse_id)
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
# TODO: this includes only first N items
yield from self._grid_entries(renderer)
renderer = content.get('horizontalListRenderer')
if renderer:
# TODO
pass
def _shelf_entries(self, shelf_renderer, skip_channels=False):
ep = try_get(
shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str)
shelf_url = urljoin('https://www.youtube.com', ep)
if shelf_url:
            # Skip links to other channels; note that checking for
            # endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
            # will not work
if skip_channels and '/channels?' in shelf_url:
return
title = self._get_text(shelf_renderer, 'title')
yield self.url_result(shelf_url, video_title=title)
        # Shelf may not contain a shelf URL; fall back to extraction from content
yield from self._shelf_entries_from_content(shelf_renderer)
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
if not isinstance(content, dict):
continue
renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
if not isinstance(renderer, dict):
continue
video_id = renderer.get('videoId')
if not video_id:
continue
yield self._extract_video(renderer)
def _rich_entries(self, rich_grid_renderer):
renderer = try_get(
rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
video_id = renderer.get('videoId')
if not video_id:
return
yield self._extract_video(renderer)
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:
return self._extract_video(video_renderer)
def _hashtag_tile_entry(self, hashtag_tile_renderer):
url = urljoin('https://youtube.com', traverse_obj(
hashtag_tile_renderer, ('onTapCommand', 'commandMetadata', 'webCommandMetadata', 'url')))
if url:
return self.url_result(
url, ie=YoutubeTabIE.ie_key(), title=self._get_text(hashtag_tile_renderer, 'hashtag'))
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
if not post_renderer:
return
# video attachment
video_renderer = try_get(
post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
video_id = video_renderer.get('videoId')
if video_id:
entry = self._extract_video(video_renderer)
if entry:
yield entry
# playlist attachment
playlist_id = try_get(
post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
if playlist_id:
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
if not isinstance(run, dict):
continue
ep_url = try_get(
run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
if not ep_url:
continue
if not YoutubeIE.suitable(ep_url):
continue
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
if not isinstance(contents, list):
return
for content in contents:
renderer = content.get('backstagePostThreadRenderer')
if not isinstance(renderer, dict):
continue
yield from self._post_thread_entries(renderer)
r''' # unused
def _rich_grid_entries(self, contents):
for content in contents:
video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
if video_renderer:
entry = self._video_entry(video_renderer)
if entry:
yield entry
'''
def _extract_entries(self, parent_renderer, continuation_list):
        # continuation_list is shared with the caller and updated in-place (via slice/index
        # assignment) with the next continuation token, if any
continuation_list[:] = [None]
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
for content in contents:
if not isinstance(content, dict):
continue
is_renderer = traverse_obj(
content, 'itemSectionRenderer', 'musicShelfRenderer', 'musicShelfContinuation',
expected_type=dict)
if not is_renderer:
renderer = content.get('richItemRenderer')
if renderer:
for entry in self._rich_entries(renderer):
yield entry
continuation_list[0] = self._extract_continuation(parent_renderer)
continue
isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
for isr_content in isr_contents:
if not isinstance(isr_content, dict):
continue
known_renderers = {
'playlistVideoListRenderer': self._playlist_entries,
'gridRenderer': self._grid_entries,
'reelShelfRenderer': self._grid_entries,
'shelfRenderer': self._shelf_entries,
'musicResponsiveListItemRenderer': lambda x: [self._music_reponsive_list_entry(x)],
'backstagePostThreadRenderer': self._post_thread_entries,
'videoRenderer': lambda x: [self._video_entry(x)],
'playlistRenderer': lambda x: self._grid_entries({'items': [{'playlistRenderer': x}]}),
'channelRenderer': lambda x: self._grid_entries({'items': [{'channelRenderer': x}]}),
'hashtagTileRenderer': lambda x: [self._hashtag_tile_entry(x)]
}
for key, renderer in isr_content.items():
if key not in known_renderers:
continue
for entry in known_renderers[key](renderer):
if entry:
yield entry
continuation_list[0] = self._extract_continuation(renderer)
break
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(is_renderer)
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(parent_renderer)
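    # _entries yields entries from the initially rendered tab content, then follows API
    # continuations page by page, handling both the older 'continuationContents' and the newer
    # 'onResponseReceivedActions'/'onResponseReceivedEndpoints' response shapes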
def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
continuation_list = [None]
extract_entries = lambda x: self._extract_entries(x, continuation_list)
tab_content = try_get(tab, lambda x: x['content'], dict)
if not tab_content:
return
parent_renderer = (
try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
yield from extract_entries(parent_renderer)
continuation = continuation_list[0]
for page_num in itertools.count(1):
if not continuation:
break
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
response = self._extract_response(
item_id=f'{item_id} page {page_num}',
query=continuation, headers=headers, ytcfg=ytcfg,
check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
if not response:
break
# Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
# See: https://github.com/ytdl-org/youtube-dl/issues/28702
visitor_data = self._extract_visitor_data(response) or visitor_data
known_continuation_renderers = {
'playlistVideoListContinuation': self._playlist_entries,
'gridContinuation': self._grid_entries,
'itemSectionContinuation': self._post_thread_continuation_entries,
'sectionListContinuation': extract_entries, # for feeds
}
continuation_contents = try_get(
response, lambda x: x['continuationContents'], dict) or {}
continuation_renderer = None
for key, value in continuation_contents.items():
if key not in known_continuation_renderers:
continue
continuation_renderer = value
continuation_list = [None]
yield from known_continuation_renderers[key](continuation_renderer)
continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
break
if continuation_renderer:
continue
known_renderers = {
'videoRenderer': (self._grid_entries, 'items'), # for membership tab
'gridPlaylistRenderer': (self._grid_entries, 'items'),
'gridVideoRenderer': (self._grid_entries, 'items'),
'gridChannelRenderer': (self._grid_entries, 'items'),
'playlistVideoRenderer': (self._playlist_entries, 'contents'),
'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
'richItemRenderer': (extract_entries, 'contents'), # for hashtag
'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
}
on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
continuation_items = try_get(
on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
video_items_renderer = None
for key, value in continuation_item.items():
if key not in known_renderers:
continue
video_items_renderer = {known_renderers[key][1]: continuation_items}
continuation_list = [None]
yield from known_renderers[key][0](video_items_renderer)
continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
break
if video_items_renderer:
continue
break
@staticmethod
def _extract_selected_tab(tabs, fatal=True):
for tab in tabs:
renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
if renderer.get('selected') is True:
return renderer
else:
if fatal:
raise ExtractorError('Unable to find selected tab')
def _extract_uploader(self, data):
uploader = {}
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
owner = try_get(
renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
if owner:
owner_text = owner.get('text')
uploader['uploader'] = self._search_regex(
r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text)
uploader['uploader_id'] = try_get(
owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
uploader['uploader_url'] = urljoin(
'https://www.youtube.com/',
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
playlist_id = title = description = channel_url = channel_name = channel_id = None
tags = []
selected_tab = self._extract_selected_tab(tabs)
primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
renderer = try_get(
data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
if renderer:
channel_name = renderer.get('title')
channel_url = renderer.get('channelUrl')
channel_id = renderer.get('externalId')
else:
renderer = try_get(
data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
if renderer:
title = renderer.get('title')
description = renderer.get('description', '')
playlist_id = channel_id
tags = renderer.get('keywords', '').split()
# We can get the uncropped banner/avatar by replacing the crop params with '=s0'
# See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714
def _get_uncropped(url):
return url_or_none((url or '').split('=')[0] + '=s0')
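        # e.g. (illustrative) '.../avatar=s900-c-k-c0x00ffffff-no-rj' -> '.../avatar=s0'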
avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar')
if avatar_thumbnails:
uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url'])
if uncropped_avatar:
avatar_thumbnails.append({
'url': uncropped_avatar,
'id': 'avatar_uncropped',
'preference': 1
})
channel_banners = self._extract_thumbnails(
data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner']))
for banner in channel_banners:
banner['preference'] = -10
if channel_banners:
uncropped_banner = _get_uncropped(channel_banners[0]['url'])
if uncropped_banner:
channel_banners.append({
'url': uncropped_banner,
'id': 'banner_uncropped',
'preference': -5
})
primary_thumbnails = self._extract_thumbnails(
primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail'))
if playlist_id is None:
playlist_id = item_id
playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats')
last_updated_unix, _ = self._extract_time_text(playlist_stats, 2)
if title is None:
title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id
title += format_field(selected_tab, 'title', ' - %s')
title += format_field(selected_tab, 'expandedText', ' - %s')
metadata = {
'playlist_id': playlist_id,
'playlist_title': title,
'playlist_description': description,
'uploader': channel_name,
'uploader_id': channel_id,
'uploader_url': channel_url,
'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners,
'tags': tags,
'view_count': self._get_count(playlist_stats, 1),
'availability': self._extract_availability(data),
'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'),
'playlist_count': self._get_count(playlist_stats, 0),
'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')),
}
if not channel_id:
metadata.update(self._extract_uploader(data))
metadata.update({
'channel': metadata['uploader'],
'channel_id': metadata['uploader_id'],
'channel_url': metadata['uploader_url']})
return self.playlist_result(
self._entries(
selected_tab, playlist_id, ytcfg,
self._extract_account_syncid(ytcfg, data),
self._extract_visitor_data(data, ytcfg)),
**metadata)
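    # Mix playlists (e.g. RD* radio mixes) are generated endlessly, so keep requesting the
    # 'next' endpoint until the first video comes around again or no new videos are returned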
def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
first_id = last_id = response = None
for page_num in itertools.count(1):
videos = list(self._playlist_entries(playlist))
if not videos:
return
start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
if start >= len(videos):
return
for video in videos[start:]:
if video['id'] == first_id:
self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
return
yield video
first_id = first_id or videos[0]['id']
last_id = videos[-1]['id']
watch_endpoint = try_get(
playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(response, data, ytcfg))
query = {
'playlistId': playlist_id,
'videoId': watch_endpoint.get('videoId') or last_id,
'index': watch_endpoint.get('index') or len(videos),
'params': watch_endpoint.get('params') or 'OAE%3D'
}
response = self._extract_response(
item_id='%s page %d' % (playlist_id, page_num),
query=query, ep='next', headers=headers, ytcfg=ytcfg,
check_get_keys='contents'
)
playlist = try_get(
response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
# Delegating everything except mix playlists to regular tab-based playlist URL
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
if playlist_url and playlist_url != url:
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
return self.playlist_result(
self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
playlist_id=playlist_id, playlist_title=title)
def _extract_availability(self, data):
"""
Gets the availability of a given playlist/tab.
Note: Unless YouTube tells us explicitly, we do not assume it is public
@param data: response
"""
is_private = is_unlisted = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
badge_labels = self._extract_badges(renderer)
# Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
privacy_dropdown_entries = try_get(
renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
for renderer_dict in privacy_dropdown_entries:
is_selected = try_get(
renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
if not is_selected:
continue
label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
if label:
badge_labels.add(label.lower())
break
for badge_label in badge_labels:
if badge_label == 'unlisted':
is_unlisted = True
elif badge_label == 'private':
is_private = True
elif badge_label == 'public':
is_unlisted = is_private = False
return self._availability(is_private, False, False, False, is_unlisted)
@staticmethod
def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
sidebar_renderer = try_get(
data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
for item in sidebar_renderer:
renderer = try_get(item, lambda x: x[info_renderer], expected_type)
if renderer:
return renderer
def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
"""
Get playlist with unavailable videos if the 'show unavailable videos' button exists.
"""
browse_id = params = None
renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
if not renderer:
return
menu_renderer = try_get(
renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
for menu_item in menu_renderer:
if not isinstance(menu_item, dict):
continue
nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
text = try_get(
nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
if not text or text.lower() != 'show unavailable videos':
continue
browse_endpoint = try_get(
nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
browse_id = browse_endpoint.get('browseId')
params = browse_endpoint.get('params')
break
headers = self.generate_api_headers(
ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
visitor_data=self._extract_visitor_data(data, ytcfg))
query = {
'params': params or 'wgYCCAA=',
'browseId': browse_id or 'VL%s' % item_id
}
return self._extract_response(
item_id=item_id, headers=headers, query=query,
check_get_keys='contents', fatal=False, ytcfg=ytcfg,
note='Downloading API JSON with unavailable videos')
@property
def skip_webpage(self):
return 'webpage' in self._configuration_arg('skip', ie_key=YoutubeTabIE.ie_key())
def _extract_webpage(self, url, item_id, fatal=True):
retries = self.get_param('extractor_retries', 3)
count = -1
webpage = data = last_error = None
while count < retries:
count += 1
            # Sometimes YouTube returns a webpage with incomplete ytInitialData
# See: https://github.com/yt-dlp/yt-dlp/issues/116
if last_error:
self.report_warning('%s. Retrying ...' % last_error)
try:
webpage = self._download_webpage(
url, item_id,
note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
except ExtractorError as e:
if isinstance(e.cause, network_exceptions):
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
else:
try:
self._extract_and_report_alerts(data)
except ExtractorError as e:
if fatal:
raise
self.report_warning(error_to_compat_str(e))
break
if dict_get(data, ('contents', 'currentVideoEndpoint', 'onResponseReceivedActions')):
break
last_error = 'Incomplete yt initial data received'
if count >= retries:
if fatal:
raise ExtractorError(last_error)
self.report_warning(last_error)
break
return webpage, data
def _report_playlist_authcheck(self, ytcfg, fatal=True):
"""Use if failed to extract ytcfg (and data) from initial webpage"""
if not ytcfg and self.is_authenticated:
msg = 'Playlists that require authentication may not extract correctly without a successful webpage download'
if 'authcheck' not in self._configuration_arg('skip', ie_key=YoutubeTabIE.ie_key()) and fatal:
raise ExtractorError(
f'{msg}. If you are not downloading private content, or '
'your cookies are only for the first account and channel,'
' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
expected=True)
self.report_warning(msg, only_once=True)
def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
data = None
if not self.skip_webpage:
webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
        # Reject webpage data if we were redirected to the home page without explicitly requesting it
selected_tab = self._extract_selected_tab(traverse_obj(
data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {}
if (url != 'https://www.youtube.com/feed/recommended'
and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch' # Home page
and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])):
msg = 'The channel/playlist does not exist and the URL redirected to youtube.com home page'
if fatal:
raise ExtractorError(msg, expected=True)
self.report_warning(msg, only_once=True)
if not data:
self._report_playlist_authcheck(ytcfg, fatal=fatal)
data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
return data, ytcfg
def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
resolve_response = self._extract_response(
item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
for ep_key, ep in endpoints.items():
params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
if params:
return self._extract_response(
item_id=item_id, query=params, ep=ep, headers=headers,
ytcfg=ytcfg, fatal=fatal, default_client=default_client,
check_get_keys=('contents', 'currentVideoEndpoint', 'onResponseReceivedActions'))
err_note = 'Failed to resolve url (does the playlist exist?)'
if fatal:
raise ExtractorError(err_note, expected=True)
self.report_warning(err_note, item_id)
_SEARCH_PARAMS = None
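    # _search_results posts to the 'search' API endpoint (optionally with a _SEARCH_PARAMS
    # filter) and pages through continuations; content_keys covers both the regular web and
    # the YouTube Music response layouts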
def _search_results(self, query, params=NO_DEFAULT, default_client='web'):
data = {'query': query}
if params is NO_DEFAULT:
params = self._SEARCH_PARAMS
if params:
data['params'] = params
content_keys = (
('contents', 'twoColumnSearchResultsRenderer', 'primaryContents', 'sectionListRenderer', 'contents'),
('onResponseReceivedCommands', 0, 'appendContinuationItemsAction', 'continuationItems'),
# ytmusic search
('contents', 'tabbedSearchResultsRenderer', 'tabs', 0, 'tabRenderer', 'content', 'sectionListRenderer', 'contents'),
('continuationContents', ),
)
display_id = f'query "{query}"'
check_get_keys = tuple({keys[0] for keys in content_keys})
ytcfg = self._download_ytcfg(default_client, display_id) if not self.skip_webpage else {}
self._report_playlist_authcheck(ytcfg, fatal=False)
continuation_list = [None]
search = None
for page_num in itertools.count(1):
data.update(continuation_list[0] or {})
headers = self.generate_api_headers(
ytcfg=ytcfg, visitor_data=self._extract_visitor_data(search), default_client=default_client)
search = self._extract_response(
item_id=f'{display_id} page {page_num}', ep='search', query=data,
default_client=default_client, check_get_keys=check_get_keys, ytcfg=ytcfg, headers=headers)
slr_contents = traverse_obj(search, *content_keys)
yield from self._extract_entries({'contents': list(variadic(slr_contents))}, continuation_list)
if not continuation_list[0]:
break
class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube Tabs'
_VALID_URL = r'''(?x:
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
%(invidious)s
)/
(?:
(?P<channel_type>channel|c|user|browse)/|
(?P<not_channel>
feed/|hashtag/|
(?:playlist|watch)\?.*?\blist=
)|
(?!(?:%(reserved_names)s)\b) # Direct URLs
)
(?P<id>[^/?\#&]+)
)''' % {
'reserved_names': YoutubeBaseInfoExtractor._RESERVED_NAMES,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:tab'
_TESTS = [{
'note': 'playlists, multipage',
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader': 'Igor Kleiner',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, multipage, different order',
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'id': 'UCqj7Cz7revf5maW9g5pgNcg',
'title': 'Igor Kleiner - Playlists',
'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'uploader': 'Igor Kleiner',
'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'],
'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'channel': 'Igor Kleiner',
'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg',
'channel_follower_count': int
},
}, {
'note': 'playlists, series',
'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
'playlist_mincount': 5,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Playlists',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'uploader': '3Blue1Brown',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel_follower_count': int
},
}, {
'note': 'playlists, singlepage',
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'title': 'ThirstForScience - Playlists',
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
'uploader': 'ThirstForScience',
'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'uploader_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ',
'channel_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
'tags': 'count:13',
'channel': 'ThirstForScience',
'channel_follower_count': int
}
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
'note': 'basic, single video playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'title': 'youtube-dl public playlist',
'description': '',
'tags': [],
'view_count': int,
'modified_date': '20201130',
'channel': 'Sergey M.',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 1,
}, {
'note': 'empty playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'uploader': 'Sergey M.',
'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'title': 'youtube-dl empty playlist',
'tags': [],
'channel': 'Sergey M.',
'description': '',
'modified_date': '20160902',
'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 0,
}, {
'note': 'Home tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Home',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 2,
}, {
'note': 'Videos tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_follower_count': int
},
'playlist_mincount': 975,
}, {
'note': 'Videos tab, sorted by popular',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Videos',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'tags': ['bible', 'history', 'prophesy'],
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_follower_count': int
},
'playlist_mincount': 199,
}, {
'note': 'Playlists tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Playlists',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 17,
}, {
'note': 'Community tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Community',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 18,
}, {
'note': 'Channels tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'title': 'lex will - Channels',
'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
'uploader': 'lex will',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel': 'lex will',
'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'tags': ['bible', 'history', 'prophesy'],
'channel_follower_count': int
},
'playlist_mincount': 12,
}, {
'note': 'Search tab',
'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
'playlist_mincount': 40,
'info_dict': {
'id': 'UCYO_jab_esuFRV4b17AJtAw',
'title': '3Blue1Brown - Search - linear algebra',
'description': 'md5:e1384e8a133307dd10edee76e875d62f',
'uploader': '3Blue1Brown',
'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
'tags': ['Mathematics'],
'channel': '3Blue1Brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
'channel_follower_count': int
},
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
}, {
        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 also appears twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'uploader': 'Christiaan008',
'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'view_count': int,
'modified_date': '20150605',
'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
'channel_url': 'https://www.youtube.com/c/ChRiStIaAn008',
'channel': 'Christiaan008',
},
'playlist_count': 96,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
'uploader': 'Cauchemar',
'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
'channel_url': 'https://www.youtube.com/c/Cauchemar89',
'tags': [],
'modified_date': r're:\d{8}',
'channel': 'Cauchemar',
'uploader_url': 'https://www.youtube.com/c/Cauchemar89',
'view_count': int,
'description': '',
'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 1123,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'even larger playlist, 8832 videos',
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
'uploader': 'Interstellar Movie',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'uploader_url': 'https://www.youtube.com/c/InterstellarMovie',
'tags': [],
'view_count': int,
'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
'channel_url': 'https://www.youtube.com/c/InterstellarMovie',
'channel': 'Interstellar Movie',
'description': '',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 21,
}, {
'note': 'Playlist with "show unavailable videos" button',
'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
'info_dict': {
'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
'uploader': 'Phim Siêu Nhân Nhật Bản',
'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'view_count': int,
'channel': 'Phim Siêu Nhân Nhật Bản',
'tags': [],
'uploader_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q',
'channel_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
'modified_date': r're:\d{8}',
},
'playlist_mincount': 200,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Playlist with unavailable videos in page 7',
'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
'info_dict': {
'title': 'Uploads from BlankTV',
'id': 'UU8l9frL61Yl5KFOl87nIm2w',
'uploader': 'BlankTV',
'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'channel': 'BlankTV',
'channel_url': 'https://www.youtube.com/c/blanktv',
'channel_id': 'UC8l9frL61Yl5KFOl87nIm2w',
'view_count': int,
'tags': [],
'uploader_url': 'https://www.youtube.com/c/blanktv',
'modified_date': r're:\d{8}',
'description': '',
},
'playlist_mincount': 1000,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'uploader': 'Computerphile',
'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
'uploader_url': 'https://www.youtube.com/user/Computerphile',
'tags': [],
'view_count': int,
'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA',
'channel_url': 'https://www.youtube.com/user/Computerphile',
'channel': 'Computerphile',
},
'playlist_mincount': 11,
}, {
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
'note': 'Playlist URL that does not actually serve a playlist',
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'view_count': int,
'like_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
'id': 'GgL890LIznQ', # This will keep changing
'ext': 'mp4',
'title': str,
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
'upload_date': r're:\d{8}',
'description': str,
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'release_timestamp': 1642502819,
'channel': 'Sky News',
'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ',
'age_limit': 0,
'view_count': int,
'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg',
'playable_in_embed': True,
'release_date': '20220118',
'availability': 'public',
'live_status': 'is_live',
'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ',
'channel_follower_count': int
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Ignoring subtitle tracks found in '],
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
},
'params': {
'skip_download': True,
},
'only_matching': True,
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'note': 'A channel that is not live. Should raise error',
'url': 'https://www.youtube.com/user/numberphile/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
'note': 'Recommended - redirects to home page.',
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
'note': 'inline playlist with not always working continuations',
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/course',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/zsecurity',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/NASAgovVideo/videos',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/hashtag/cctv9',
'info_dict': {
'id': 'cctv9',
'title': '#cctv9',
'tags': [],
},
'playlist_mincount': 350,
}, {
'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
'only_matching': True,
}, {
'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'only_matching': True
}, {
'note': '/browse/ should redirect to /channel/',
'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
'only_matching': True
}, {
'note': 'VLPL, should redirect to playlist?list=PL...',
'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'info_dict': {
'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'uploader': 'NoCopyrightSounds',
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'title': 'NCS Releases',
'uploader_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'channel_url': 'https://www.youtube.com/c/NoCopyrightSounds',
'modified_date': r're:\d{8}',
'view_count': int,
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'tags': [],
'channel': 'NoCopyrightSounds',
},
'playlist_mincount': 166,
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'note': 'Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'tags': [],
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'modified_date': r're:\d{8}',
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
},
'expected_warnings': [
'The URL does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
}, {
'note': 'Topic without a UU playlist',
'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
'info_dict': {
'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
'tags': [],
},
'expected_warnings': [
'the playlist redirect gave error',
],
'playlist_mincount': 9,
}, {
'note': 'Youtube music Album',
'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
'info_dict': {
'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
'tags': [],
'view_count': int,
'description': '',
'availability': 'unlisted',
'modified_date': r're:\d{8}',
},
'playlist_count': 50,
}, {
'note': 'unlisted single video playlist',
'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'info_dict': {
'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'uploader': 'colethedj',
'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
'title': 'yt-dlp unlisted playlist test',
'availability': 'unlisted',
'tags': [],
'modified_date': '20211208',
'channel': 'colethedj',
'view_count': int,
'description': '',
'uploader_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
'channel_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q',
},
'playlist_count': 1,
}, {
'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
'url': 'https://www.youtube.com/feed/recommended',
'info_dict': {
'id': 'recommended',
'title': 'recommended',
'tags': [],
},
'playlist_mincount': 50,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: /videos tab, sorted by oldest first',
'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
'info_dict': {
'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'title': 'Cody\'sLab - Videos',
'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
'uploader': 'Cody\'sLab',
'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel': 'Cody\'sLab',
'channel_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
'tags': [],
'channel_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'uploader_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw',
'channel_follower_count': int
},
'playlist_mincount': 650,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
'info_dict': {
'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'title': 'Uploads from Royalty Free Music - Topic',
'uploader': 'Royalty Free Music - Topic',
'modified_date': r're:\d{8}',
'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
'tags': [],
'channel': 'Royalty Free Music - Topic',
'view_count': int,
'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
},
'expected_warnings': [
'does not have a videos tab',
r'[Uu]navailable videos (are|will be) hidden',
],
'playlist_mincount': 101,
'params': {
'skip_download': True,
'extractor_args': {'youtubetab': {'skip': ['webpage']}}
},
}, {
'note': 'non-standard redirect to regional channel',
'url': 'https://www.youtube.com/channel/UCwVVpHQ2Cs9iGJfpdFngePQ',
'only_matching': True
}, {
'note': 'collaborative playlist (uploader name in the form "by <uploader> and x other(s)")',
'url': 'https://www.youtube.com/playlist?list=PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'info_dict': {
'id': 'PLx-_-Kk4c89oOHEDQAojOXzEzemXxoqx6',
'modified_date': '20220407',
'channel_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
'tags': [],
'uploader_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'uploader': 'pukkandan',
'availability': 'unlisted',
'channel_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q',
'channel': 'pukkandan',
'description': 'Test for collaborative playlist',
'title': 'yt-dlp test - collaborative playlist',
'uploader_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q',
},
'playlist_mincount': 2
}]
@classmethod
def suitable(cls, url):
return False if YoutubeIE.suitable(url) else super().suitable(url)
_URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
@YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
def _real_extract(self, url, smuggled_data):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
compat_opts = self.get_param('compat_opts', [])
def get_mobj(url):
mobj = self._URL_RE.match(url).groupdict()
mobj.update((k, '') for k, v in mobj.items() if v is None)
return mobj
mobj, redirect_warning = get_mobj(url), None
# Youtube returns incomplete data if tabname is not lower case
pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
if is_channel:
if smuggled_data.get('is_music_url'):
if item_id[:2] == 'VL': # Youtube music VL channels have an equivalent playlist
item_id = item_id[2:]
pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
elif item_id[:2] == 'MP': # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
mdata = self._extract_tab_endpoint(
f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
get_all=False, expected_type=compat_str)
if not murl:
raise ExtractorError('Failed to resolve album to playlist')
return self.url_result(murl, ie=YoutubeTabIE.ie_key())
elif mobj['channel_type'] == 'browse': # Youtube music /browse/ should be changed to /channel/
pre = f'https://www.youtube.com/channel/{item_id}'
original_tab_name = tab
if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
# Home URLs should redirect to /videos/
redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
tab = '/videos'
url = ''.join((pre, tab, post))
mobj = get_mobj(url)
# Handle both video/playlist URLs
qs = parse_qs(url)
video_id, playlist_id = (qs.get(key, [None])[0] for key in ('v', 'list'))
if not video_id and mobj['not_channel'].startswith('watch'):
if not playlist_id:
# If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
# Common mistake: https://www.youtube.com/watch?list=playlist_id
self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
url = f'https://www.youtube.com/playlist?list={playlist_id}'
mobj = get_mobj(url)
if video_id and playlist_id:
if self.get_param('noplaylist'):
self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
data, ytcfg = self._extract_data(url, item_id)
# YouTube may provide a non-standard redirect to the regional channel
# See: https://github.com/yt-dlp/yt-dlp/issues/2694
redirect_url = traverse_obj(
data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
redirect_url = ''.join((
urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
selected_tab = self._extract_selected_tab(tabs)
selected_tab_name = selected_tab.get('title', '').lower()
if selected_tab_name == 'home':
selected_tab_name = 'featured'
requested_tab_name = mobj['tab'][1:]
if 'no-youtube-channel-redirect' not in compat_opts:
if requested_tab_name == 'live':
# Live tab should have redirected to the video
raise ExtractorError('The channel is not currently live', expected=True)
if requested_tab_name not in ('', selected_tab_name):
redirect_warning = f'The channel does not have a {requested_tab_name} tab'
if not original_tab_name:
if item_id[:2] == 'UC':
# Topic channels don't have /videos. Use the equivalent playlist instead
pl_id = f'UU{item_id[2:]}'
pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
try:
data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
except ExtractorError:
redirect_warning += ' and the playlist redirect gave error'
else:
item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
redirect_warning += f'. Redirecting to playlist {pl_id} instead'
if selected_tab_name and selected_tab_name != requested_tab_name:
redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
else:
raise ExtractorError(redirect_warning, expected=True)
if redirect_warning:
self.to_screen(redirect_warning)
self.write_debug(f'Final URL: {url}')
# YouTube sometimes provides a button to reload playlist with unavailable videos.
if 'no-youtube-unavailable-videos' not in compat_opts:
data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
self._extract_and_report_alerts(data, only_once=True)
tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
if tabs:
return self._extract_from_tabs(item_id, ytcfg, data, tabs)
playlist = traverse_obj(
data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
if playlist:
return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
video_id = traverse_obj(
data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
if video_id:
if mobj['tab'] != '/live': # live tab is expected to redirect to video
self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
ie=YoutubeIE.ie_key(), video_id=video_id)
raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
IE_DESC = 'YouTube playlists'
_VALID_URL = r'''(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
%(invidious)s
)
/.*?\?.*?\blist=
)?
(?P<id>%(playlist_id)s)
)''' % {
'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
'uploader': 'Wickman',
'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
'view_count': int,
'uploader_url': 'https://www.youtube.com/user/Wickydoo',
'modified_date': r're:\d{8}',
'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
'channel': 'Wickman',
'tags': [],
'channel_url': 'https://www.youtube.com/user/Wickydoo',
},
'playlist_mincount': 29,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'uploader': 'milan',
'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'description': '',
'channel_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
'tags': [],
'modified_date': '20140919',
'view_count': int,
'channel': 'milan',
'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 654,
'info_dict': {
'title': '2018 Chinese New Singles (11/6 updated)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'uploader': 'LBK',
'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'description': 'md5:da521864744d60a198e3a88af4db0d9d',
'channel': 'LBK',
'view_count': int,
'channel_url': 'https://www.youtube.com/c/愛低音的國王',
'tags': [],
'uploader_url': 'https://www.youtube.com/c/愛低音的國王',
'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
'modified_date': r're:\d{8}',
},
'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
if YoutubeTabIE.suitable(url):
return False
from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('v', [None])[0]:
return False
return super().suitable(url)
def _real_extract(self, url):
playlist_id = self._match_id(url)
is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
url = update_url_query(
'https://www.youtube.com/playlist',
parse_qs(url) or {'list': playlist_id})
if is_music_url:
url = smuggle_url(url, {'is_music_url': True})
return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
IE_DESC = 'youtu.be'
_VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TESTS = [{
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'age_limit': 0,
'playable_in_embed': True,
'thumbnail': 'https://i.ytimg.com/vi_webp/yeWKywCrFtk/maxresdefault.webp',
'channel': 'Backus-Page House Museum',
'channel_id': 'UCEfMCQ9bs3tjvjy1s451zaw',
'live_status': 'not_live',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UCEfMCQ9bs3tjvjy1s451zaw',
'availability': 'public',
'duration': 59,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
playlist_id = mobj.group('playlist_id')
return self.url_result(
update_url_query('https://www.youtube.com/watch', {
'v': video_id,
'list': playlist_id,
'feature': 'youtu.be',
}), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeLivestreamEmbedIE(InfoExtractor):
IE_DESC = 'YouTube livestream embeds'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
_TESTS = [{
'url': 'https://www.youtube.com/embed/live_stream?channel=UC2_KI6RB__jGdlnK6dvFEZA',
'only_matching': True,
}]
def _real_extract(self, url):
channel_id = self._match_id(url)
return self.url_result(
f'https://www.youtube.com/channel/{channel_id}/live',
ie=YoutubeTabIE.ie_key(), video_id=channel_id)
class YoutubeYtUserIE(InfoExtractor):
IE_DESC = 'YouTube user videos; "ytuser:" prefix'
IE_NAME = 'youtube:user'
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
'https://www.youtube.com/user/%s/videos' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
_VALID_URL = r':ytfav(?:ou?rite)?s?'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': ':ytfav',
'only_matching': True,
}, {
'url': ':ytfavorites',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=LL',
ie=YoutubeTabIE.ie_key())
class YoutubeNotificationsIE(YoutubeTabBaseInfoExtractor):
IE_NAME = 'youtube:notif'
IE_DESC = 'YouTube notifications; ":ytnotif" keyword (requires cookies)'
_VALID_URL = r':ytnotif(?:ication)?s?'
_LOGIN_REQUIRED = True
_TESTS = [{
'url': ':ytnotif',
'only_matching': True,
}, {
'url': ':ytnotifications',
'only_matching': True,
}]
def _extract_notification_menu(self, response, continuation_list):
notification_list = traverse_obj(
response,
('actions', 0, 'openPopupAction', 'popup', 'multiPageMenuRenderer', 'sections', 0, 'multiPageMenuNotificationSectionRenderer', 'items'),
('actions', 0, 'appendContinuationItemsAction', 'continuationItems'),
expected_type=list) or []
continuation_list[0] = None
for item in notification_list:
entry = self._extract_notification_renderer(item.get('notificationRenderer'))
if entry:
yield entry
continuation = item.get('continuationItemRenderer')
if continuation:
continuation_list[0] = continuation
def _extract_notification_renderer(self, notification):
video_id = traverse_obj(
notification, ('navigationEndpoint', 'watchEndpoint', 'videoId'), expected_type=str)
url = f'https://www.youtube.com/watch?v={video_id}'
channel_id = None
if not video_id:
browse_ep = traverse_obj(
notification, ('navigationEndpoint', 'browseEndpoint'), expected_type=dict)
channel_id = traverse_obj(browse_ep, 'browseId', expected_type=str)
post_id = self._search_regex(
r'/post/(.+)', traverse_obj(browse_ep, 'canonicalBaseUrl', expected_type=str),
'post id', default=None)
if not channel_id or not post_id:
return
# The direct /post url redirects to this in the browser
url = f'https://www.youtube.com/channel/{channel_id}/community?lb={post_id}'
channel = traverse_obj(
notification, ('contextualMenu', 'menuRenderer', 'items', 1, 'menuServiceItemRenderer', 'text', 'runs', 1, 'text'),
expected_type=str)
title = self._search_regex(
rf'{re.escape(channel)} [^:]+: (.+)', self._get_text(notification, 'shortMessage'),
'video title', default=None)
if title:
title = title.replace('\xad', '') # remove soft hyphens
upload_date = (strftime_or_none(self._extract_time_text(notification, 'sentTimeText')[0], '%Y%m%d')
if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE.ie_key())
else None)
return {
'_type': 'url',
'url': url,
'ie_key': (YoutubeIE if video_id else YoutubeTabIE).ie_key(),
'video_id': video_id,
'title': title,
'channel_id': channel_id,
'channel': channel,
'thumbnails': self._extract_thumbnails(notification, 'videoThumbnail'),
'upload_date': upload_date,
}
def _notification_menu_entries(self, ytcfg):
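        # Page through the notification menu: each response may carry a continuation
        # token, which is passed back as ``ctoken`` on the next request until exhausted.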
continuation_list = [None]
response = None
for page in itertools.count(1):
ctoken = traverse_obj(
continuation_list, (0, 'continuationEndpoint', 'getNotificationMenuEndpoint', 'ctoken'), expected_type=str)
response = self._extract_response(
item_id=f'page {page}', query={'ctoken': ctoken} if ctoken else {}, ytcfg=ytcfg,
ep='notification/get_notification_menu', check_get_keys='actions',
headers=self.generate_api_headers(ytcfg=ytcfg, visitor_data=self._extract_visitor_data(response)))
yield from self._extract_notification_menu(response, continuation_list)
if not continuation_list[0]:
break
def _real_extract(self, url):
display_id = 'notifications'
ytcfg = self._download_ytcfg('web', display_id) if not self.skip_webpage else {}
self._report_playlist_authcheck(ytcfg)
return self.playlist_result(self._notification_menu_entries(ytcfg), display_id, display_id)
class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_DESC = 'YouTube search'
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = 'EgIQAQ%3D%3D' # Videos only
_TESTS = [{
'url': 'ytsearch5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube search, newest videos first'
_SEARCH_PARAMS = 'CAISAhAB' # Videos only, sorted by date
_TESTS = [{
'url': 'ytsearchdate5:youtube-dl test video',
'playlist_count': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}]
class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube search URLs with sorting and filter support'
IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:results|search)\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?search_query=python&sp=EgIQAg%253D%253D',
'playlist_mincount': 5,
'info_dict': {
'id': 'python',
'title': 'python',
}
}, {
'url': 'https://www.youtube.com/results?search_query=%23cats',
'playlist_mincount': 1,
'info_dict': {
'id': '#cats',
'title': '#cats',
'entries': [{
'url': r're:https://(www\.)?youtube\.com/hashtag/cats',
'title': '#cats',
}],
},
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
return self.playlist_result(self._search_results(query, qs.get('sp', (None,))[0]), query, query)
class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
IE_DESC = 'YouTube music search URLs with selectable sections (Eg: #songs)'
IE_NAME = 'youtube:music:search_url'
_VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
_TESTS = [{
'url': 'https://music.youtube.com/search?q=royalty+free+music',
'playlist_count': 16,
'info_dict': {
'id': 'royalty free music',
'title': 'royalty free music',
}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music&sp=EgWKAQIIAWoKEAoQAxAEEAkQBQ%3D%3D',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - songs',
'title': 'royalty free music - songs',
},
'params': {'extract_flat': 'in_playlist'}
}, {
'url': 'https://music.youtube.com/search?q=royalty+free+music#community+playlists',
'playlist_mincount': 30,
'info_dict': {
'id': 'royalty free music - community playlists',
'title': 'royalty free music - community playlists',
},
'params': {'extract_flat': 'in_playlist'}
}]
_SECTIONS = {
'albums': 'EgWKAQIYAWoKEAoQAxAEEAkQBQ==',
'artists': 'EgWKAQIgAWoKEAoQAxAEEAkQBQ==',
'community playlists': 'EgeKAQQoAEABagoQChADEAQQCRAF',
'featured playlists': 'EgeKAQQoADgBagwQAxAJEAQQDhAKEAU==',
'songs': 'EgWKAQIIAWoKEAoQAxAEEAkQBQ==',
'videos': 'EgWKAQIQAWoKEAoQAxAEEAkQBQ==',
}
def _real_extract(self, url):
qs = parse_qs(url)
query = (qs.get('search_query') or qs.get('q'))[0]
params = qs.get('sp', (None,))[0]
if params:
section = next((k for k, v in self._SECTIONS.items() if v == params), params)
else:
section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
params = self._SECTIONS.get(section)
if not params:
section = None
title = join_nonempty(query, section, delim=' - ')
return self.playlist_result(self._search_results(query, params, default_client='web_music'), title, title)
class YoutubeFeedsInfoExtractor(InfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True
def _real_initialize(self):
YoutubeBaseInfoExtractor._check_login_required(self)
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_extract(self, url):
return self.url_result(
f'https://www.youtube.com/feed/{self._FEED_NAME}', ie=YoutubeTabIE.ie_key())
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
_VALID_URL = r':ytwatchlater'
_TESTS = [{
'url': ':ytwatchlater',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_LOGIN_REQUIRED = False
_TESTS = [{
'url': ':ytrec',
'only_matching': True,
}, {
'url': ':ytrecommended',
'only_matching': True,
}, {
'url': 'https://youtube.com',
'only_matching': True,
}]
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
_VALID_URL = r':ytsub(?:scription)?s?'
_FEED_NAME = 'subscriptions'
_TESTS = [{
'url': ':ytsubs',
'only_matching': True,
}, {
'url': ':ytsubscriptions',
'only_matching': True,
}]
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history; ":ythis" keyword (requires cookies)'
_VALID_URL = r':ythis(?:tory)?'
_FEED_NAME = 'history'
_TESTS = [{
'url': ':ythistory',
'only_matching': True,
}]
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeClipIE(InfoExtractor):
IE_NAME = 'youtube:clip'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
def _real_extract(self, url):
self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
return self.url_result(url, 'Generic')
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
f'Incomplete YouTube ID {video_id}. URL {url} looks truncated.',
expected=True)
|
from numbers import Number
from time import time
from typing import Iterable, Hashable
from diskcache import Index
from .helper import hash
class CacheSet:
"""
A Set-like Cache that wraps :class:`diskcache.Index`
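
    Illustrative usage (a sketch; assumes the default temporary cache
    directory and that ``helper.hash`` accepts plain strings):

        s = CacheSet(['a', 'b'])
        s.add('c')
        assert 'c' in s
        s.remove('a')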
"""
def __init__(self, iterable=(), directory=None):
self.index = Index(directory)
self.update(*iterable)
def add(self, obj: object):
if not isinstance(obj, Hashable):
raise TypeError(f'{type(obj)} is not Hashable',
f'{str(obj)[:100]}...')
self.index[hash(obj)] = obj
def remove(self, obj):
try:
self.index.pop(hash(obj))
except KeyError:
raise KeyError(obj)
def pop(self):
return self.index.popitem()[1]
def update(self, *obj):
for o in obj:
self.add(o)
def clear(self):
self.index.clear()
def difference(self, other):
        return self.__sub__(other)
def copy(self):
return set(self).copy()
def __iter__(self):
return iter(self.index.values())
def __contains__(self, item):
        item_hash = hash(item)
        return item_hash in self.index
def __sub__(self, other: Iterable):
return set(self) - set(other)
def __len__(self):
return len(self.index)
def __str__(self):
        return f'CacheSet({", ".join(self)})'
def __repr__(self):
return str(self)
@property
def directory(self):
return self.index.directory
class EvictingIndex(Index):
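    """
    An Index whose stored values are wrapped in a dict together with the time
    they were added and an ``expire`` window in seconds. Eviction is lazy:
    ``_check_expired`` pops an entry and raises ``ExpiredError`` once its age
    exceeds ``expire``, and ``__getitem__`` drops entries that are not wrapped
    dicts.
    """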
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def set(self, key, value, expire):
self.__setitem__(key, value, expire)
def __getitem__(self, key):
obj: dict = super().__getitem__(key)
if not isinstance(obj, dict):
self.pop(key)
raise KeyError(key)
return obj.get('item')
def __contains__(self, key: object) -> bool:
if not super().__contains__(key):
return False
obj = self.get(key)
# try:
# self._check_expired(key, obj)
# except ExpiredError:
# return False
return True
def __setitem__(self, key, value, expire=0):
if not isinstance(expire, Number):
raise TypeError('expire must be a number')
value = dict(item=value, time_added=time(),
expire=expire)
super().__setitem__(key, value)
def _check_expired(self, key, obj: dict):
if not (isinstance(obj, dict) and 'expire' in obj):
self.pop(key)
raise ExpiredError(key)
time_added = obj.get('time_added')
expire = obj.get('expire')
valid = time_added and expire is not None
if not valid or time() - time_added > expire:
self.pop(key)
raise ExpiredError(key)
class ExpiredError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
|
from numbers import Number
from time import time
from typing import Iterable, Hashable
from diskcache import Index
from .helper import hash
class CacheSet:
"""
A Set-like Cache that wraps :class:`diskcache.Index`
"""
def __init__(self, iterable=(), directory=None):
self.index = Index(directory)
self.update(*iterable)
def add(self, obj: object):
if not isinstance(obj, Hashable):
raise TypeError(f'{type(obj)} is not Hashable',
f'{str(obj)[:100]}...')
self.index[hash(obj)] = obj
def remove(self, obj):
try:
self.index.pop(hash(obj))
except KeyError:
raise KeyError(obj)
def pop(self):
return self.index.popitem()[1]
def update(self, *obj):
for o in obj:
self.add(o)
def clear(self):
self.index.clear()
def difference(self, other):
        return self.__sub__(other)
def copy(self):
return set(self).copy()
def __iter__(self):
return iter(self.index.values())
def __contains__(self, item):
        item_hash = hash(item)
        return item_hash in self.index
def __sub__(self, other: Iterable):
return set(self) - set(other)
def __len__(self):
return len(self.index)
def __str__(self):
return f'CacheSet({", ".join(self)})'
def __repr__(self):
return str(self)
@property
def directory(self):
return self.index.directory
class EvictingIndex(Index):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def set(self, key, value, expire):
self.__setitem__(key, value, expire)
def __getitem__(self, key):
obj: dict = super().__getitem__(key)
if not isinstance(obj, dict):
self.pop(key)
raise KeyError(key)
return obj.get('item')
def __contains__(self, key: object) -> bool:
if not super().__contains__(key):
return False
obj = self.get(key)
# try:
# self._check_expired(key, obj)
# except ExpiredError:
# return False
return True
def __setitem__(self, key, value, expire=0):
if not isinstance(expire, Number):
raise TypeError('expire must be a number')
value = dict(item=value, time_added=time(),
expire=expire)
super().__setitem__(key, value)
def _check_expired(self, key, obj: dict):
if not (isinstance(obj, dict) and 'expire' in obj):
self.pop(key)
raise ExpiredError(key)
time_added = obj.get('time_added')
expire = obj.get('expire')
valid = time_added and expire is not None
if not valid or time() - time_added > expire:
self.pop(key)
raise ExpiredError(key)
class ExpiredError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
|
import contextlib
import datetime
import json
class Meta:
Title = 'title'
Artist = 'artist'
Year = 'year'
Comment = 'comment'
class WeSing:
def __init__(self, src):
self._src = src
self._data = None
def get_meta(self):
meta = {}
try:
data = self._extract_data()
detail = data['detail']
except KeyError:
pass
else:
song_name = detail.get('song_name')
if song_name:
meta[Meta.Title] = song_name
author = detail.get('nick')
if author:
meta[Meta.Artist] = author
ctime = detail.get('ctime')
with contextlib.suppress(Exception):
year = datetime.date.fromtimestamp(ctime).year
meta[Meta.Year] = str(year)
content = detail.get('content')
if content:
meta[Meta.Comment] = content
return meta
def get_cover(self):
data = self._extract_data()
return data['detail']['cover']
def get_music_url(self):
try:
return self._extract_data()['detail']['playurl']
except KeyError:
return
def _extract_data(self):
"""
{"shareid":"rz0rtglrkEYIpla3","isPcQQMusic":false,"nohref":false,"detail":{"activity_id":0,"avatar":"http://p.qpic.cn/wsinghead/2100829728/2100829728/100","client_key":"","comment_num":0,"content":"","cover":"https://y.gtimg.cn/music/photo_new/T002R500x500M000004GArUe26PXvZ.jpg?max_age=2592000","ctime":1508461315,"f_lat":"0","f_lon":"00","fb_cover":"https://y.gtimg.cn/music/photo_new/T002R300x300M000004GArUe26PXvZ.jpg?max_age=2592000","file_mid":"002NaDd93srdHz","flower_num":0,"gift_num":0,"hc_avatar":"","hc_level":0,"hc_nick":"","hc_second_sing_count":0,"hc_ugcid_half":"","hc_uid":"669c9982242a368b3d4a","iHasCp":0,"is_anonymous":0,"is_segment":0,"kg_nick":"Wilson","ksong_mid":"003BRxnX4bD3fu","lSongMask":0,"level":3,"mapAuth":{"0":""},"nick":"Wilson","play_num":8,"playurl":"http://ws.stream.kg.qq.com/szkge/4c8a1b1fdb3df38dc25b71b4512552977b7ff5c9?ftnrkey=c47cae3aaaf226cd3a140b6b592ad425c1dd02b25fe64372939d7ba7dd3ab53b556f0ecece6e51ede3aae4adada9b6e86d0fd5e425de2e81c6d1fa437d7efa38&vkey=E781387579518871F86F53D047694DC4508CCFD4D9EACAD8FB7270324FF66F464524EE16EB757F91D8E756D785BC1F1EFADC376CAAA2ED305A7D597DBCA05E58559D9142BCCF7F4D1972D27595C6D8BB2DEA60F6FD516E60&fname=82_afe744ec35033fc6567e47ef80f9fafc5981b0ac.48.m4a&fromtag=1000&sdtfrom=v1000","playurl_video":"","poi_id":"","score":2774,"scoreRank":6,"segment_end":200451,"segment_start":0,"sentence_count":28,"singer_mid":"004WgCsE3KBddt","singer_name":"陈粒","song_name":"易燃易爆炸","tail_name":"iPhone 5","total":4294967295,"ugc_id":"","ugc_mask":0,"ugctype":0,"uid":"609c9d852d2f3e8c3646","comments":[],"flower":[],"photos":[]},"lyric":null,"bullet":null,"share":{"title":"易燃易爆炸","link":"https://wesingapp.com/play?s=rz0rtglrkEYIpla3&lang=en","content":"Wilson that you sing 易燃易爆炸 is great. Listen to it (from WeSing, download it immediately)( WeSing, a social KTV community https://c.y.qq.com/r/8zQU )","img_url":"https://y.gtimg.cn/music/photo_new/T002R300x300M000004GArUe26PXvZ.jpg?max_age=2592000"},"langType":"en","lang":{"bullet":"Bullet screen","scan_follow":"Scan to follow me","introduce":"In WeSing, you can view song details and interact","alert_title":"Bullet screen title","close":"Close","qrcode":"QR code","share_tit_s":" that you sing ","share_tit_e":" is great. Listen to it (from WeSing, download it immediately)","seo_tit_s":" has recorded the song, ","seo_tit_e":", on WeSing. Download the application now to compete with your friends for the championship!","view_detail":"View song details","view_more_comment":"View more comments","send_flower":"Send flowers to the publisher","share_to":"Shared to: ","qzone":"QZone","weibo":"Sina Weibo","wechat_scan":"Scan with WeChat and share it on Moments","qiangshafa":"You've only got one chance to get the sofa","gift_rank":"Gift ranking","no_data_hint":"The song does not exist or has been removed","share_intro":"WeSing, a social KTV community","musicerr_hint":"Play errors. 
Refresh to rfeplay","open":"Open","download":"Download","use_browser":"Open this page with the browser to use the function","yesterday":"Yesterday","today":"Today","comments":"Comments","singer_count":" Users Sung","want_sing":"Go to Sing","open_see_more":"Open sing this song","download_see_more":"Download sing this song","open_see_more_comment":"Open view more comments","download_see_more_comment":"Download view more comments","more_songs":"More Songs","want_your_comment":"Your comment","send_comment":"Send","register_flower":"Received 10 flowers for join WeSing","give_flower":"You could send some flowers to your friends!","recieve_flower":"Receive","comment_success":"Success","download_see_friends_comment":"Download view your friends reply!","dowload_now":"Download now","none_flower":"No flowers","download_get_flower":"Download get more flowers!","follow":"follow","retry":"Server error, please try later.","confirm":"OK","comment_max_length":"Comments can not exceed 140 characters.","login":"Login information is invalid, please login"},"wxlogined":false,"code":"","rawCode":0,"rawMessage":"commlogin fail uin error.","isMV":false}
"""
if not self._data:
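            # Locate the inline ``window.__DATA__`` assignment in the page source and
            # parse the JSON object between its first '{' and the last '}' before the
            # closing </script> tag.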
_1 = self._src[self._src.index('window.__DATA__'):]
_2 = _1[:_1.index('</script>')]
json_str = _2[_2.find('{'):_2.rfind('}') + 1]
self._data = json.loads(json_str)
return self._data
|
import contextlib
import datetime
import json
class Meta:
Title = 'title'
Artist = 'artist'
Year = 'year'
Comment = 'comment'
class WeSing:
def __init__(self, src):
self._src = src
self._data = None
def get_meta(self):
meta = {}
try:
data = self._extract_data()
detail = data['detail']
except KeyError:
pass
else:
song_name = detail.get('song_name')
if song_name:
meta[Meta.Title] = song_name
author = detail.get('nick')
if author:
meta[Meta.Artist] = author
ctime = detail.get('ctime')
with contextlib.suppress(Exception):
year = datetime.date.fromtimestamp(ctime).year
meta[Meta.Year] = str(year)
content = detail.get('content')
if content:
meta[Meta.Comment] = content
return meta
def get_cover(self):
data = self._extract_data()
return data['detail']['cover']
def get_music_url(self):
try:
return self._extract_data()['detail']['playurl']
except KeyError:
return
def _extract_data(self):
"""
{"shareid":"rz0rtglrkEYIpla3","isPcQQMusic":false,"nohref":false,"detail":{"activity_id":0,"avatar":"http://p.qpic.cn/wsinghead/2100829728/2100829728/100","client_key":"","comment_num":0,"content":"","cover":"https://y.gtimg.cn/music/photo_new/T002R500x500M000004GArUe26PXvZ.jpg?max_age=2592000","ctime":1508461315,"f_lat":"0","f_lon":"00","fb_cover":"https://y.gtimg.cn/music/photo_new/T002R300x300M000004GArUe26PXvZ.jpg?max_age=2592000","file_mid":"002NaDd93srdHz","flower_num":0,"gift_num":0,"hc_avatar":"","hc_level":0,"hc_nick":"","hc_second_sing_count":0,"hc_ugcid_half":"","hc_uid":"669c9982242a368b3d4a","iHasCp":0,"is_anonymous":0,"is_segment":0,"kg_nick":"Wilson","ksong_mid":"003BRxnX4bD3fu","lSongMask":0,"level":3,"mapAuth":{"0":""},"nick":"Wilson","play_num":8,"playurl":"http://ws.stream.kg.qq.com/szkge/4c8a1b1fdb3df38dc25b71b4512552977b7ff5c9?ftnrkey=c47cae3aaaf226cd3a140b6b592ad425c1dd02b25fe64372939d7ba7dd3ab53b556f0ecece6e51ede3aae4adada9b6e86d0fd5e425de2e81c6d1fa437d7efa38&vkey=E781387579518871F86F53D047694DC4508CCFD4D9EACAD8FB7270324FF66F464524EE16EB757F91D8E756D785BC1F1EFADC376CAAA2ED305A7D597DBCA05E58559D9142BCCF7F4D1972D27595C6D8BB2DEA60F6FD516E60&fname=82_afe744ec35033fc6567e47ef80f9fafc5981b0ac.48.m4a&fromtag=1000&sdtfrom=v1000","playurl_video":"","poi_id":"","score":2774,"scoreRank":6,"segment_end":200451,"segment_start":0,"sentence_count":28,"singer_mid":"004WgCsE3KBddt","singer_name":"陈粒","song_name":"易燃易爆炸","tail_name":"iPhone 5","total":4294967295,"ugc_id":"","ugc_mask":0,"ugctype":0,"uid":"609c9d852d2f3e8c3646","comments":[],"flower":[],"photos":[]},"lyric":null,"bullet":null,"share":{"title":"易燃易爆炸","link":"https://wesingapp.com/play?s=rz0rtglrkEYIpla3&lang=en","content":"Wilson that you sing 易燃易爆炸 is great. Listen to it (from WeSing, download it immediately)( WeSing, a social KTV community https://c.y.qq.com/r/8zQU )","img_url":"https://y.gtimg.cn/music/photo_new/T002R300x300M000004GArUe26PXvZ.jpg?max_age=2592000"},"langType":"en","lang":{"bullet":"Bullet screen","scan_follow":"Scan to follow me","introduce":"In WeSing, you can view song details and interact","alert_title":"Bullet screen title","close":"Close","qrcode":"QR code","share_tit_s":" that you sing ","share_tit_e":" is great. Listen to it (from WeSing, download it immediately)","seo_tit_s":" has recorded the song, ","seo_tit_e":", on WeSing. Download the application now to compete with your friends for the championship!","view_detail":"View song details","view_more_comment":"View more comments","send_flower":"Send flowers to the publisher","share_to":"Shared to: ","qzone":"QZone","weibo":"Sina Weibo","wechat_scan":"Scan with WeChat and share it on Moments","qiangshafa":"You've only got one chance to get the sofa","gift_rank":"Gift ranking","no_data_hint":"The song does not exist or has been removed","share_intro":"WeSing, a social KTV community","musicerr_hint":"Play errors. 
Refresh to rfeplay","open":"Open","download":"Download","use_browser":"Open this page with the browser to use the function","yesterday":"Yesterday","today":"Today","comments":"Comments","singer_count":" Users Sung","want_sing":"Go to Sing","open_see_more":"Open sing this song","download_see_more":"Download sing this song","open_see_more_comment":"Open view more comments","download_see_more_comment":"Download view more comments","more_songs":"More Songs","want_your_comment":"Your comment","send_comment":"Send","register_flower":"Received 10 flowers for join WeSing","give_flower":"You could send some flowers to your friends!","recieve_flower":"Receive","comment_success":"Success","download_see_friends_comment":"Download view your friends reply!","dowload_now":"Download now","none_flower":"No flowers","download_get_flower":"Download get more flowers!","follow":"follow","retry":"Server error, please try later.","confirm":"OK","comment_max_length":"Comments can not exceed 140 characters.","login":"Login information is invalid, please login"},"wxlogined":false,"code":"","rawCode":0,"rawMessage":"commlogin fail uin error.","isMV":false}
"""
if not self._data:
_1 = self._src[self._src.index('window.__DATA__'):]
_2 = _1[:_1.index('</script>')]
json_str = _2[_2.find('{'):_2.rfind('}') + 1]
self._data = json.loads(json_str)
return self._data
|
# Copyright 2020 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import imp
import inspect
import dill
from cortex.lib.log import refresh_logger, cx_logger
from cortex.lib.exceptions import CortexException, UserException, UserRuntimeException
class Predictor:
def __init__(self, provider, cache_dir, **kwargs):
self.provider = provider
self.type = kwargs["type"]
self.path = kwargs["path"]
self.model = kwargs.get("model")
self.python_path = kwargs.get("python_path")
self.config = kwargs.get("config", {})
self.env = kwargs.get("env")
self.signature_key = kwargs.get("signature_key")
self.cache_dir = cache_dir
def initialize_client(self, model_dir=None, tf_serving_host=None, tf_serving_port=None):
if self.type == "onnx":
from cortex.lib.client.onnx import ONNXClient
model_path = os.path.join(model_dir, os.path.basename(self.model))
client = ONNXClient(model_path)
cx_logger().info("ONNX model signature: {}".format(client.input_signature))
return client
elif self.type == "tensorflow":
from cortex.lib.client.tensorflow import TensorFlowClient
tf_serving_address = tf_serving_host + ":" + tf_serving_port
validate_model_dir(model_dir)
client = TensorFlowClient(tf_serving_address, self.signature_key)
cx_logger().info("TensorFlow model signature: {}".format(client.input_signature))
return client
return None
def initialize_impl(self, project_dir, client=None):
class_impl = self.class_impl(project_dir)
try:
if self.type == "onnx":
return class_impl(onnx_client=client, config=self.config)
elif self.type == "tensorflow":
return class_impl(tensorflow_client=client, config=self.config)
else:
return class_impl(config=self.config)
except Exception as e:
raise UserRuntimeException(self.path, "__init__", str(e)) from e
finally:
refresh_logger()
def class_impl(self, project_dir):
if self.type == "tensorflow":
target_class_name = "TensorFlowPredictor"
validations = TENSORFLOW_CLASS_VALIDATION
elif self.type == "onnx":
target_class_name = "ONNXPredictor"
validations = ONNX_CLASS_VALIDATION
elif self.type == "python":
target_class_name = "PythonPredictor"
validations = PYTHON_CLASS_VALIDATION
try:
impl = self._load_module("cortex_predictor", os.path.join(project_dir, self.path))
except CortexException as e:
e.wrap("error in " + self.path)
raise
finally:
refresh_logger()
try:
classes = inspect.getmembers(impl, inspect.isclass)
predictor_class = None
for class_df in classes:
if class_df[0] == target_class_name:
if predictor_class is not None:
raise UserException(
"multiple definitions for {} class found; please check your imports and class definitions and ensure that there is only one Predictor class definition".format(
target_class_name
)
)
predictor_class = class_df[1]
if predictor_class is None:
raise UserException("{} class is not defined".format(target_class_name))
_validate_impl(predictor_class, validations)
except CortexException as e:
e.wrap("error in " + self.path)
raise
return predictor_class
def _load_module(self, module_name, impl_path):
if impl_path.endswith(".pickle"):
try:
impl = imp.new_module(module_name)
with open(impl_path, "rb") as pickle_file:
pickled_dict = dill.load(pickle_file)
for key in pickled_dict:
setattr(impl, key, pickled_dict[key])
except Exception as e:
raise UserException("unable to load pickle", str(e)) from e
else:
try:
impl = imp.load_source(module_name, impl_path)
except Exception as e:
raise UserException(str(e)) from e
return impl
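# Expected signatures for user-provided Predictor classes, keyed by predictor
# type. Each entry names a method, the arguments it must accept and the
# optional arguments it may accept; see _validate_impl below.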
PYTHON_CLASS_VALIDATION = {
"required": [
{"name": "__init__", "required_args": ["self", "config"]},
{
"name": "predict",
"required_args": ["self"],
"optional_args": ["payload", "query_params", "headers"],
},
]
}
TENSORFLOW_CLASS_VALIDATION = {
"required": [
{"name": "__init__", "required_args": ["self", "tensorflow_client", "config"]},
{
"name": "predict",
"required_args": ["self"],
"optional_args": ["payload", "query_params", "headers"],
},
]
}
ONNX_CLASS_VALIDATION = {
"required": [
{"name": "__init__", "required_args": ["self", "onnx_client", "config"]},
{
"name": "predict",
"required_args": ["self"],
"optional_args": ["payload", "query_params", "headers"],
},
]
}
def _validate_impl(impl, impl_req):
for optional_func_signature in impl_req.get("optional", []):
_validate_optional_fn_args(impl, optional_func_signature)
for required_func_signature in impl_req.get("required", []):
_validate_required_fn_args(impl, required_func_signature)
def _validate_optional_fn_args(impl, func_signature):
if getattr(impl, func_signature["name"], None):
_validate_required_fn_args(impl, func_signature)
def _validate_required_fn_args(impl, func_signature):
fn = getattr(impl, func_signature["name"], None)
if not fn:
        raise UserException(f'required function "{func_signature["name"]}" is not defined')
if not callable(fn):
        raise UserException(f'"{func_signature["name"]}" is defined, but is not a function')
argspec = inspect.getfullargspec(fn)
required_args = func_signature.get("required_args", [])
optional_args = func_signature.get("optional_args", [])
    fn_str = f'{func_signature["name"]}({", ".join(argspec.args)})'
for arg_name in required_args:
if arg_name not in argspec.args:
raise UserException(
f'invalid signature for function "{fn_str}": "{arg_name}" is a required argument, but was not provided'
)
if arg_name == "self":
if argspec.args[0] != "self":
raise UserException(
f'invalid signature for function "{fn_str}": "self" must be the first argument'
)
seen_args = []
for arg_name in argspec.args:
if arg_name not in required_args and arg_name not in optional_args:
raise UserException(
f'invalid signature for function "{fn_str}": "{arg_name}" is not a supported argument'
)
if arg_name in seen_args:
raise UserException(
f'invalid signature for function "{fn_str}": "{arg_name}" is duplicated'
)
seen_args.append(arg_name)
tf_expected_dir_structure = """tensorflow model directories must have the following structure:
1523423423/ (version prefix, usually a timestamp)
├── saved_model.pb
└── variables/
├── variables.index
├── variables.data-00000-of-00003
├── variables.data-00001-of-00003
      └── variables.data-00002-of-..."""
def validate_model_dir(model_dir):
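    """Raise UserException if model_dir does not follow the versioned SavedModel layout shown above."""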
version = None
for file_name in os.listdir(model_dir):
if file_name.isdigit():
version = file_name
break
if version is None:
cx_logger().error(tf_expected_dir_structure)
raise UserException("no top-level version folder found")
if not os.path.isdir(os.path.join(model_dir, version)):
cx_logger().error(tf_expected_dir_structure)
raise UserException("no top-level version folder found")
if not os.path.isfile(os.path.join(model_dir, version, "saved_model.pb")):
cx_logger().error(tf_expected_dir_structure)
raise UserException('expected a "saved_model.pb" file')
if not os.path.isdir(os.path.join(model_dir, version, "variables")):
cx_logger().error(tf_expected_dir_structure)
raise UserException('expected a "variables" directory')
if not os.path.isfile(os.path.join(model_dir, version, "variables", "variables.index")):
cx_logger().error(tf_expected_dir_structure)
raise UserException('expected a "variables/variables.index" file')
for file_name in os.listdir(os.path.join(model_dir, version, "variables")):
if file_name.startswith("variables.data-00000-of"):
return
cx_logger().error(tf_expected_dir_structure)
raise UserException(
'expected at least one variables data file, starting with "variables.data-00000-of-"'
)
|
# Copyright 2020 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import imp
import inspect
import dill
from cortex.lib.log import refresh_logger, cx_logger
from cortex.lib.exceptions import CortexException, UserException, UserRuntimeException
class Predictor:
def __init__(self, provider, cache_dir, **kwargs):
self.provider = provider
self.type = kwargs["type"]
self.path = kwargs["path"]
self.model = kwargs.get("model")
self.python_path = kwargs.get("python_path")
self.config = kwargs.get("config", {})
self.env = kwargs.get("env")
self.signature_key = kwargs.get("signature_key")
self.cache_dir = cache_dir
def initialize_client(self, model_dir=None, tf_serving_host=None, tf_serving_port=None):
if self.type == "onnx":
from cortex.lib.client.onnx import ONNXClient
model_path = os.path.join(model_dir, os.path.basename(self.model))
client = ONNXClient(model_path)
cx_logger().info("ONNX model signature: {}".format(client.input_signature))
return client
elif self.type == "tensorflow":
from cortex.lib.client.tensorflow import TensorFlowClient
tf_serving_address = tf_serving_host + ":" + tf_serving_port
validate_model_dir(model_dir)
client = TensorFlowClient(tf_serving_address, self.signature_key)
cx_logger().info("TensorFlow model signature: {}".format(client.input_signature))
return client
return None
def initialize_impl(self, project_dir, client=None):
class_impl = self.class_impl(project_dir)
try:
if self.type == "onnx":
return class_impl(onnx_client=client, config=self.config)
elif self.type == "tensorflow":
return class_impl(tensorflow_client=client, config=self.config)
else:
return class_impl(config=self.config)
except Exception as e:
raise UserRuntimeException(self.path, "__init__", str(e)) from e
finally:
refresh_logger()
def class_impl(self, project_dir):
if self.type == "tensorflow":
target_class_name = "TensorFlowPredictor"
validations = TENSORFLOW_CLASS_VALIDATION
elif self.type == "onnx":
target_class_name = "ONNXPredictor"
validations = ONNX_CLASS_VALIDATION
elif self.type == "python":
target_class_name = "PythonPredictor"
validations = PYTHON_CLASS_VALIDATION
try:
impl = self._load_module("cortex_predictor", os.path.join(project_dir, self.path))
except CortexException as e:
e.wrap("error in " + self.path)
raise
finally:
refresh_logger()
try:
classes = inspect.getmembers(impl, inspect.isclass)
predictor_class = None
for class_df in classes:
if class_df[0] == target_class_name:
if predictor_class is not None:
raise UserException(
"multiple definitions for {} class found; please check your imports and class definitions and ensure that there is only one Predictor class definition".format(
target_class_name
)
)
predictor_class = class_df[1]
if predictor_class is None:
raise UserException("{} class is not defined".format(target_class_name))
_validate_impl(predictor_class, validations)
except CortexException as e:
e.wrap("error in " + self.path)
raise
return predictor_class
def _load_module(self, module_name, impl_path):
if impl_path.endswith(".pickle"):
try:
impl = imp.new_module(module_name)
with open(impl_path, "rb") as pickle_file:
pickled_dict = dill.load(pickle_file)
for key in pickled_dict:
setattr(impl, key, pickled_dict[key])
except Exception as e:
raise UserException("unable to load pickle", str(e)) from e
else:
try:
impl = imp.load_source(module_name, impl_path)
except Exception as e:
raise UserException(str(e)) from e
return impl
PYTHON_CLASS_VALIDATION = {
"required": [
{"name": "__init__", "required_args": ["self", "config"]},
{
"name": "predict",
"required_args": ["self"],
"optional_args": ["payload", "query_params", "headers"],
},
]
}
TENSORFLOW_CLASS_VALIDATION = {
"required": [
{"name": "__init__", "required_args": ["self", "tensorflow_client", "config"]},
{
"name": "predict",
"required_args": ["self"],
"optional_args": ["payload", "query_params", "headers"],
},
]
}
ONNX_CLASS_VALIDATION = {
"required": [
{"name": "__init__", "required_args": ["self", "onnx_client", "config"]},
{
"name": "predict",
"required_args": ["self"],
"optional_args": ["payload", "query_params", "headers"],
},
]
}
def _validate_impl(impl, impl_req):
for optional_func_signature in impl_req.get("optional", []):
_validate_optional_fn_args(impl, optional_func_signature)
for required_func_signature in impl_req.get("required", []):
_validate_required_fn_args(impl, required_func_signature)
def _validate_optional_fn_args(impl, func_signature):
if getattr(impl, func_signature["name"], None):
_validate_required_fn_args(impl, func_signature)
def _validate_required_fn_args(impl, func_signature):
fn = getattr(impl, func_signature["name"], None)
if not fn:
raise UserException(f'required function "{func_signature["name"]}" is not defined')
if not callable(fn):
raise UserException(f'"{func_signature["name"]}" is defined, but is not a function')
argspec = inspect.getfullargspec(fn)
required_args = func_signature.get("required_args", [])
optional_args = func_signature.get("optional_args", [])
fn_str = f'{func_signature["name"]}({", ".join(argspec.args)})'
for arg_name in required_args:
if arg_name not in argspec.args:
raise UserException(
f'invalid signature for function "{fn_str}": "{arg_name}" is a required argument, but was not provided'
)
if arg_name == "self":
if argspec.args[0] != "self":
raise UserException(
f'invalid signature for function "{fn_str}": "self" must be the first argument'
)
seen_args = []
for arg_name in argspec.args:
if arg_name not in required_args and arg_name not in optional_args:
raise UserException(
f'invalid signature for function "{fn_str}": "{arg_name}" is not a supported argument'
)
if arg_name in seen_args:
raise UserException(
f'invalid signature for function "{fn_str}": "{arg_name}" is duplicated'
)
seen_args.append(arg_name)
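# Illustrative sketch (not part of the Cortex library): a hypothetical user class
# that satisfies PYTHON_CLASS_VALIDATION above. __init__ must accept (self, config)
# and predict must accept self plus any subset of the optional arguments
# (payload, query_params, headers); _validate_impl checks only these signatures,
# via inspect.getfullargspec, and raises UserException on a mismatch.
class _ExamplePythonPredictor:
    def __init__(self, config):
        self.config = config
    def predict(self, payload, query_params):
        return payload
def _example_class_validation():
    # Passes silently for the class above; a wrong signature would raise.
    _validate_impl(_ExamplePythonPredictor, PYTHON_CLASS_VALIDATION)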
tf_expected_dir_structure = """tensorflow model directories must have the following structure:
1523423423/ (version prefix, usually a timestamp)
├── saved_model.pb
└── variables/
├── variables.index
├── variables.data-00000-of-00003
├── variables.data-00001-of-00003
└── variables.data-00002-of-...`"""
def validate_model_dir(model_dir):
version = None
for file_name in os.listdir(model_dir):
if file_name.isdigit():
version = file_name
break
if version is None:
cx_logger().error(tf_expected_dir_structure)
raise UserException("no top-level version folder found")
if not os.path.isdir(os.path.join(model_dir, version)):
cx_logger().error(tf_expected_dir_structure)
raise UserException("no top-level version folder found")
if not os.path.isfile(os.path.join(model_dir, version, "saved_model.pb")):
cx_logger().error(tf_expected_dir_structure)
raise UserException('expected a "saved_model.pb" file')
if not os.path.isdir(os.path.join(model_dir, version, "variables")):
cx_logger().error(tf_expected_dir_structure)
raise UserException('expected a "variables" directory')
if not os.path.isfile(os.path.join(model_dir, version, "variables", "variables.index")):
cx_logger().error(tf_expected_dir_structure)
raise UserException('expected a "variables/variables.index" file')
for file_name in os.listdir(os.path.join(model_dir, version, "variables")):
if file_name.startswith("variables.data-00000-of"):
return
cx_logger().error(tf_expected_dir_structure)
raise UserException(
'expected at least one variables data file, starting with "variables.data-00000-of-"'
)
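# Illustrative sketch (not part of the Cortex library): build a minimal directory
# tree matching tf_expected_dir_structure above and check it with
# validate_model_dir. The version folder name, shard count and tmp_dir path are
# hypothetical placeholders; the files only need to exist for these checks.
def _example_validate_model_dir(tmp_dir="/tmp/example_tf_model"):
    variables_dir = os.path.join(tmp_dir, "1523423423", "variables")
    os.makedirs(variables_dir, exist_ok=True)
    open(os.path.join(tmp_dir, "1523423423", "saved_model.pb"), "a").close()
    open(os.path.join(variables_dir, "variables.index"), "a").close()
    open(os.path.join(variables_dir, "variables.data-00000-of-00001"), "a").close()
    validate_model_dir(tmp_dir)  # raises UserException if the layout is wrong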
|
import matplotlib.image as mpimg
import matplotlib.style as style
import matplotlib.pyplot as plt
from matplotlib import rcParams
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import seaborn as sns
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def fix_cap_remove_ace(pdb_file):
"""
Removes the H atoms of the capped ACE residue.
"""
remove_words = [
"H1 ACE",
"H2 ACE",
"H3 ACE",
"H31 ACE",
"H32 ACE",
"H33 ACE",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_ace(pdb_file):
"""
Replaces the alpha carbon atom of the
capped ACE residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA ACE", "CH3 ACE")
data = data.replace("C ACE", "CH3 ACE")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def fix_cap_remove_nme(pdb_file):
"""
Removes the H atoms of the capped NME residue.
"""
remove_words = [
"H1 NME",
"H2 NME",
"H3 NME",
"H31 NME",
"H32 NME",
"H33 NME",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_nme(pdb_file):
"""
Replaces the alpha carbon atom of the
capped NME residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA NME", "CH3 NME")
data = data.replace("C NME", "CH3 NME")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
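# Illustrative usage sketch: the cap-fixing helpers are intended to be applied in
# the order remove-then-replace, as done for the NME cap in
# create_starting_structures below. The file name here is a hypothetical placeholder.
def _example_fix_nme_cap(pdb_file="some_last_frame.pdb"):
    fix_cap_remove_nme(pdb_file)
    fix_cap_replace_nme(pdb_file)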
def prepare_alanine_dipeptide():
"""
Prepares the alanine dipeptide system for Gaussian
Accelerated Molecular Dynamics (GaMD) simulations.
Downloads the pdb structure from
https://markovmodel.github.io/mdshare/ALA2/ and
    parameterizes it using the Amber ff14SB force
    field with TIP3P water.
"""
os.system(
"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb"
)
os.system(
"rm -rf system_inputs"
) # Removes any existing directory named system_inputs
os.system("mkdir system_inputs") # Creates a directory named system_inputs
cwd = os.getcwd()
target_dir = cwd + "/" + "system_inputs"
os.system("pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb")
# Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)
remove_words = ["HH31 ACE", "HH32 ACE", "HH33 ACE"]
with open("intermediate.pdb") as oldfile, open(
"system.pdb", "w"
) as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
os.system("rm -rf intermediate*")
# save the tleap script to file
with open("input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system.pdb
solvateBox pdb TIP3PBOX 15
saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd
saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7
savepdb pdb system_TIP3P.pdb
quit
"""
)
os.system("tleap -f input_TIP3P.leap")
os.system("rm -rf leap.log")
shutil.copy(
cwd + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_TIP3P.pdb", target_dir + "/" + "system_TIP3P.pdb"
)
shutil.copy(
cwd + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_TIP3P.rst7", target_dir + "/" + "system_TIP3P.rst7"
)
shutil.copy(cwd + "/" + "system.pdb", target_dir + "/" + "system.pdb")
shutil.copy(
cwd + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "input_TIP3P.leap", target_dir + "/" + "input_TIP3P.leap"
)
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf input_TIP3P.leap")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
def create_vectors(x):
"""
    Extracts periodic box information from the
given line.
"""
x = str(x)
x = x.replace("Vec3", "")
x = re.findall("\d*\.?\d+", x)
for i in range(0, len(x)):
x[i] = float(x[i])
x = tuple(x)
n = int(len(x) / 3)
x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]
return x
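# Illustrative sketch of create_vectors: it parses the string representation of
# OpenMM periodic box vectors (three Vec3 entries) into a list of three numeric
# 3-tuples. The numbers below are made-up placeholders.
def _example_create_vectors():
    box_string = (
        "[Vec3(x=2.5, y=0.0, z=0.0), "
        "Vec3(x=0.0, y=2.5, z=0.0), "
        "Vec3(x=0.0, y=0.0, z=2.5)]"
    )
    vectors = create_vectors(box_string)
    # vectors == [(2.5, 0.0, 0.0), (0.0, 2.5, 0.0), (0.0, 0.0, 2.5)]
    return vectors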
def simulated_annealing(
parm="system_TIP3P.prmtop",
rst="system_TIP3P.inpcrd",
annealing_output_pdb="system_annealing_output.pdb",
annealing_steps=100000,
pdb_freq=100000,
starting_temp=0,
target_temp=300,
temp_incr=3,
):
"""
Performs simulated annealing of the system from
0K to 300 K (default) using OpenMM MD engine and
saves the last frame of the simulation to be
accessed by the next simulation.
Parameters
----------
parm: str
System's topology file
rst: str
System's coordinate file
annealing_output_pdb: str
System's output trajectory file
annealing_steps: int
        Annealing steps at each temperature jump
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
starting_temp: int
Initial temperature of Simulated Annealing
target_temp: int
Final temperature of Simulated Annealing
temp_incr: int
        Temperature increase for every step
"""
prmtop = AmberPrmtopFile(parm)
inpcrd = AmberInpcrdFile(rst)
annealing_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
annealing_integrator = LangevinIntegrator(
0 * kelvin, 1 / picosecond, 2 * femtoseconds
)
total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
annealing_temp_range = int((target_temp / temp_incr) + 1)
annealing_platform = Platform.getPlatformByName("CUDA")
annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
annealing_simulation = Simulation(
prmtop.topology,
annealing_system,
annealing_integrator,
annealing_platform,
annealing_properties,
)
annealing_simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
annealing_simulation.minimizeEnergy()
annealing_simulation.reporters.append(
PDBReporter(annealing_output_pdb, pdb_freq)
)
simulated_annealing_last_frame = (
annealing_output_pdb[:-4] + "_last_frame.pdb"
)
annealing_simulation.reporters.append(
PDBReporter(simulated_annealing_last_frame, total_steps)
)
annealing_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=total_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
temp = starting_temp
while temp <= target_temp:
annealing_integrator.setTemperature(temp * kelvin)
if temp == starting_temp:
annealing_simulation.step(annealing_steps)
annealing_simulation.saveState("annealing.state")
else:
annealing_simulation.loadState("annealing.state")
annealing_simulation.step(annealing_steps)
temp += temp_incr
state = annealing_simulation.context.getState()
print(state.getPeriodicBoxVectors())
annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
print(annealing_simulation_box_vectors)
with open("annealing_simulation_box_vectors.pkl", "wb") as f:
pk.dump(annealing_simulation_box_vectors, f)
print("Finshed NVT Simulated Annealing Simulation")
def npt_equilibration(
parm="system_TIP3P.prmtop",
npt_output_pdb="system_npt_output.pdb",
pdb_freq=500000,
npt_steps=5000000,
target_temp=300,
npt_pdb="system_annealing_output_last_frame.pdb",
):
"""
Performs NPT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
npt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
npt_steps: int
NPT simulation steps
target_temp: int
Temperature for MD simulation
npt_pdb: str
Last frame of the simulation
"""
npt_init_pdb = PDBFile(npt_pdb)
prmtop = AmberPrmtopFile(parm)
npt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
npt_system.addForce(barostat)
npt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
npt_platform = Platform.getPlatformByName("CUDA")
npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
npt_simulation = Simulation(
prmtop.topology,
npt_system,
npt_integrator,
npt_platform,
npt_properties,
)
npt_simulation.context.setPositions(npt_init_pdb.positions)
npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("annealing_simulation_box_vectors.pkl", "rb") as f:
annealing_simulation_box_vectors = pk.load(f)
annealing_simulation_box_vectors = create_vectors(
annealing_simulation_box_vectors
)
npt_simulation.context.setPeriodicBoxVectors(
annealing_simulation_box_vectors[0],
annealing_simulation_box_vectors[1],
annealing_simulation_box_vectors[2],
)
npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
npt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=npt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
npt_simulation.minimizeEnergy()
npt_simulation.step(npt_steps)
npt_simulation.saveState("npt_simulation.state")
state = npt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
npt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(npt_simulation_box_vectors)
with open("npt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(npt_simulation_box_vectors, f)
print("Finished NPT Simulation")
def nvt_equilibration(
parm="system_TIP3P.prmtop",
nvt_output_pdb="system_nvt_output.pdb",
pdb_freq=500000,
nvt_steps=5000000,
target_temp=300,
nvt_pdb="system_npt_output_last_frame.pdb",
):
"""
Performs NVT equilibration MD of the system
    using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
nvt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
nvt_steps: int
NVT simulation steps
target_temp: int
Temperature for MD simulation
nvt_pdb: str
Last frame of the simulation
"""
nvt_init_pdb = PDBFile(nvt_pdb)
prmtop = AmberPrmtopFile(parm)
nvt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
nvt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
nvt_platform = Platform.getPlatformByName("CUDA")
nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
nvt_simulation = Simulation(
prmtop.topology,
nvt_system,
nvt_integrator,
nvt_platform,
nvt_properties,
)
nvt_simulation.context.setPositions(nvt_init_pdb.positions)
nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("npt_simulation_box_vectors.pkl", "rb") as f:
npt_simulation_box_vectors = pk.load(f)
npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
nvt_simulation.context.setPeriodicBoxVectors(
npt_simulation_box_vectors[0],
npt_simulation_box_vectors[1],
npt_simulation_box_vectors[2],
)
nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
nvt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=nvt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
nvt_simulation.minimizeEnergy()
nvt_simulation.step(nvt_steps)
nvt_simulation.saveState("nvt_simulation.state")
state = nvt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(nvt_simulation_box_vectors)
with open("nvt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(nvt_simulation_box_vectors, f)
print("Finished NVT Simulation")
def run_equilibration():
"""
Runs systematic simulated annealing followed by
NPT and NVT equilibration MD simulation.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "equilibration"
os.system("rm -rf equilibration")
os.system("mkdir equilibration")
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.pdb",
target_dir + "/" + "system_TIP3P.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.rst7",
target_dir + "/" + "system_TIP3P.rst7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system.pdb",
target_dir + "/" + "system.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "input_TIP3P.leap",
target_dir + "/" + "input_TIP3P.leap",
)
os.chdir(target_dir)
simulated_annealing()
npt_equilibration()
nvt_equilibration()
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
os.system("rm -rf input_TIP3P.leap")
os.chdir(cwd)
def create_starting_structures():
"""
Prepares starting structures for Amber GaMD simulations.
All input files required to run Amber GaMD simulations are
placed in the starting_structures directory.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
os.system("rm -rf starting_structures")
os.system("mkdir starting_structures")
shutil.copy(
cwd + "/" + "equilibration" + "/" + "system_nvt_output_last_frame.pdb",
target_dir + "/" + "system_nvt_output_last_frame.pdb",
)
os.chdir(target_dir)
fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# Save the tleap script to file
with open("final_input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system_nvt_output_last_frame.pdb
saveamberparm pdb system_final.prmtop system_final.inpcrd
saveamberparm pdb system_final.parm7 system_final.rst7
savepdb pdb system_final.pdb
quit
"""
)
os.system("tleap -f final_input_TIP3P.leap")
os.system("rm -rf leap.log")
os.system("rm -rf system_nvt_output_last_frame.pdb")
os.chdir(cwd)
def add_vec_inpcrd():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
(nvt_simulation_box_vectors[0][0]) * 10,
(nvt_simulation_box_vectors[1][1]) * 10,
(nvt_simulation_box_vectors[2][2]) * 10,
)
vectors = (
round(vectors[0], 7),
round(vectors[1], 7),
round(vectors[2], 7),
)
last_line = (
" "
+ str(vectors[0])
+ " "
+ str(vectors[1])
+ " "
+ str(vectors[2])
+ " 90.0000000"
+ " 90.0000000"
+ " 90.0000000"
)
with open("system_final.inpcrd", "a+") as f:
f.write(last_line)
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
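# Illustrative note on add_vec_inpcrd: the box lengths read from the pickled NVT
# box vectors are in nanometers, so the diagonal elements are multiplied by 10
# to obtain angstroms, rounded to 7 decimals, and appended to system_final.inpcrd
# together with three 90-degree box angles. The vectors below are hypothetical.
def _example_box_lengths_angstrom(
    box_vectors_nm=((2.5, 0.0, 0.0), (0.0, 2.5, 0.0), (0.0, 0.0, 2.5))
):
    # Diagonal elements are the box edge lengths; convert nm -> angstrom.
    return tuple(round(box_vectors_nm[i][i] * 10, 7) for i in range(3))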
def add_vec_prmtop():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the prmtop file.
Only to be used when the box dimensions are not
present in the prmtop file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
nvt_simulation_box_vectors[0][0],
nvt_simulation_box_vectors[1][1],
nvt_simulation_box_vectors[2][2],
)
vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)
oldbeta = "9.00000000E+01"
x = str(vectors[0]) + str(0) + "E+" + "01"
y = str(vectors[1]) + str(0) + "E+" + "01"
z = str(vectors[2]) + str(0) + "E+" + "01"
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
line3 = " " + oldbeta + " " + x + " " + y + " " + z
with open("system_final.prmtop") as i, open(
"system_intermediate_final.prmtop", "w"
) as f:
for line in i:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f.write(line)
os.system("rm -rf system_final.prmtop")
os.system("mv system_intermediate_final.prmtop system_final.prmtop")
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def create_filetree(
nst_lim=26000000,
ntw_x=1000,
nt_cmd=1000000,
n_teb=1000000,
n_tave=50000,
ntcmd_prep=200000,
nteb_prep=200000,
):
"""
Creates a directory named gamd_simulations. Inside
this directory, there are subdirectories for dihedral,
dual and total potential-boosted GaMD with upper and
lower threshold boosts separately.
Parameters
----------
nst_lim: int
        Total number of simulation steps, including the preparatory
        simulation. For example, if nst_lim = 26000000, then we may have
        2 ns of preparatory simulation i.e. 1000000 preparation steps
        and 50 ns of GaMD simulation i.e. 25000000 simulation steps
ntw_x: int
Saving coordinates of the simulation every ntw_x
timesteps. For example, 2 ps implies 1000 timesteps
nt_cmd: int
        Number of initial MD simulation steps; 2 ns of
preparatory simulation requires 1000000 preparation
timesteps
n_teb: int
Number of biasing MD simulation steps
n_tave: int
Number of simulation steps used to calculate the
average and standard deviation of potential energies
ntcmd_prep: int
Number of preparation conventional molecular dynamics
        steps. This is used for system equilibration;
        potential energies are not collected for statistics
nteb_prep: int
Number of preparation biasing molecular dynamics
simulation steps. This is used for system
equilibration
"""
cwd = os.getcwd()
os.system("rm -rf gamd_simulations")
os.system("mkdir gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations")
source_dir = cwd + "/" + "starting_structures"
target_dir = cwd + "/" + "gamd_simulations"
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
for i in range(len(dir_list)):
os.mkdir(dir_list[i])
os.chdir(target_dir + "/" + dir_list[i])
shutil.copy(
source_dir + "/" + "system_final.inpcrd",
target_dir + "/" + dir_list[i] + "/" + "system_final.inpcrd",
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + dir_list[i] + "/" + "system_final.prmtop",
)
if "lower" in dir_list[i]:
i_E = 1
if "upper" in dir_list[i]:
i_E = 2
if "total" in dir_list[i]:
i_gamd = 1
if "dihedral" in dir_list[i]:
i_gamd = 2
if "dual" in dir_list[i]:
i_gamd = 3
with open("md.in", "w") as f:
f.write("&cntrl" + "\n")
f.write(" imin = 0, irest = 0, ntx = 1," + "\n")
f.write(" nstlim = " + str(nst_lim) + ", dt = 0.002," + "\n")
f.write(" ntc = 2, ntf = 2, tol = 0.000001," + "\n")
f.write(" iwrap = 1, ntb = 1, cut = 8.0," + "\n")
f.write(" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, " + "\n")
f.write(
" ntpr = 500, ntwx = " + str(ntw_x) + ", ntwr = 500," + "\n"
)
f.write(" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0," + "\n")
f.write(
" igamd = "
+ str(i_gamd)
+ ", iE = "
+ str(i_E)
+ ", irest_gamd = 0,"
+ "\n"
)
f.write(
" ntcmd = "
+ str(nt_cmd)
+ ", nteb = "
+ str(n_teb)
+ ", ntave = "
+ str(n_tave)
+ ","
+ "\n"
)
f.write(
" ntcmdprep = "
+ str(ntcmd_prep)
+ ", ntebprep = "
+ str(nteb_prep)
+ ","
+ "\n"
)
f.write(" sigma0D = 6.0, sigma0P = 6.0" + " \n")
f.write("&end" + "\n")
os.chdir(target_dir)
os.chdir(cwd)
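# Worked example (illustrative) of the default md.in parameters written above,
# assuming the 2 fs timestep (dt = 0.002 ps) set in the input file:
#   nst_lim = 26000000 steps -> 52 ns in total
#   nt_cmd  =  1000000 steps ->  2 ns of conventional MD before the boost
#   n_teb   =  1000000 steps ->  2 ns to equilibrate the boost parameters,
#                                leaving 48 ns of production GaMD
#   ntw_x   =     1000 steps -> coordinates saved every 2 ps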
def run_simulations():
"""
Runs GaMD simulations for each of the dihedral, dual and total
potential boosts for both thresholds i.e. upper and lower potential
thresholds. (Remember to check md.in files for further details and
flag information).
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dihedral_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "dual_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_lower")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations" + "/" + "total_threshold_upper")
os.system(
"pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
)
os.chdir(cwd + "/" + "gamd_simulations")
os.chdir(cwd)
def create_data_files(
jump=10,
traj="system_final.nc",
topology="system_final.prmtop",
T=300,
):
"""
Extracts data from GaMD log files and saves them as
    weights.dat, Psi.dat and Phi_Psi.dat. The gamd.log file
    contains data excluding the initial equilibration MD
    simulation steps, but the trajectory output file contains
    all frames, including the initial equilibration MD steps.
    This is accounted for here to keep the data consistent.
Parameters
----------
jump: int
Every nth frame to be considered for reweighting
traj: str
System's trajectory file
topology: str
System's topology file
T: int
MD simulation temperature
"""
# To make data consistent with gamd.log and .nc file
factor = 0.001987 * T
with open("md.in") as f:
lines = f.readlines()
for i in lines:
if "nstlim =" in i:
nstlim_line = i
if "ntcmd =" in i:
ntcmd_line = i
if "ntwx =" in i:
ntwx_line = i
x = re.findall(r"\b\d+\b", ntcmd_line)
ntcmd = int(x[0])
x = re.findall(r"\b\d+\b", nstlim_line)
nstlim = int(x[0])
x = re.findall(r"\b\d+\b", ntwx_line)
ntwx = int(x[1])
# From the .nc trajectory files, we will not consider ntcmd trajectories
leave_frames = int(ntcmd / ntwx)
no_frames = int(nstlim / ntwx)
# Recheck conditions
file = open("gamd.log", "r")
number_of_lines = 0
for line in file:
line = line.strip("\n")
number_of_lines += 1
file.close()
f = open("gamd.log")
fourth_line = f.readlines()[3]
if str(ntcmd) in fourth_line:
datapoints = number_of_lines - 4
if not str(ntcmd) in fourth_line:
datapoints = number_of_lines - 3
print(datapoints == int((nstlim - ntcmd) / ntwx))
# Creating Psi.dat and Phi_Psi.dat
traj = md.load(traj, top=topology)
traj = traj[leave_frames:no_frames:jump]
phi = md.compute_phi(traj)
phi = phi[1] # 0:indices, 1:phi angles
phi = np.array([math.degrees(i) for i in phi]) # radians to degrees
psi = md.compute_psi(traj)
psi = psi[1] # 0:indices, 1:psi angles
psi = np.array([math.degrees(i) for i in psi]) # radians to degrees
    df_psi = pd.DataFrame(psi, columns=["Psi"])
    df_psi = df_psi.tail(int(datapoints))
    df_psi.to_csv("Psi.dat", sep="\t", index=False, header=False)
    df_phi = pd.DataFrame(phi, columns=["Phi"])
df_phi = df_phi.tail(int(datapoints))
df_phi_psi = pd.concat([df_phi, df_psi], axis=1)
df_phi_psi.to_csv("Phi_Psi.dat", sep="\t", index=False, header=False)
# Creating weights.dat
with open("gamd.log") as f:
lines = f.readlines()
column_names = lines[2]
column_names = column_names.replace("#", "")
column_names = column_names.replace("\n", "")
column_names = column_names.replace(" ", "")
column_names = column_names.split(",")
list_words = ["#"]
with open("gamd.log") as oldfile, open("data.log", "w") as newfile:
for line in oldfile:
if not any(word in line for word in list_words):
newfile.write(line)
df = pd.read_csv("data.log", delim_whitespace=True, header=None)
df.columns = column_names
df["dV(kcal/mol)"] = (
df["Boost-Energy-Potential"] + df["Boost-Energy-Dihedral"]
)
df["dV(kbT)"] = df["dV(kcal/mol)"] / factor
df_ = df[["dV(kbT)", "total_nstep", "dV(kcal/mol)"]]
df_ = df_[::jump]
df_.to_csv("weights.dat", sep="\t", index=False, header=False)
os.system("rm -rf data.log")
print(df_phi_psi.shape)
print(df_phi.shape)
print(df_.shape)
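# Illustrative arithmetic for the frame bookkeeping in create_data_files, using
# the default md.in values written by create_filetree:
#   ntcmd = 1000000 and ntwx = 1000   -> leave_frames = 1000 conventional-MD frames
#   nstlim = 26000000 and ntwx = 1000 -> no_frames = 26000 saved frames in total
# so traj[leave_frames:no_frames:jump] keeps every jump-th of the 25000 frames
# written after the conventional-MD stage, matching the data rows of gamd.log.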
def create_bins(lower_bound, width, upper_bound):
"""
    Creates bins given the lower and upper bounds
    and the width information.
"""
bins = []
for low in range(lower_bound, upper_bound, width):
bins.append([low, low + width])
return bins
def find_bin(value, bins):
"""
Finds which value belongs to which bin.
"""
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
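# Illustrative sketch of the binning helpers above, using the default dihedral
# range of [-180, 180] degrees with 10-degree bins:
def _example_binning():
    bins = create_bins(lower_bound=-180, width=10, upper_bound=180)
    assert len(bins) == 36  # [-180, -170], [-170, -160], ..., [170, 180]
    assert find_bin(-175.0, bins) == 0
    assert find_bin(5.0, bins) == 18  # the bin [0, 10]
    assert find_bin(500.0, bins) == -1  # out-of-range values map to -1
    return bins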
def reweight_1d(
binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001
):
"""
Reweights boosted potential energies in one-dimension based on
Maclaurin series expansion to one, two and three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Psi = pd.read_csv("Psi.dat", delim_whitespace=True, header=None)
df_Psi.columns = ["Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
hist, hist_edges = np.histogram(df_Psi[["Psi"]], bins=binsX, weights=None)
pstarA = [i / sum_total for i in list(hist)]
bins = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
data = df_Psi["Psi"].values.tolist()
binned_weights = []
for value in data:
bin_index = find_bin(value, bins)
binned_weights.append(bin_index)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df = pd.concat([df_index, df_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
####c1
df_c1.to_csv("c1_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_1d.txt", "r") as f1, open("pA_c1_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_1d.txt")
####c12
df_c12.to_csv("c12_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_1d.txt", "r") as f1, open("pA_c12_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_1d.txt")
####c123
df_c123.to_csv("c123_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_1d.txt", "r") as f1, open("pA_c123_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_1d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_1d.txt", "r") as f1, open(
"pA_c1_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_1d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_1d.txt", "r") as f1, open(
"pA_c12_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_1d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_1d.txt", "r") as f1, open(
"pA_c123_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_1d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_1d.txt", "r") as f1, open(
"c1_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_1d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_1d.txt", "r") as f1, open(
"c12_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_1d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_1d.txt", "r") as f1, open(
"c123_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_1d.txt")
####c1
indices_c1_1d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_1d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_1d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_1d.pickle", "wb") as f:
pk.dump(frames_c1_1d, f)
with open("indices_c1_1d.pickle", "wb") as f:
pk.dump(indices_c1_1d, f)
####c12
indices_c12_1d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_1d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_1d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_1d.pickle", "wb") as f:
pk.dump(frames_c12_1d, f)
with open("indices_c12_1d.pickle", "wb") as f:
pk.dump(indices_c12_1d, f)
####c123
indices_c123_1d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_1d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_1d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_1d.pickle", "wb") as f:
pk.dump(frames_c123_1d, f)
with open("indices_c123_1d.pickle", "wb") as f:
pk.dump(indices_c123_1d, f)
##saving probabilities for each selected frame
####c1
prob_c1_1d_list = []
for i in indices_c1_1d:
prob_c1_1d_list.append(df_c1["pA_c1"][i])
prob_c1_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_1d_list
)
)
prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]
with open("prob_c1_1d_list.pickle", "wb") as f:
pk.dump(prob_c1_1d_list, f)
####c12
prob_c12_1d_list = []
for i in indices_c12_1d:
prob_c12_1d_list.append(df_c12["pA_c12"][i])
prob_c12_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_1d_list
)
)
prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]
with open("prob_c12_1d_list.pickle", "wb") as f:
pk.dump(prob_c12_1d_list, f)
####c123
prob_c123_1d_list = []
for i in indices_c123_1d:
prob_c123_1d_list.append(df_c123["pA_c123"][i])
prob_c123_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_1d_list
)
)
prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]
with open("prob_c123_1d_list.pickle", "wb") as f:
pk.dump(prob_c123_1d_list, f)
ref_df_1d = pd.DataFrame(bins, columns=["dim0", "dim1"])
ref_df_1d["bins"] = ref_df_1d.agg(
lambda x: f"[{x["dim0"]} , {x["dim1"]}]", axis=1
)
ref_df_1d = ref_df_1d[["bins"]]
index_ref_1d = []
for i in range(len(bins)):
index_ref_1d.append(i)
index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=["index"])
df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)
df_ref_1d.to_csv("ref_1d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_1d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def reweight_2d(
binspace=10,
n_structures=4,
Xdim=[-180, 180],
Ydim=[-180, 180],
T=300.0,
min_prob=0.000001,
):
"""
Reweights boosted potential energies in two-dimensions
based on Maclaurin series expansion to one, two and
three degrees.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles (1st dimension)
Ydim: list
Range of dihedral angles (2nd dimension)
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Phi_Psi = pd.read_csv("Phi_Psi.dat", delim_whitespace=True, header=None)
df_Phi_Psi.columns = ["Phi", "Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Phi_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)
hist2D, hist_edgesX, hist_edgesY = np.histogram2d(
df_Phi_Psi["Phi"].values.tolist(),
df_Phi_Psi["Psi"].values.tolist(),
bins=(binsX, binsY),
weights=None,
)
pstarA_2D = [i / sum_total for i in list(hist2D)]
bins_tuple_X = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
bins_tuple_Y = create_bins(
lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])
)
bins = []
for i in range(len(bins_tuple_X)):
for j in range(len(bins_tuple_Y)):
bins.append([bins_tuple_X[i], bins_tuple_Y[j]])
pstarA = [item for elem in pstarA_2D for item in elem]
hist = [item for elem in hist2D for item in elem]
hist = [int(i) for i in hist]
data_X = df_Phi_Psi["Phi"].values.tolist()
binned_weights_X = []
for value in data_X:
bin_index_X = find_bin(value, bins_tuple_X)
binned_weights_X.append(bin_index_X)
data_Y = df_Phi_Psi["Psi"].values.tolist()
binned_weights_Y = []
for value in data_Y:
bin_index_Y = find_bin(value, bins_tuple_Y)
binned_weights_Y.append(bin_index_Y)
binned_weights_2D = []
for i in range(len(binned_weights_X)):
binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])
binned_weights = []
for i in range(len(binned_weights_2D)):
binned_weights.append(
(binned_weights_2D[i][0] * len(bins_tuple_Y))
+ (binned_weights_2D[i][1] + 1)
)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df_index["index"] = df_index["index"] - 1
df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
df_c1.to_csv("c1_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_2d.txt", "r") as f1, open("pA_c1_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_2d.txt")
####c12
df_c12.to_csv("c12_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_2d.txt", "r") as f1, open("pA_c12_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_2d.txt")
####c123
df_c123.to_csv("c123_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_2d.txt", "r") as f1, open("pA_c123_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_2d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_2d.txt", "r") as f1, open(
"pA_c1_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_2d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_2d.txt", "r") as f1, open(
"pA_c12_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_2d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_2d.txt", "r") as f1, open(
"pA_c123_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_2d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_2d.txt", "r") as f1, open(
"c1_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_2d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_2d.txt", "r") as f1, open(
"c12_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_2d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_2d.txt", "r") as f1, open(
"c123_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_2d.txt")
####c1
indices_c1_2d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_2d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_2d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_2d.pickle", "wb") as f:
pk.dump(frames_c1_2d, f)
with open("indices_c1_2d.pickle", "wb") as f:
pk.dump(indices_c1_2d, f)
####c12
indices_c12_2d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_2d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_2d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_2d.pickle", "wb") as f:
pk.dump(frames_c12_2d, f)
with open("indices_c12_2d.pickle", "wb") as f:
pk.dump(indices_c12_2d, f)
####c123
indices_c123_2d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_2d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_2d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_2d.pickle", "wb") as f:
pk.dump(frames_c123_2d, f)
with open("indices_c123_2d.pickle", "wb") as f:
pk.dump(indices_c123_2d, f)
##saving probabilities for each selected frame
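    # Each sampled frame inherits the reweighted probability of its bin,
    # split equally among the n_structures frames drawn from that bin.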
####c1
prob_c1_2d_list = []
for i in indices_c1_2d:
prob_c1_2d_list.append(df_c1["pA_c1"][i])
prob_c1_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_2d_list
)
)
prob_c1_2d_list = [x / n_structures for x in prob_c1_2d_list]
with open("prob_c1_2d_list.pickle", "wb") as f:
pk.dump(prob_c1_2d_list, f)
####c12
prob_c12_2d_list = []
for i in indices_c12_2d:
prob_c12_2d_list.append(df_c12["pA_c12"][i])
prob_c12_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_2d_list
)
)
prob_c12_2d_list = [x / n_structures for x in prob_c12_2d_list]
with open("prob_c12_2d_list.pickle", "wb") as f:
pk.dump(prob_c12_2d_list, f)
####c123
prob_c123_2d_list = []
for i in indices_c123_2d:
prob_c123_2d_list.append(df_c123["pA_c123"][i])
prob_c123_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_2d_list
)
)
prob_c123_2d_list = [x / n_structures for x in prob_c123_2d_list]
with open("prob_c123_2d_list.pickle", "wb") as f:
pk.dump(prob_c123_2d_list, f)
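    # Build a reference table mapping a serial bin index to its
    # "binsX , binsY" label; save_frames() reads ref_2d.txt to name the
    # extracted PDB files after their bins.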
ref_df_2d = pd.DataFrame(bins, columns=["binsX", "binsY"])
ref_df_2d["XY"] = ref_df_2d.agg(
lambda x: f"{x["binsX"]} , {x["binsX"]}", axis=1
)
ref_df_2d = ref_df_2d[["XY"]]
index_ref_2d = []
for i in range(len(bins_tuple_X) * len(bins_tuple_Y)):
index_ref_2d.append(i)
index_ref_df_2d = pd.DataFrame(index_ref_2d, columns=["index"])
df_ref_2d = pd.concat([ref_df_2d, index_ref_df_2d], axis=1)
df_ref_2d.to_csv("ref_2d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_2d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def save_frames():
"""
    Creates a directory named we_structures with six
    subdirectories: three for one-dimensional reweighing
    and three for two-dimensional reweighing. The frames
    selected with the first, second and third-degree
    Maclaurin series expansions are saved as PDB files
    in their respective folders.
"""
cwd = os.getcwd()
os.system("rm -rf we_structures")
os.system("mkdir we_structures")
os.chdir(cwd + "/" + "we_structures")
os.system("mkdir 1d_c1")
os.system("mkdir 1d_c12")
os.system("mkdir 1d_c123")
os.system("mkdir 2d_c1")
os.system("mkdir 2d_c12")
os.system("mkdir 2d_c123")
os.chdir(cwd)
df1 = pd.read_csv("df_1d.csv")
index = df1["index"].tolist()
frame = df1["frame_index"].tolist()
index_frame = dict(zip(frame, index))
df2 = pd.read_csv("ref_1d.txt", sep=" ", delimiter=None, header="infer")
index_ = df2["index"].tolist()
bins = df2["bins"].tolist()
index_bins = dict(zip(index_, bins))
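    # Map each trajectory frame to its 1D bin label via its bin index:
    # frame -> index (df_1d.csv) -> bin string (ref_1d.txt).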
#### 1d
with open("frames_c1_1d.pickle", "rb") as input_file:
frames_c1_1d = pk.load(input_file)
for i in frames_c1_1d:
j = index_frame[i]
frame_index = frames_c1_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c1_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c1"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c12_1d.pickle", "rb") as input_file:
frames_c12_1d = pk.load(input_file)
for i in frames_c12_1d:
j = index_frame[i]
frame_index = frames_c12_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c12_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c12"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c123_1d.pickle", "rb") as input_file:
frames_c123_1d = pk.load(input_file)
for i in frames_c123_1d:
j = index_frame[i]
frame_index = frames_c123_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c123_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c123"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
df1 = pd.read_csv("df_2d.csv")
index = df1["index"].tolist()
frame = df1["frame_index"].tolist()
index_frame = dict(zip(frame, index))
df2 = pd.read_csv("ref_2d.txt", sep=" ", delimiter=None, header="infer")
index_ = df2["index"].tolist()
bins = df2["XY"].tolist()
index_bins = dict(zip(index_, bins))
#### 2d
with open("frames_c1_2d.pickle", "rb") as input_file:
frames_c1_2d = pk.load(input_file)
for i in frames_c1_2d:
j = index_frame[i]
frame_index = frames_c1_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c1_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c1"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c12_2d.pickle", "rb") as input_file:
frames_c12_2d = pk.load(input_file)
for i in frames_c12_2d:
j = index_frame[i]
frame_index = frames_c12_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c12_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c12"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c123_2d.pickle", "rb") as input_file:
frames_c123_2d = pk.load(input_file)
for i in frames_c123_2d:
j = index_frame[i]
frame_index = frames_c123_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c123_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c123"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
def save_we_inputs():
"""
    Writes one weighted-ensemble input file per expansion
    in the simulation folder. Each line lists an index, the
    probability of a selected structure and its PDB file name.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "we_structures"
dir_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
os.chdir(target_dir + "/" + i)
pdbs = os.listdir(".")
pickle_file = "pdb_" + i + ".pickle"
with open(pickle_file, "wb") as f:
pk.dump(pdbs, f)
shutil.move(
target_dir + "/" + i + "/" + pickle_file, cwd + "/" + pickle_file
)
os.chdir(cwd)
# c1_1d
with open("prob_c1_1d_list.pickle", "rb") as input_file:
prob_c1_1d_list = pk.load(input_file)
prob_c1_1d_list = [i / min(prob_c1_1d_list) for i in prob_c1_1d_list]
prob_c1_1d_list = [i / sum(prob_c1_1d_list) for i in prob_c1_1d_list]
with open("pdb_1d_c1.pickle", "rb") as input_file:
pdb_1d_c1 = pk.load(input_file)
pdb_1d_c1_index = []
for i in range(len(pdb_1d_c1)):
pdb_1d_c1_index.append(int(re.findall(r"\d+", pdb_1d_c1[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c1, prob_c1_1d_list, pdb_1d_c1_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c1_1d.txt", header=False, index=None, sep=" ", mode="w"
)
# c12_1d
with open("prob_c12_1d_list.pickle", "rb") as input_file:
prob_c12_1d_list = pk.load(input_file)
prob_c12_1d_list = [i / min(prob_c12_1d_list) for i in prob_c12_1d_list]
prob_c12_1d_list = [i / sum(prob_c12_1d_list) for i in prob_c12_1d_list]
with open("pdb_1d_c12.pickle", "rb") as input_file:
pdb_1d_c12 = pk.load(input_file)
pdb_1d_c12_index = []
for i in range(len(pdb_1d_c12)):
pdb_1d_c12_index.append(int(re.findall(r"\d+", pdb_1d_c12[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c12, prob_c12_1d_list, pdb_1d_c12_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c12_1d.txt", header=False, index=None, sep=" ", mode="w"
)
# c123_1d
with open("prob_c123_1d_list.pickle", "rb") as input_file:
prob_c123_1d_list = pk.load(input_file)
prob_c123_1d_list = [i / min(prob_c123_1d_list) for i in prob_c123_1d_list]
prob_c123_1d_list = [i / sum(prob_c123_1d_list) for i in prob_c123_1d_list]
with open("pdb_1d_c123.pickle", "rb") as input_file:
pdb_1d_c123 = pk.load(input_file)
pdb_1d_c123_index = []
for i in range(len(pdb_1d_c123)):
pdb_1d_c123_index.append(int(re.findall(r"\d+", pdb_1d_c123[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c123, prob_c123_1d_list, pdb_1d_c123_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c123_1d.txt", header=False, index=None, sep=" ", mode="w"
)
# c1_2d
with open("prob_c1_2d_list.pickle", "rb") as input_file:
prob_c1_2d_list = pk.load(input_file)
prob_c1_2d_list = [i / min(prob_c1_2d_list) for i in prob_c1_2d_list]
prob_c1_2d_list = [i / sum(prob_c1_2d_list) for i in prob_c1_2d_list]
with open("pdb_2d_c1.pickle", "rb") as input_file:
pdb_2d_c1 = pk.load(input_file)
pdb_2d_c1_index = []
for i in range(len(pdb_2d_c1)):
pdb_2d_c1_index.append(int(re.findall(r"\d+", pdb_2d_c1[i])[0]))
df = pd.DataFrame(
list(zip(pdb_2d_c1, prob_c1_2d_list, pdb_2d_c1_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c1_2d.txt", header=False, index=None, sep=" ", mode="w"
)
# c12_2d
with open("prob_c12_2d_list.pickle", "rb") as input_file:
prob_c12_2d_list = pk.load(input_file)
prob_c12_2d_list = [i / min(prob_c12_2d_list) for i in prob_c12_2d_list]
prob_c12_2d_list = [i / sum(prob_c12_2d_list) for i in prob_c12_2d_list]
with open("pdb_2d_c12.pickle", "rb") as input_file:
pdb_2d_c12 = pk.load(input_file)
pdb_2d_c12_index = []
for i in range(len(pdb_2d_c12)):
pdb_2d_c12_index.append(int(re.findall(r"\d+", pdb_2d_c12[i])[0]))
df = pd.DataFrame(
list(zip(pdb_2d_c12, prob_c12_2d_list, pdb_2d_c12_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c12_2d.txt", header=False, index=None, sep=" ", mode="w"
)
# c123_2d
with open("prob_c123_2d_list.pickle", "rb") as input_file:
prob_c123_2d_list = pk.load(input_file)
prob_c123_2d_list = [i / min(prob_c123_2d_list) for i in prob_c123_2d_list]
prob_c123_2d_list = [i / sum(prob_c123_2d_list) for i in prob_c123_2d_list]
with open("pdb_2d_c123.pickle", "rb") as input_file:
pdb_2d_c123 = pk.load(input_file)
pdb_2d_c123_index = []
for i in range(len(pdb_2d_c123)):
pdb_2d_c123_index.append(int(re.findall(r"\d+", pdb_2d_c123[i])[0]))
df = pd.DataFrame(
list(zip(pdb_2d_c123, prob_c123_2d_list, pdb_2d_c123_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c123_2d.txt", header=False, index=None, sep=" ", mode="w"
)
def arrange_files():
"""
    Creates directories and moves files to the appropriate folders.
"""
cwd = os.getcwd()
os.system("rm -rf txt_csv_files")
os.system("rm -rf we_inputs")
os.system("rm -rf dat_files")
os.system("rm -rf pickle_files")
os.system("rm -rf system_files")
os.system("mkdir txt_csv_files")
os.system("mkdir we_inputs")
os.system("mkdir dat_files")
os.system("mkdir pickle_files")
os.system("mkdir system_files")
shutil.move(
cwd + "/" + "c1_frame_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c1_frame_1d.txt",
)
shutil.move(
cwd + "/" + "c12_frame_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c12_frame_1d.txt",
)
shutil.move(
cwd + "/" + "c123_frame_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c123_frame_1d.txt",
)
shutil.move(
cwd + "/" + "c1_frame_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c1_frame_2d.txt",
)
shutil.move(
cwd + "/" + "c12_frame_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c12_frame_2d.txt",
)
shutil.move(
cwd + "/" + "c123_frame_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c123_frame_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_arranged_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_arranged_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_arranged_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_arranged_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_arranged_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_arranged_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_arranged_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_arranged_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_arranged_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_arranged_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_arranged_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_arranged_2d.txt",
)
shutil.move(
cwd + "/" + "ref_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "ref_1d.txt",
)
shutil.move(
cwd + "/" + "ref_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "ref_2d.txt",
)
shutil.move(
cwd + "/" + "df_1d.csv",
cwd + "/" + "txt_csv_files" + "/" + "df_1d.csv",
)
shutil.move(
cwd + "/" + "df_2d.csv",
cwd + "/" + "txt_csv_files" + "/" + "df_2d.csv",
)
shutil.move(
cwd + "/" + "we_input_c1_1d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c1_1d.txt",
)
shutil.move(
cwd + "/" + "we_input_c12_1d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c12_1d.txt",
)
shutil.move(
cwd + "/" + "we_input_c123_1d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c123_1d.txt",
)
shutil.move(
cwd + "/" + "we_input_c1_2d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c1_2d.txt",
)
shutil.move(
cwd + "/" + "we_input_c12_2d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c12_2d.txt",
)
shutil.move(
cwd + "/" + "we_input_c123_2d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c123_2d.txt",
)
shutil.move(
cwd + "/" + "weights.dat",
cwd + "/" + "dat_files" + "/" + "weights.txt",
)
shutil.move(
cwd + "/" + "Psi.dat", cwd + "/" + "dat_files" + "/" + "Psi.txt"
)
shutil.move(
cwd + "/" + "Phi_Psi.dat",
cwd + "/" + "dat_files" + "/" + "Phi_Psi.txt",
)
shutil.move(
cwd + "/" + "prob_c1_1d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c1_1d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c12_1d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c12_1d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c123_1d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c123_1d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c1_2d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c1_2d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c12_2d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c12_2d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c123_2d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c123_2d_list.pickle",
)
shutil.move(
cwd + "/" + "pdb_1d_c1.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_1d_c1.pickle",
)
shutil.move(
cwd + "/" + "pdb_1d_c12.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_1d_c12.pickle",
)
shutil.move(
cwd + "/" + "pdb_1d_c123.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_1d_c123.pickle",
)
shutil.move(
cwd + "/" + "pdb_2d_c1.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_2d_c1.pickle",
)
shutil.move(
cwd + "/" + "pdb_2d_c12.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_2d_c12.pickle",
)
shutil.move(
cwd + "/" + "pdb_2d_c123.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_2d_c123.pickle",
)
shutil.move(
cwd + "/" + "frames_c1_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c1_1d.pickle",
)
shutil.move(
cwd + "/" + "frames_c12_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c12_1d.pickle",
)
shutil.move(
cwd + "/" + "frames_c123_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c123_1d.pickle",
)
shutil.move(
cwd + "/" + "frames_c1_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c1_2d.pickle",
)
shutil.move(
cwd + "/" + "frames_c12_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c12_2d.pickle",
)
shutil.move(
cwd + "/" + "frames_c123_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c123_2d.pickle",
)
shutil.move(
cwd + "/" + "indices_c1_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c1_1d.pickle",
)
shutil.move(
cwd + "/" + "indices_c12_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c12_1d.pickle",
)
shutil.move(
cwd + "/" + "indices_c123_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c123_1d.pickle",
)
shutil.move(
cwd + "/" + "indices_c1_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c1_2d.pickle",
)
shutil.move(
cwd + "/" + "indices_c12_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c12_2d.pickle",
)
shutil.move(
cwd + "/" + "indices_c123_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c123_2d.pickle",
)
shutil.move(
cwd + "/" + "system_final.inpcrd",
cwd + "/" + "system_files" + "/" + "system_final.inpcrd",
)
shutil.move(
cwd + "/" + "system_final.nc",
cwd + "/" + "system_files" + "/" + "system_final.nc",
)
shutil.move(
cwd + "/" + "system_final.out",
cwd + "/" + "system_files" + "/" + "system_final.out",
)
shutil.move(
cwd + "/" + "system_final.prmtop",
cwd + "/" + "system_files" + "/" + "system_final.prmtop",
)
shutil.move(
cwd + "/" + "system_final.rst",
cwd + "/" + "system_files" + "/" + "system_final.rst",
)
shutil.move(
cwd + "/" + "gamd.log", cwd + "/" + "system_files" + "/" + "gamd.log"
)
shutil.move(
cwd + "/" + "md.in", cwd + "/" + "system_files" + "/" + "md.in"
)
shutil.move(
cwd + "/" + "mdinfo", cwd + "/" + "system_files" + "/" + "mdinfo"
)
shutil.move(
cwd + "/" + "gamd-restart.dat",
cwd + "/" + "system_files" + "/" + "gamd-restart.dat",
)
def run_reweigh():
"""
    Runs the reweighing calculations systematically
    in each of the simulation folders.
"""
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
cwd = os.getcwd()
target_dir = cwd + "/" + "gamd_simulations" + "/"
# run reweighting and analysis in each of the simulation folder
for i in dir_list:
os.chdir(target_dir + i)
create_data_files()
reweight_1d()
reweight_2d()
save_frames()
save_we_inputs()
arrange_files()
os.chdir(cwd)
def save_westpa_inputs():
"""
Creates separate folders to initiate WE simulations.
"""
cwd = os.getcwd()
list_dir = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in list_dir:
os.chdir(cwd + "/" + "we_structures" + "/" + i)
files = os.listdir(".")
file_to_find = "*.pdb"
pdb_list = []
for x in files:
if fnmatch.fnmatch(x, file_to_find):
pdb_list.append(x)
for j in pdb_list:
fix_cap_remove_nme(j)
fix_cap_replace_nme(j)
inpcrd_file = j[:-4] + ".inpcrd"
filename = "input_" + j[:-4] + ".leap"
file = open(filename, "w")
file.write("source leaprc.protein.ff14SB" + "\n")
file.write("source leaprc.water.tip3p" + "\n")
file.write("set default FlexibleWater on" + "\n")
file.write("set default PBRadii mbondi2" + "\n")
file.write("pdb = loadpdb " + j + "\n")
file.write(
"saveamberparm pdb "
+ j[:-4]
+ ".prmtop "
+ j[:-4]
+ ".inpcrd"
+ "\n"
)
file.write("quit" + "\n")
file.close()
files = os.listdir(".")
file_to_find = "*.leap"
leap_list = []
for y in files:
if fnmatch.fnmatch(y, file_to_find):
leap_list.append(y)
for k in leap_list:
command = "tleap -f {}".format(k)
os.system(command)
os.system("rm -rf leap.log")
os.system("rm -rf *prmtop*")
os.system("rm -rf *leap*")
os.system("rm -rf bstates")
os.system("mkdir bstates")
for j in pdb_list:
shutil.move(
cwd
+ "/"
+ "we_structures"
+ "/"
+ i
+ "/"
+ j[:-4]
+ ".inpcrd",
cwd
+ "/"
+ "we_structures"
+ "/"
+ i
+ "/"
+ "bstates"
+ "/"
+ j[:-4]
+ ".inpcrd",
)
os.chdir(cwd)
os.system("rm -rf westpa_inputs")
os.system("mkdir westpa_inputs")
for l in list_dir:
os.chdir(cwd + "/" + "westpa_inputs")
command = "rm -rf {}".format(l)
os.system(command)
command = "mkdir {}".format(l)
os.system(command)
shutil.move(
cwd + "/" + "we_structures" + "/" + l + "/" + "bstates",
cwd + "/" + "westpa_inputs" + "/" + l + "/" + "bstates",
)
os.chdir(cwd)
shutil.copy(
cwd + "/" + "we_inputs" + "/" + "we_input_c1_1d.txt",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ list_dir[0]
+ "/"
+ "we_input_c1_1d.txt",
)
shutil.copy(
cwd + "/" + "we_inputs" + "/" + "we_input_c12_1d.txt",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ list_dir[1]
+ "/"
+ "we_input_c12_1d.txt",
)
shutil.copy(
cwd + "/" + "we_inputs" + "/" + "we_input_c123_1d.txt",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ list_dir[2]
+ "/"
+ "we_input_c123_1d.txt",
)
shutil.copy(
cwd + "/" + "we_inputs" + "/" + "we_input_c1_2d.txt",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ list_dir[3]
+ "/"
+ "we_input_c1_2d.txt",
)
shutil.copy(
cwd + "/" + "we_inputs" + "/" + "we_input_c12_2d.txt",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ list_dir[4]
+ "/"
+ "we_input_c12_2d.txt",
)
shutil.copy(
cwd + "/" + "we_inputs" + "/" + "we_input_c123_2d.txt",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ list_dir[5]
+ "/"
+ "we_input_c123_2d.txt",
)
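    # Rename each copied we_input file to BASIS_STATES, pointing its
    # entries at the generated .inpcrd files instead of the original PDBs.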
for i in list_dir:
os.chdir(cwd + "/" + "westpa_inputs" + "/" + i)
for file in os.listdir("."):
if fnmatch.fnmatch(file, "*.txt"):
file_to_rename = file
        with open(file_to_rename, "rt") as f:
            data = f.read()
        data = data.replace("pdb", "inpcrd")
        with open(file_to_rename, "wt") as f:
            f.write(data)
os.rename(file_to_rename, "BASIS_STATES")
os.chdir(cwd)
for i in list_dir:
os.chdir(cwd + "/" + "westpa_inputs" + "/" + i)
os.mkdir("CONFIG")
shutil.copy(
cwd + "/" + "system_files" + "/" + "system_final.prmtop",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ i
+ "/"
+ "CONFIG"
+ "/"
+ "system_final.prmtop",
)
os.chdir(cwd)
def run_westpa_inputs():
"""
    Systematically runs the save_westpa_inputs function in
    each of the simulation directories.
"""
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
cwd = os.getcwd()
source_dir = cwd + "/"
target_dir = cwd + "/" + "gamd_simulations" + "/"
for i in dir_list:
os.chdir(target_dir + i)
save_westpa_inputs()
os.chdir(cwd)
def transfer_files():
"""
    Deletes unnecessary files in the simulation
    directories and creates a new WE simulation
    folder (westpa_dir).
"""
os.system("rm -rf westpa_dir")
os.system("mkdir westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
cwd = os.getcwd()
source_dir = cwd + "/"
target_dir = cwd + "/" + "gamd_simulations" + "/"
for i in dir_list:
os.chdir(source_dir + "westpa_dir")
command = "mkdir {}".format(i)
os.system(command)
os.chdir(cwd)
for i in dir_list:
shutil.copytree(
target_dir + i + "/" + "westpa_inputs",
source_dir + "westpa_dir" + "/" + i + "/" "westpa_inputs",
)
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for j in we_list:
shutil.copytree(
source_dir
+ "westpa_dir"
+ "/"
+ i
+ "/"
+ "westpa_inputs"
+ "/"
+ j,
source_dir + "westpa_dir" + "/" + i + "/" + j,
)
dest_dir = source_dir + "westpa_dir" + "/" + i
os.chdir(dest_dir)
os.system("rm -rf westpa_inputs")
os.chdir(cwd)
os.chdir(cwd)
def add_vectors_westpa_files():
"""
    Adds the box vector dimensions to the generated inpcrd
    files. To be used only when the box vector dimensions
    are not already present on the last line of the inpcrd file.
"""
cwd = os.getcwd()
source_dir = cwd
westpa_dir = cwd + "/" + "westpa_dir"
os.chdir(source_dir + "/" + "starting_structures")
with open("system_final.inpcrd") as f:
for line in f:
pass
vector_information = line
print(vector_information)
os.chdir(source_dir)
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
os.chdir(westpa_dir + "/" + str(i))
for j in we_list:
os.chdir(
westpa_dir + "/" + str(i) + "/" + str(j) + "/" + "bstates"
)
files = os.listdir(".")
file_to_find = "*.inpcrd"
inpcrd_list = []
for k in files:
if fnmatch.fnmatch(k, file_to_find):
inpcrd_list.append(k)
for l in inpcrd_list:
with open(l, "a+") as f:
f.write(vector_information)
os.chdir(cwd)
def we_analysis():
"""
    Runs a short energy minimization (via pmemd.cuda) for each saved inpcrd file.
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
for j in we_list:
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i))
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j))
if len(open("BASIS_STATES").readlines()) > 0:
df = pd.read_csv("BASIS_STATES", delimiter=" ", header=None)
df.columns = [["descriptor", "probability", "file_name"]]
df1 = df[["file_name"]]
inpcrd_list = df1.values.tolist()
inpcrd_list = list(itertools.chain(*inpcrd_list))
os.system("rm -rf md_sims")
os.system("mkdir md_sims")
os.chdir(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
with open("md.in", "w") as f:
f.write(
"Run minimization followed by saving rst file" + "\n"
)
f.write("&cntrl" + "\n")
f.write(
" imin = 1, maxcyc = 10000, ntpr = 5, iwrap = 1, ntxo = 1"
+ "\n"
)
f.write("&end" + "\n")
for k in inpcrd_list:
source_dir = (
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "bstates"
)
target_dir = (
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
shutil.copy(
source_dir + "/" + str(k), target_dir + "/" + str(k)
)
source_dir = cwd + "/" + "starting_structures"
target_dir = (
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + "system_final.prmtop",
)
for l in range(len(inpcrd_list)):
command = (
"pmemd.cuda -O -i md.in -o "
+ inpcrd_list[l][:-6]
+ "out"
+ " -p system_final.prmtop -c "
+ inpcrd_list[l]
+ " -r "
+ inpcrd_list[l][:-6]
+ "rst"
)
print(command)
os.system(command)
os.chdir(cwd)
def correction_westpa():
"""
    Eliminates all inpcrd files that crashed during the short MD
    simulation run. Also creates folders for the .rst files in case
    they are needed for the WE simulations.
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
for j in we_list:
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i))
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j))
if len(open("BASIS_STATES").readlines()) > 0:
os.chdir(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
files = os.listdir(".")
file_to_find = "*.out"
out_list = []
for y in files:
if fnmatch.fnmatch(y, file_to_find):
out_list.append(y)
list_failed_jobs = []
for out_file in out_list:
with open(out_file, "r") as f:
last_line = f.readlines()[-2]
                        if not last_line.startswith("|"):
list_failed_jobs.append(out_file)
for c in range(len(list_failed_jobs)):
command = "rm -rf " + list_failed_jobs[c]
os.system(command)
for d in range(len(list_failed_jobs)):
command = "rm -rf " + list_failed_jobs[d][:-3] + "rst"
os.system(command)
for e in range(len(list_failed_jobs)):
command = "rm -rf " + list_failed_jobs[e][:-3] + "inpcrd"
os.system(command)
for f in range(len(list_failed_jobs)):
command = "rm -rf " + list_failed_jobs[f][:-3] + "nc"
os.system(command)
files = os.listdir(".")
file_to_find = "*.rst"
rst_list = []
for y in files:
if fnmatch.fnmatch(y, file_to_find):
rst_list.append(y)
rst_failed_jobs = []
for rst_file in rst_list:
with open(rst_file, "r") as f:
req_line = f.readlines()[2]
if "NaN" in req_line:
rst_failed_jobs.append(rst_file)
for g in range(len(rst_failed_jobs)):
command = "rm -rf " + rst_failed_jobs[g]
os.system(command)
for h in range(len(rst_failed_jobs)):
command = "rm -rf " + rst_failed_jobs[h][:-3] + "out"
os.system(command)
for u in range(len(rst_failed_jobs)):
command = "rm -rf " + rst_failed_jobs[u][:-3] + "inpcrd"
os.system(command)
for v in range(len(rst_failed_jobs)):
command = "rm -rf " + rst_failed_jobs[v][:-3] + "nc"
os.system(command)
files_2 = os.listdir(".")
file_to_find_2 = "*.rst"
rst_list_2 = []
for y in files_2:
if fnmatch.fnmatch(y, file_to_find_2):
rst_list_2.append(y)
rst_failed_jobs_2 = []
for rst_file_2 in rst_list_2:
with open(rst_file_2, "r") as f:
lines_file = f.readlines()
for req_line in lines_file:
if "*" in req_line:
rst_failed_jobs_2.append(rst_file_2)
for g in range(len(rst_failed_jobs_2)):
command = "rm -rf " + rst_failed_jobs_2[g]
os.system(command)
for h in range(len(rst_failed_jobs_2)):
command = "rm -rf " + rst_failed_jobs_2[h][:-3] + "out"
os.system(command)
for u in range(len(rst_failed_jobs_2)):
command = "rm -rf " + rst_failed_jobs_2[u][:-3] + "inpcrd"
os.system(command)
for v in range(len(rst_failed_jobs_2)):
command = "rm -rf " + rst_failed_jobs_2[v][:-3] + "nc"
os.system(command)
os.system("rm -rf md.in")
os.system("rm -rf system_final.prmtop")
os.system("rm -rf mdinfo")
files = os.listdir(".")
inpcrd_file_to_find = "*.inpcrd"
rst_file_to_find = "*.rst"
inpcrd_file_list = []
for y in files:
if fnmatch.fnmatch(y, inpcrd_file_to_find):
inpcrd_file_list.append(y)
rst_file_list = []
for z in files:
if fnmatch.fnmatch(z, rst_file_to_find):
rst_file_list.append(z)
os.chdir(
cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j)
)
os.system("rm -rf bstates_corrected_rst")
os.system("mkdir bstates_corrected_rst")
os.system("rm -rf bstates_corrected_inpcrd")
os.system("mkdir bstates_corrected_inpcrd")
for x in inpcrd_file_list:
shutil.copy(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
+ "/"
+ str(x),
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "bstates_corrected_inpcrd"
+ "/"
+ str(x),
)
for y in rst_file_list:
shutil.copy(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
+ "/"
+ str(y),
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "bstates_corrected_rst"
+ "/"
+ str(y),
)
df = pd.read_csv("BASIS_STATES", sep=" ", header=None)
df.columns = ["index_df", "probability", "inpcrd"]
df = df[["probability", "inpcrd"]]
df = df[df.inpcrd.str.contains("|".join(inpcrd_file_list))]
index_row_list = []
for n in range(df.shape[0]):
index_row_list.append(n)
df = df.assign(index_=index_row_list)
df = df[["index_", "probability", "inpcrd"]]
df.to_csv(
"BASIS_STATES_CORRECTED_INPCRD",
header=False,
index=None,
sep=" ",
mode="w",
)
fin = open("BASIS_STATES_CORRECTED_INPCRD", "rt")
fout = open("BASIS_STATES_CORRECTED_RST", "wt")
for line in fin:
fout.write(line.replace("inpcrd", "rst"))
fin.close()
fout.close()
os.chdir(cwd)
def plot_contrib():
"""
    Plots to review the analysis. Bar graphs show the
    number of structures obtained for the WE simulation
    for each of the potential boosts applied during the
    GaMD simulations.
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
confs = []
for i in dir_list:
conf_within = []
for j in we_list:
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i))
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j))
if len(open("BASIS_STATES").readlines()) > 0:
count1 = len(open("BASIS_STATES").readlines())
count2 = len(open("BASIS_STATES_CORRECTED_RST").readlines())
conf = str(i), str(j), count1, count2
conf_within.append(conf)
confs.append(conf_within)
print(confs)
os.chdir(cwd)
corrected_list = []
for i in range(len(confs)):
corrected_list_1 = []
for j in range(len(confs[i])):
corrected_list_1.append(confs[i][j][3])
corrected_list.append(corrected_list_1)
print(corrected_list)
expanse_list = []
for i in range(len(confs)):
expanse_list_1 = []
for j in range(len(confs[i])):
expanse_list_1.append(confs[i][j][1])
expanse_list.append(expanse_list_1)
print(expanse_list)
    for idx in range(len(dir_list)):
        x = expanse_list[idx]
        y = corrected_list[idx]
        title = "Configurations vs Different Expansions" + " for " + dir_list[idx]
        print(title)
        sns.set(font_scale=1)
        plt.rcParams["figure.figsize"] = (8, 4)
        plt.rcParams["font.family"] = "serif"
        style.use("fivethirtyeight")
        g = sns.barplot(y, x, palette="binary")
        g.grid(False)
        g.set_title(title)
        g.set(xlabel="Configurations", ylabel="Expansion")
        for i, v in enumerate(y):
            g.text(v + 1, i + 0.25, str(v), color="black", fontweight="bold")
        fig_name = dir_list[idx]
        plt.savefig(fig_name, bbox_inches="tight")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
rcParams["figure.figsize"] = 30, 20
plt.rcParams["axes.grid"] = False
img_1 = mpimg.imread("dihedral_threshold_lower.png")
img_2 = mpimg.imread("dihedral_threshold_upper.png")
img_3 = mpimg.imread("dual_threshold_lower.png")
img_4 = mpimg.imread("dual_threshold_upper.png")
img_5 = mpimg.imread("total_threshold_lower.png")
img_6 = mpimg.imread("total_threshold_upper.png")
fig, ax = plt.subplots(3, 2)
fig.suptitle("")
ax[0, 1].imshow(img_1)
ax[1, 1].imshow(img_2)
ax[0, 0].imshow(img_3)
ax[1, 0].imshow(img_4)
ax[2, 0].imshow(img_5)
ax[2, 1].imshow(img_6)
plt.savefig("analysis.png")
plt.show(block=False)
plt.pause(3)
plt.close()
cwd = os.getcwd()
os.system("rm -rf analysis")
os.system("mkdir analysis")
target_dir = cwd + "/" + "analysis"
command = "mv analysis.png " + target_dir
os.system(command)
os.system("rm -rf *.png*")
def clean_for_analysis():
"""
    Restructures the entire file tree to start the reweighing
    analysis again. Used only when the analysis needs to be
    rerun.
"""
os.system("rm -rf westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
cwd = os.getcwd()
source_dir = cwd + "/"
target_dir = cwd + "/" + "gamd_simulations" + "/"
for i in dir_list:
os.chdir(target_dir + i)
os.system(
"rm -rf pickle_files dat_files txt_csv_files we_inputs westpa_inputs we_structures"
)
os.chdir(cwd)
for i in dir_list:
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "gamd.log",
cwd + "/" + "gamd_simulations" + "/" + i + "/" + "gamd.log",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "gamd-restart.dat",
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "gamd-restart.dat",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "md.in",
cwd + "/" + "gamd_simulations" + "/" + i + "/" + "md.in",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "mdinfo",
cwd + "/" + "gamd_simulations" + "/" + i + "/" + "mdinfo",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "system_final.inpcrd",
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_final.inpcrd",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "system_final.nc",
cwd + "/" + "gamd_simulations" + "/" + i + "/" + "system_final.nc",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "system_final.out",
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_final.out",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "system_final.prmtop",
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_final.prmtop",
)
shutil.move(
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_files"
+ "/"
+ "system_final.rst",
cwd
+ "/"
+ "gamd_simulations"
+ "/"
+ i
+ "/"
+ "system_final.rst",
)
for i in dir_list:
os.chdir(target_dir + i)
os.system("rm -rf system_files")
os.chdir(cwd)
"""
prepare_alanine_dipeptide()
run_equilibration()
create_starting_structures()
add_vec_inpcrd()
add_vec_prmtop()
create_filetree()
run_simulations()
run_reweigh()
run_westpa_inputs()
transfer_files()
add_vectors_westpa_files()
we_analysis()
correction_westpa()
plot_contrib()
clean_for_analysis()
"""
import matplotlib.image as mpimg
import matplotlib.style as style
import matplotlib.pyplot as plt
from matplotlib import rcParams
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import seaborn as sns
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def fix_cap_remove_ace(pdb_file):
"""
Removes the H atoms of the capped ACE residue.
"""
remove_words = [
"H1 ACE",
"H2 ACE",
"H3 ACE",
"H31 ACE",
"H32 ACE",
"H33 ACE",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_ace(pdb_file):
"""
    Renames the cap carbon atom of the
    capped ACE residue to its standard name (CH3).
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA ACE", "CH3 ACE")
data = data.replace("C ACE", "CH3 ACE")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def fix_cap_remove_nme(pdb_file):
"""
Removes the H atoms of the capped NME residue.
"""
remove_words = [
"H1 NME",
"H2 NME",
"H3 NME",
"H31 NME",
"H32 NME",
"H33 NME",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_nme(pdb_file):
"""
    Renames the cap carbon atom of the
    capped NME residue to its standard name (CH3).
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA NME", "CH3 NME")
data = data.replace("C NME", "CH3 NME")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def prepare_alanine_dipeptide():
"""
Prepares the alanine dipeptide system for Gaussian
Accelerated Molecular Dynamics (GaMD) simulations.
Downloads the pdb structure from
https://markovmodel.github.io/mdshare/ALA2/ and
    parameterizes it with the ff14SB force field and
    the TIP3P water model (see input_TIP3P.leap below).
"""
os.system(
"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb"
)
os.system(
"rm -rf system_inputs"
) # Removes any existing directory named system_inputs
os.system("mkdir system_inputs") # Creates a directory named system_inputs
cwd = os.getcwd()
target_dir = cwd + "/" + "system_inputs"
os.system("pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb")
# Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)
remove_words = ["HH31 ACE", "HH32 ACE", "HH33 ACE"]
with open("intermediate.pdb") as oldfile, open(
"system.pdb", "w"
) as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
os.system("rm -rf intermediate*")
# save the tleap script to file
with open("input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system.pdb
solvateBox pdb TIP3PBOX 15
saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd
saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7
savepdb pdb system_TIP3P.pdb
quit
"""
)
os.system("tleap -f input_TIP3P.leap")
os.system("rm -rf leap.log")
shutil.copy(
cwd + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_TIP3P.pdb", target_dir + "/" + "system_TIP3P.pdb"
)
shutil.copy(
cwd + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_TIP3P.rst7", target_dir + "/" + "system_TIP3P.rst7"
)
shutil.copy(cwd + "/" + "system.pdb", target_dir + "/" + "system.pdb")
shutil.copy(
cwd + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "input_TIP3P.leap", target_dir + "/" + "input_TIP3P.leap"
)
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf input_TIP3P.leap")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
def create_vectors(x):
"""
    Extracts periodic box information from the
    given box-vectors object.
    """
    x = str(x)
    x = x.replace("Vec3", "")
    x = re.findall(r"\d*\.?\d+", x)
for i in range(0, len(x)):
x[i] = float(x[i])
x = tuple(x)
n = int(len(x) / 3)
x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]
return x
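# Hypothetical example of create_vectors(): a box-vectors string such as
# "(Vec3(x=2.5, y=0.0, z=0.0), Vec3(x=0.0, y=2.5, z=0.0), Vec3(x=0.0, y=0.0, z=2.5)) nm"
# would be parsed into [(2.5, 0.0, 0.0), (0.0, 2.5, 0.0), (0.0, 0.0, 2.5)].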
def simulated_annealing(
parm="system_TIP3P.prmtop",
rst="system_TIP3P.inpcrd",
annealing_output_pdb="system_annealing_output.pdb",
annealing_steps=100000,
pdb_freq=100000,
starting_temp=0,
target_temp=300,
temp_incr=3,
):
"""
Performs simulated annealing of the system from
0K to 300 K (default) using OpenMM MD engine and
saves the last frame of the simulation to be
accessed by the next simulation.
Parameters
----------
parm: str
System's topology file
rst: str
System's coordinate file
annealing_output_pdb: str
System's output trajectory file
annealing_steps: int
        Annealing steps at each temperature jump
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
starting_temp: int
Initial temperature of Simulated Annealing
target_temp: int
Final temperature of Simulated Annealing
temp_incr: int
        Temperature increase for every step
"""
prmtop = AmberPrmtopFile(parm)
inpcrd = AmberInpcrdFile(rst)
annealing_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
annealing_integrator = LangevinIntegrator(
0 * kelvin, 1 / picosecond, 2 * femtoseconds
)
total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
annealing_temp_range = int((target_temp / temp_incr) + 1)
annealing_platform = Platform.getPlatformByName("CUDA")
annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
annealing_simulation = Simulation(
prmtop.topology,
annealing_system,
annealing_integrator,
annealing_platform,
annealing_properties,
)
annealing_simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
annealing_simulation.minimizeEnergy()
annealing_simulation.reporters.append(
PDBReporter(annealing_output_pdb, pdb_freq)
)
simulated_annealing_last_frame = (
annealing_output_pdb[:-4] + "_last_frame.pdb"
)
annealing_simulation.reporters.append(
PDBReporter(simulated_annealing_last_frame, total_steps)
)
annealing_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=total_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
temp = starting_temp
while temp <= target_temp:
annealing_integrator.setTemperature(temp * kelvin)
if temp == starting_temp:
annealing_simulation.step(annealing_steps)
annealing_simulation.saveState("annealing.state")
else:
annealing_simulation.loadState("annealing.state")
annealing_simulation.step(annealing_steps)
temp += temp_incr
state = annealing_simulation.context.getState()
print(state.getPeriodicBoxVectors())
annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
print(annealing_simulation_box_vectors)
with open("annealing_simulation_box_vectors.pkl", "wb") as f:
pk.dump(annealing_simulation_box_vectors, f)
print("Finshed NVT Simulated Annealing Simulation")
def npt_equilibration(
parm="system_TIP3P.prmtop",
npt_output_pdb="system_npt_output.pdb",
pdb_freq=500000,
npt_steps=5000000,
target_temp=300,
npt_pdb="system_annealing_output_last_frame.pdb",
):
"""
Performs NPT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
npt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
npt_steps: int
NPT simulation steps
target_temp: int
Temperature for MD simulation
npt_pdb: str
Last frame of the simulation
"""
npt_init_pdb = PDBFile(npt_pdb)
prmtop = AmberPrmtopFile(parm)
npt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
npt_system.addForce(barostat)
npt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
npt_platform = Platform.getPlatformByName("CUDA")
npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
npt_simulation = Simulation(
prmtop.topology,
npt_system,
npt_integrator,
npt_platform,
npt_properties,
)
npt_simulation.context.setPositions(npt_init_pdb.positions)
npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("annealing_simulation_box_vectors.pkl", "rb") as f:
annealing_simulation_box_vectors = pk.load(f)
annealing_simulation_box_vectors = create_vectors(
annealing_simulation_box_vectors
)
npt_simulation.context.setPeriodicBoxVectors(
annealing_simulation_box_vectors[0],
annealing_simulation_box_vectors[1],
annealing_simulation_box_vectors[2],
)
npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
npt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=npt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
npt_simulation.minimizeEnergy()
npt_simulation.step(npt_steps)
npt_simulation.saveState("npt_simulation.state")
state = npt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
npt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(npt_simulation_box_vectors)
with open("npt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(npt_simulation_box_vectors, f)
print("Finished NPT Simulation")
def nvt_equilibration(
parm="system_TIP3P.prmtop",
nvt_output_pdb="system_nvt_output.pdb",
pdb_freq=500000,
nvt_steps=5000000,
target_temp=300,
nvt_pdb="system_npt_output_last_frame.pdb",
):
"""
Performs NVT equilibration MD of the system
    using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
nvt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
nvt_steps: int
NVT simulation steps
target_temp: int
Temperature for MD simulation
nvt_pdb: str
Last frame of the simulation
"""
nvt_init_pdb = PDBFile(nvt_pdb)
prmtop = AmberPrmtopFile(parm)
nvt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
nvt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
nvt_platform = Platform.getPlatformByName("CUDA")
nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
nvt_simulation = Simulation(
prmtop.topology,
nvt_system,
nvt_integrator,
nvt_platform,
nvt_properties,
)
nvt_simulation.context.setPositions(nvt_init_pdb.positions)
nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("npt_simulation_box_vectors.pkl", "rb") as f:
npt_simulation_box_vectors = pk.load(f)
npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
nvt_simulation.context.setPeriodicBoxVectors(
npt_simulation_box_vectors[0],
npt_simulation_box_vectors[1],
npt_simulation_box_vectors[2],
)
nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
nvt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=nvt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
nvt_simulation.minimizeEnergy()
nvt_simulation.step(nvt_steps)
nvt_simulation.saveState("nvt_simulation.state")
state = nvt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(nvt_simulation_box_vectors)
with open("nvt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(nvt_simulation_box_vectors, f)
print("Finished NVT Simulation")
def run_equilibration():
"""
Runs systematic simulated annealing followed by
NPT and NVT equilibration MD simulation.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "equilibration"
os.system("rm -rf equilibration")
os.system("mkdir equilibration")
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.pdb",
target_dir + "/" + "system_TIP3P.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.rst7",
target_dir + "/" + "system_TIP3P.rst7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system.pdb",
target_dir + "/" + "system.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "input_TIP3P.leap",
target_dir + "/" + "input_TIP3P.leap",
)
os.chdir(target_dir)
simulated_annealing()
npt_equilibration()
nvt_equilibration()
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
os.system("rm -rf input_TIP3P.leap")
os.chdir(cwd)
def create_starting_structures():
"""
Prepares starting structures for Amber GaMD simulations.
All input files required to run Amber GaMD simulations are
placed in the starting_structures directory.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
os.system("rm -rf starting_structures")
os.system("mkdir starting_structures")
shutil.copy(
cwd + "/" + "equilibration" + "/" + "system_nvt_output_last_frame.pdb",
target_dir + "/" + "system_nvt_output_last_frame.pdb",
)
os.chdir(target_dir)
fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# Save the tleap script to file
with open("final_input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system_nvt_output_last_frame.pdb
saveamberparm pdb system_final.prmtop system_final.inpcrd
saveamberparm pdb system_final.parm7 system_final.rst7
savepdb pdb system_final.pdb
quit
"""
)
os.system("tleap -f final_input_TIP3P.leap")
os.system("rm -rf leap.log")
os.system("rm -rf system_nvt_output_last_frame.pdb")
os.chdir(cwd)
def add_vec_inpcrd():
"""
    Adds the box dimensions, captured from the last saved
    frame of the NVT simulation, to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
(nvt_simulation_box_vectors[0][0]) * 10,
(nvt_simulation_box_vectors[1][1]) * 10,
(nvt_simulation_box_vectors[2][2]) * 10,
)
vectors = (
round(vectors[0], 7),
round(vectors[1], 7),
round(vectors[2], 7),
)
last_line = (
" "
+ str(vectors[0])
+ " "
+ str(vectors[1])
+ " "
+ str(vectors[2])
+ " 90.0000000"
+ " 90.0000000"
+ " 90.0000000"
)
with open("system_final.inpcrd", "a+") as f:
f.write(last_line)
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def add_vec_prmtop():
"""
    Adds the box dimensions, captured from the last saved
    frame of the NVT simulation, to the prmtop file.
Only to be used when the box dimensions are not
present in the prmtop file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
nvt_simulation_box_vectors[0][0],
nvt_simulation_box_vectors[1][1],
nvt_simulation_box_vectors[2][2],
)
vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)
oldbeta = "9.00000000E+01"
x = str(vectors[0]) + str(0) + "E+" + "01"
y = str(vectors[1]) + str(0) + "E+" + "01"
z = str(vectors[2]) + str(0) + "E+" + "01"
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
line3 = " " + oldbeta + " " + x + " " + y + " " + z
with open("system_final.prmtop") as i, open(
"system_intermediate_final.prmtop", "w"
) as f:
for line in i:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f.write(line)
os.system("rm -rf system_final.prmtop")
os.system("mv system_intermediate_final.prmtop system_final.prmtop")
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def create_filetree(
nst_lim=26000000,
ntw_x=1000,
nt_cmd=1000000,
n_teb=1000000,
n_tave=50000,
ntcmd_prep=200000,
nteb_prep=200000,
):
"""
Creates a directory named gamd_simulations. Inside
this directory, there are subdirectories for dihedral,
dual and total potential-boosted GaMD with upper and
lower threshold boosts separately.
Parameters
----------
    nst_lim: int
        Total number of simulation steps, including the preparatory
        conventional MD. For example, with a 2 fs timestep,
        nst_lim = 26000000 corresponds to 2 ns of preparatory
        simulation (1000000 steps) followed by 50 ns of GaMD
        simulation (25000000 steps)
    ntw_x: int
        Interval, in timesteps, at which simulation coordinates are
        saved. For example, ntw_x = 1000 corresponds to every 2 ps
        at a 2 fs timestep
    nt_cmd: int
        Number of initial conventional MD steps; 2 ns of preparatory
        simulation corresponds to 1000000 timesteps
    n_teb: int
        Number of biasing MD simulation steps
    n_tave: int
        Number of simulation steps used to calculate the
        average and standard deviation of potential energies
    ntcmd_prep: int
        Number of preparation conventional molecular dynamics
        steps. These are used for system equilibration and their
        potential energies are not collected for statistics
    nteb_prep: int
        Number of preparation biasing molecular dynamics
        simulation steps, also used for system equilibration
"""
cwd = os.getcwd()
os.system("rm -rf gamd_simulations")
os.system("mkdir gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations")
source_dir = cwd + "/" + "starting_structures"
target_dir = cwd + "/" + "gamd_simulations"
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
for i in range(len(dir_list)):
os.mkdir(dir_list[i])
os.chdir(target_dir + "/" + dir_list[i])
shutil.copy(
source_dir + "/" + "system_final.inpcrd",
target_dir + "/" + dir_list[i] + "/" + "system_final.inpcrd",
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + dir_list[i] + "/" + "system_final.prmtop",
)
if "lower" in dir_list[i]:
i_E = 1
if "upper" in dir_list[i]:
i_E = 2
if "total" in dir_list[i]:
i_gamd = 1
if "dihedral" in dir_list[i]:
i_gamd = 2
if "dual" in dir_list[i]:
i_gamd = 3
with open("md.in", "w") as f:
f.write("&cntrl" + "\n")
f.write(" imin = 0, irest = 0, ntx = 1," + "\n")
f.write(" nstlim = " + str(nst_lim) + ", dt = 0.002," + "\n")
f.write(" ntc = 2, ntf = 2, tol = 0.000001," + "\n")
f.write(" iwrap = 1, ntb = 1, cut = 8.0," + "\n")
f.write(" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, " + "\n")
f.write(
" ntpr = 500, ntwx = " + str(ntw_x) + ", ntwr = 500," + "\n"
)
f.write(" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0," + "\n")
f.write(
" igamd = "
+ str(i_gamd)
+ ", iE = "
+ str(i_E)
+ ", irest_gamd = 0,"
+ "\n"
)
f.write(
" ntcmd = "
+ str(nt_cmd)
+ ", nteb = "
+ str(n_teb)
+ ", ntave = "
+ str(n_tave)
+ ","
+ "\n"
)
f.write(
" ntcmdprep = "
+ str(ntcmd_prep)
+ ", ntebprep = "
+ str(nteb_prep)
+ ","
+ "\n"
)
f.write(" sigma0D = 6.0, sigma0P = 6.0" + " \n")
f.write("&end" + "\n")
os.chdir(target_dir)
os.chdir(cwd)
def run_simulations():
"""
Runs GaMD simulations for each of the dihedral, dual and total
potential boosts for both thresholds i.e. upper and lower potential
thresholds. (Remember to check md.in files for further details and
flag information).
"""
    cwd = os.getcwd()
    dir_list = [
        "dihedral_threshold_lower",
        "dihedral_threshold_upper",
        "dual_threshold_lower",
        "dual_threshold_upper",
        "total_threshold_lower",
        "total_threshold_upper",
    ]
    # Run pmemd.cuda with identical input/output names in each GaMD directory
    for dir_name in dir_list:
        os.chdir(cwd + "/" + "gamd_simulations" + "/" + dir_name)
        os.system(
            "pmemd.cuda -O -i md.in -o system_final.out -p system_final.prmtop -c system_final.inpcrd -r system_final.rst -x system_final.nc"
        )
    os.chdir(cwd)
def create_data_files(
jump=10,
traj="system_final.nc",
topology="system_final.prmtop",
T=300,
):
"""
    Extracts data from the GaMD log file and saves it as
    weights.dat, Psi.dat and Phi_Psi.dat. The gamd.log file
    contains data only for the steps after the initial
    equilibration MD, whereas the trajectory output file
    contains every frame, including those from the initial
    equilibration MD. This function accounts for that offset
    to keep the two data sets consistent.
Parameters
----------
jump: int
Every nth frame to be considered for reweighting
traj: str
System's trajectory file
topology: str
System's topology file
T: int
MD simulation temperature
"""
# To make data consistent with gamd.log and .nc file
factor = 0.001987 * T
with open("md.in") as f:
lines = f.readlines()
for i in lines:
if "nstlim =" in i:
nstlim_line = i
if "ntcmd =" in i:
ntcmd_line = i
if "ntwx =" in i:
ntwx_line = i
x = re.findall(r"\b\d+\b", ntcmd_line)
ntcmd = int(x[0])
x = re.findall(r"\b\d+\b", nstlim_line)
nstlim = int(x[0])
x = re.findall(r"\b\d+\b", ntwx_line)
ntwx = int(x[1])
    # Skip the first ntcmd frames of the .nc trajectory (conventional MD part)
    leave_frames = int(ntcmd / ntwx)
    no_frames = int(nstlim / ntwx)
    # Sanity check: the number of data points in gamd.log should equal the
    # number of boosted-MD frames written to the trajectory
    with open("gamd.log") as f:
        number_of_lines = sum(1 for _ in f)
    with open("gamd.log") as f:
        fourth_line = f.readlines()[3]
    if str(ntcmd) in fourth_line:
        datapoints = number_of_lines - 4
    else:
        datapoints = number_of_lines - 3
    print(datapoints == int((nstlim - ntcmd) / ntwx))
# Creating Psi.dat and Phi_Psi.dat
traj = md.load(traj, top=topology)
traj = traj[leave_frames:no_frames:jump]
    phi = md.compute_phi(traj)
    phi = phi[1]  # 0: indices, 1: phi angles
    phi = np.degrees(phi)  # radians to degrees
    psi = md.compute_psi(traj)
    psi = psi[1]  # 0: indices, 1: psi angles
    psi = np.degrees(psi)  # radians to degrees
    df_psi = pd.DataFrame(psi, columns=["Psi"])
    df_psi = df_psi.tail(int(datapoints))
    df_psi.to_csv("Psi.dat", sep="\t", index=False, header=False)
    df_phi = pd.DataFrame(phi, columns=["Phi"])
df_phi = df_phi.tail(int(datapoints))
df_phi_psi = pd.concat([df_phi, df_psi], axis=1)
df_phi_psi.to_csv("Phi_Psi.dat", sep="\t", index=False, header=False)
# Creating weights.dat
with open("gamd.log") as f:
lines = f.readlines()
column_names = lines[2]
column_names = column_names.replace("#", "")
column_names = column_names.replace("\n", "")
column_names = column_names.replace(" ", "")
column_names = column_names.split(",")
list_words = ["#"]
with open("gamd.log") as oldfile, open("data.log", "w") as newfile:
for line in oldfile:
if not any(word in line for word in list_words):
newfile.write(line)
df = pd.read_csv("data.log", delim_whitespace=True, header=None)
df.columns = column_names
df["dV(kcal/mol)"] = (
df["Boost-Energy-Potential"] + df["Boost-Energy-Dihedral"]
)
df["dV(kbT)"] = df["dV(kcal/mol)"] / factor
df_ = df[["dV(kbT)", "total_nstep", "dV(kcal/mol)"]]
df_ = df_[::jump]
df_.to_csv("weights.dat", sep="\t", index=False, header=False)
os.system("rm -rf data.log")
print(df_phi_psi.shape)
print(df_phi.shape)
print(df_.shape)
def create_bins(lower_bound, width, upper_bound):
"""
    Creates bins given the lower bound, the upper bound
    and the bin width.
"""
bins = []
for low in range(lower_bound, upper_bound, width):
bins.append([low, low + width])
return bins
def find_bin(value, bins):
"""
    Finds the bin to which a given value belongs;
    returns -1 if the value falls outside every bin.
    """
    for i in range(len(bins)):
        if bins[i][0] <= value < bins[i][1]:
            return i
return -1
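# A minimal illustrative sketch of how create_bins and find_bin work together
# (values are hypothetical, not taken from a real run):
#
#     bins = create_bins(lower_bound=-180, width=10, upper_bound=180)
#     # -> [[-180, -170], [-170, -160], ..., [170, 180]] (36 bins)
#     find_bin(-174.5, bins)  # -> 0, since -180 <= -174.5 < -170
#     find_bin(200.0, bins)   # -> -1, value outside every bin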
def reweight_1d(
binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001
):
"""
    Reweights the boosted potential energies along one dihedral
    (read from Psi.dat) using Maclaurin series expansions truncated
    at first, second and third order.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles
T: float
MD simulation temperature
min_prob: float
        Minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Psi = pd.read_csv("Psi.dat", delim_whitespace=True, header=None)
df_Psi.columns = ["Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
hist, hist_edges = np.histogram(df_Psi[["Psi"]], bins=binsX, weights=None)
pstarA = [i / sum_total for i in list(hist)]
bins = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
data = df_Psi["Psi"].values.tolist()
binned_weights = []
for value in data:
bin_index = find_bin(value, bins)
binned_weights.append(bin_index)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df = pd.concat([df_index, df_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
####c1
df_c1.to_csv("c1_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_1d.txt", "r") as f1, open("pA_c1_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_1d.txt")
####c12
df_c12.to_csv("c12_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_1d.txt", "r") as f1, open("pA_c12_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_1d.txt")
####c123
df_c123.to_csv("c123_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_1d.txt", "r") as f1, open("pA_c123_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_1d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_1d.txt", "r") as f1, open(
"pA_c1_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_1d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_1d.txt", "r") as f1, open(
"pA_c12_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_1d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_1d.txt", "r") as f1, open(
"pA_c123_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_1d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_1d.txt", "r") as f1, open(
"c1_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_1d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_1d.txt", "r") as f1, open(
"c12_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_1d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_1d.txt", "r") as f1, open(
"c123_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_1d.txt")
####c1
indices_c1_1d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_1d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_1d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_1d.pickle", "wb") as f:
pk.dump(frames_c1_1d, f)
with open("indices_c1_1d.pickle", "wb") as f:
pk.dump(indices_c1_1d, f)
####c12
indices_c12_1d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_1d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_1d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_1d.pickle", "wb") as f:
pk.dump(frames_c12_1d, f)
with open("indices_c12_1d.pickle", "wb") as f:
pk.dump(indices_c12_1d, f)
####c123
indices_c123_1d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_1d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_1d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_1d.pickle", "wb") as f:
pk.dump(frames_c123_1d, f)
with open("indices_c123_1d.pickle", "wb") as f:
pk.dump(indices_c123_1d, f)
##saving probabilities for each selected frame
####c1
prob_c1_1d_list = []
for i in indices_c1_1d:
prob_c1_1d_list.append(df_c1["pA_c1"][i])
prob_c1_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_1d_list
)
)
prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]
with open("prob_c1_1d_list.pickle", "wb") as f:
pk.dump(prob_c1_1d_list, f)
####c12
prob_c12_1d_list = []
for i in indices_c12_1d:
prob_c12_1d_list.append(df_c12["pA_c12"][i])
prob_c12_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_1d_list
)
)
prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]
with open("prob_c12_1d_list.pickle", "wb") as f:
pk.dump(prob_c12_1d_list, f)
####c123
prob_c123_1d_list = []
for i in indices_c123_1d:
prob_c123_1d_list.append(df_c123["pA_c123"][i])
prob_c123_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_1d_list
)
)
prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]
with open("prob_c123_1d_list.pickle", "wb") as f:
pk.dump(prob_c123_1d_list, f)
ref_df_1d = pd.DataFrame(bins, columns=["dim0", "dim1"])
ref_df_1d["bins"] = ref_df_1d.agg(
lambda x: f"[{x['dim0']} , {x['dim1']}]", axis=1
)
ref_df_1d = ref_df_1d[["bins"]]
index_ref_1d = []
for i in range(len(bins)):
index_ref_1d.append(i)
index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=["index"])
df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)
df_ref_1d.to_csv("ref_1d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_1d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def reweight_2d(
binspace=10,
n_structures=4,
Xdim=[-180, 180],
Ydim=[-180, 180],
T=300.0,
min_prob=0.000001,
):
"""
    Reweights the boosted potential energies in two dimensions
    (Phi, Psi) using Maclaurin series expansions truncated at
    first, second and third order.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles (1st dimension)
Ydim: list
Range of dihedral angles (2nd dimension)
T: float
MD simulation temperature
min_prob: float
        Minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Phi_Psi = pd.read_csv("Phi_Psi.dat", delim_whitespace=True, header=None)
df_Phi_Psi.columns = ["Phi", "Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Phi_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)
hist2D, hist_edgesX, hist_edgesY = np.histogram2d(
df_Phi_Psi["Phi"].values.tolist(),
df_Phi_Psi["Psi"].values.tolist(),
bins=(binsX, binsY),
weights=None,
)
pstarA_2D = [i / sum_total for i in list(hist2D)]
bins_tuple_X = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
bins_tuple_Y = create_bins(
lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])
)
bins = []
for i in range(len(bins_tuple_X)):
for j in range(len(bins_tuple_Y)):
bins.append([bins_tuple_X[i], bins_tuple_Y[j]])
pstarA = [item for elem in pstarA_2D for item in elem]
hist = [item for elem in hist2D for item in elem]
hist = [int(i) for i in hist]
data_X = df_Phi_Psi["Phi"].values.tolist()
binned_weights_X = []
for value in data_X:
bin_index_X = find_bin(value, bins_tuple_X)
binned_weights_X.append(bin_index_X)
data_Y = df_Phi_Psi["Psi"].values.tolist()
binned_weights_Y = []
for value in data_Y:
bin_index_Y = find_bin(value, bins_tuple_Y)
binned_weights_Y.append(bin_index_Y)
binned_weights_2D = []
for i in range(len(binned_weights_X)):
binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])
binned_weights = []
for i in range(len(binned_weights_2D)):
binned_weights.append(
(binned_weights_2D[i][0] * len(bins_tuple_Y))
+ (binned_weights_2D[i][1] + 1)
)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df_index["index"] = df_index["index"] - 1
df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
df_c1.to_csv("c1_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_2d.txt", "r") as f1, open("pA_c1_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_2d.txt")
####c12
df_c12.to_csv("c12_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_2d.txt", "r") as f1, open("pA_c12_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_2d.txt")
####c123
df_c123.to_csv("c123_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_2d.txt", "r") as f1, open("pA_c123_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_2d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_2d.txt", "r") as f1, open(
"pA_c1_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_2d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_2d.txt", "r") as f1, open(
"pA_c12_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_2d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_2d.txt", "r") as f1, open(
"pA_c123_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_2d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_2d.txt", "r") as f1, open(
"c1_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_2d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_2d.txt", "r") as f1, open(
"c12_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_2d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_2d.txt", "r") as f1, open(
"c123_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_2d.txt")
####c1
indices_c1_2d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_2d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_2d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_2d.pickle", "wb") as f:
pk.dump(frames_c1_2d, f)
with open("indices_c1_2d.pickle", "wb") as f:
pk.dump(indices_c1_2d, f)
####c12
indices_c12_2d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_2d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_2d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_2d.pickle", "wb") as f:
pk.dump(frames_c12_2d, f)
with open("indices_c12_2d.pickle", "wb") as f:
pk.dump(indices_c12_2d, f)
####c123
indices_c123_2d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_2d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_2d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_2d.pickle", "wb") as f:
pk.dump(frames_c123_2d, f)
with open("indices_c123_2d.pickle", "wb") as f:
pk.dump(indices_c123_2d, f)
##saving probabilities for each selected frame
####c1
prob_c1_2d_list = []
for i in indices_c1_2d:
prob_c1_2d_list.append(df_c1["pA_c1"][i])
prob_c1_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_2d_list
)
)
prob_c1_2d_list = [x / n_structures for x in prob_c1_2d_list]
with open("prob_c1_2d_list.pickle", "wb") as f:
pk.dump(prob_c1_2d_list, f)
####c12
prob_c12_2d_list = []
for i in indices_c12_2d:
prob_c12_2d_list.append(df_c12["pA_c12"][i])
prob_c12_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_2d_list
)
)
prob_c12_2d_list = [x / n_structures for x in prob_c12_2d_list]
with open("prob_c12_2d_list.pickle", "wb") as f:
pk.dump(prob_c12_2d_list, f)
####c123
prob_c123_2d_list = []
for i in indices_c123_2d:
prob_c123_2d_list.append(df_c123["pA_c123"][i])
prob_c123_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_2d_list
)
)
prob_c123_2d_list = [x / n_structures for x in prob_c123_2d_list]
with open("prob_c123_2d_list.pickle", "wb") as f:
pk.dump(prob_c123_2d_list, f)
ref_df_2d = pd.DataFrame(bins, columns=["binsX", "binsY"])
ref_df_2d["XY"] = ref_df_2d.agg(
lambda x: f"{x['binsX']} , {x['binsX']}", axis=1
)
ref_df_2d = ref_df_2d[["XY"]]
index_ref_2d = []
for i in range(len(bins_tuple_X) * len(bins_tuple_Y)):
index_ref_2d.append(i)
index_ref_df_2d = pd.DataFrame(index_ref_2d, columns=["index"])
df_ref_2d = pd.concat([ref_df_2d, index_ref_df_2d], axis=1)
df_ref_2d.to_csv("ref_2d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_2d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def save_frames():
"""
    Creates a directory named we_structures. Inside this
    directory, there are six subdirectories (three for
    one-dimensional reweighting and three for two-dimensional
    reweighting). The frames selected with the one-, two- and
    three-term Maclaurin series reweighting are placed in
    their respective folders.
"""
cwd = os.getcwd()
os.system("rm -rf we_structures")
os.system("mkdir we_structures")
os.chdir(cwd + "/" + "we_structures")
os.system("mkdir 1d_c1")
os.system("mkdir 1d_c12")
os.system("mkdir 1d_c123")
os.system("mkdir 2d_c1")
os.system("mkdir 2d_c12")
os.system("mkdir 2d_c123")
os.chdir(cwd)
df1 = pd.read_csv("df_1d.csv")
index = df1["index"].tolist()
frame = df1["frame_index"].tolist()
index_frame = dict(zip(frame, index))
df2 = pd.read_csv("ref_1d.txt", sep=" ", delimiter=None, header="infer")
index_ = df2["index"].tolist()
bins = df2["bins"].tolist()
index_bins = dict(zip(index_, bins))
#### 1d
with open("frames_c1_1d.pickle", "rb") as input_file:
frames_c1_1d = pk.load(input_file)
for i in frames_c1_1d:
j = index_frame[i]
frame_index = frames_c1_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c1_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c1"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c12_1d.pickle", "rb") as input_file:
frames_c12_1d = pk.load(input_file)
for i in frames_c12_1d:
j = index_frame[i]
frame_index = frames_c12_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c12_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c12"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c123_1d.pickle", "rb") as input_file:
frames_c123_1d = pk.load(input_file)
for i in frames_c123_1d:
j = index_frame[i]
frame_index = frames_c123_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c123_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c123"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
df1 = pd.read_csv("df_2d.csv")
index = df1["index"].tolist()
frame = df1["frame_index"].tolist()
index_frame = dict(zip(frame, index))
df2 = pd.read_csv("ref_2d.txt", sep=" ", delimiter=None, header="infer")
index_ = df2["index"].tolist()
bins = df2["XY"].tolist()
index_bins = dict(zip(index_, bins))
#### 2d
with open("frames_c1_2d.pickle", "rb") as input_file:
frames_c1_2d = pk.load(input_file)
for i in frames_c1_2d:
j = index_frame[i]
frame_index = frames_c1_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c1_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c1"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c12_2d.pickle", "rb") as input_file:
frames_c12_2d = pk.load(input_file)
for i in frames_c12_2d:
j = index_frame[i]
frame_index = frames_c12_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c12_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c12"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c123_2d.pickle", "rb") as input_file:
frames_c123_2d = pk.load(input_file)
for i in frames_c123_2d:
j = index_frame[i]
frame_index = frames_c123_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c123_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c123"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
def save_we_inputs():
"""
    Writes an input file in each of the simulation folders.
    Each input file lists the selected PDB files together
    with their associated probabilities.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "we_structures"
dir_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
os.chdir(target_dir + "/" + i)
pdbs = os.listdir(".")
pickle_file = "pdb_" + i + ".pickle"
with open(pickle_file, "wb") as f:
pk.dump(pdbs, f)
shutil.move(
target_dir + "/" + i + "/" + pickle_file, cwd + "/" + pickle_file
)
os.chdir(cwd)
# c1_1d
with open("prob_c1_1d_list.pickle", "rb") as input_file:
prob_c1_1d_list = pk.load(input_file)
prob_c1_1d_list = [i / min(prob_c1_1d_list) for i in prob_c1_1d_list]
prob_c1_1d_list = [i / sum(prob_c1_1d_list) for i in prob_c1_1d_list]
with open("pdb_1d_c1.pickle", "rb") as input_file:
pdb_1d_c1 = pk.load(input_file)
pdb_1d_c1_index = []
for i in range(len(pdb_1d_c1)):
pdb_1d_c1_index.append(int(re.findall(r"\d+", pdb_1d_c1[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c1, prob_c1_1d_list, pdb_1d_c1_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c1_1d.txt", header=False, index=None, sep=" ", mode="w"
)
# c12_1d
with open("prob_c12_1d_list.pickle", "rb") as input_file:
prob_c12_1d_list = pk.load(input_file)
prob_c12_1d_list = [i / min(prob_c12_1d_list) for i in prob_c12_1d_list]
prob_c12_1d_list = [i / sum(prob_c12_1d_list) for i in prob_c12_1d_list]
with open("pdb_1d_c12.pickle", "rb") as input_file:
pdb_1d_c12 = pk.load(input_file)
pdb_1d_c12_index = []
for i in range(len(pdb_1d_c12)):
pdb_1d_c12_index.append(int(re.findall(r"\d+", pdb_1d_c12[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c12, prob_c12_1d_list, pdb_1d_c12_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c12_1d.txt", header=False, index=None, sep=" ", mode="w"
)
# c123_1d
with open("prob_c123_1d_list.pickle", "rb") as input_file:
prob_c123_1d_list = pk.load(input_file)
prob_c123_1d_list = [i / min(prob_c123_1d_list) for i in prob_c123_1d_list]
prob_c123_1d_list = [i / sum(prob_c123_1d_list) for i in prob_c123_1d_list]
with open("pdb_1d_c123.pickle", "rb") as input_file:
pdb_1d_c123 = pk.load(input_file)
pdb_1d_c123_index = []
for i in range(len(pdb_1d_c123)):
pdb_1d_c123_index.append(int(re.findall(r"\d+", pdb_1d_c123[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c123, prob_c123_1d_list, pdb_1d_c123_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c123_1d.txt", header=False, index=None, sep=" ", mode="w"
)
# c1_2d
with open("prob_c1_2d_list.pickle", "rb") as input_file:
prob_c1_2d_list = pk.load(input_file)
prob_c1_2d_list = [i / min(prob_c1_2d_list) for i in prob_c1_2d_list]
prob_c1_2d_list = [i / sum(prob_c1_2d_list) for i in prob_c1_2d_list]
with open("pdb_2d_c1.pickle", "rb") as input_file:
pdb_2d_c1 = pk.load(input_file)
pdb_2d_c1_index = []
for i in range(len(pdb_2d_c1)):
pdb_2d_c1_index.append(int(re.findall(r"\d+", pdb_2d_c1[i])[0]))
df = pd.DataFrame(
list(zip(pdb_2d_c1, prob_c1_2d_list, pdb_2d_c1_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c1_2d.txt", header=False, index=None, sep=" ", mode="w"
)
# c12_2d
with open("prob_c12_2d_list.pickle", "rb") as input_file:
prob_c12_2d_list = pk.load(input_file)
prob_c12_2d_list = [i / min(prob_c12_2d_list) for i in prob_c12_2d_list]
prob_c12_2d_list = [i / sum(prob_c12_2d_list) for i in prob_c12_2d_list]
with open("pdb_2d_c12.pickle", "rb") as input_file:
pdb_2d_c12 = pk.load(input_file)
pdb_2d_c12_index = []
for i in range(len(pdb_2d_c12)):
pdb_2d_c12_index.append(int(re.findall(r"\d+", pdb_2d_c12[i])[0]))
df = pd.DataFrame(
list(zip(pdb_2d_c12, prob_c12_2d_list, pdb_2d_c12_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c12_2d.txt", header=False, index=None, sep=" ", mode="w"
)
# c123_2d
with open("prob_c123_2d_list.pickle", "rb") as input_file:
prob_c123_2d_list = pk.load(input_file)
prob_c123_2d_list = [i / min(prob_c123_2d_list) for i in prob_c123_2d_list]
prob_c123_2d_list = [i / sum(prob_c123_2d_list) for i in prob_c123_2d_list]
with open("pdb_2d_c123.pickle", "rb") as input_file:
pdb_2d_c123 = pk.load(input_file)
pdb_2d_c123_index = []
for i in range(len(pdb_2d_c123)):
pdb_2d_c123_index.append(int(re.findall(r"\d+", pdb_2d_c123[i])[0]))
df = pd.DataFrame(
list(zip(pdb_2d_c123, prob_c123_2d_list, pdb_2d_c123_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c123_2d.txt", header=False, index=None, sep=" ", mode="w"
)
def arrange_files():
"""
    Creates directories and moves files to the appropriate folders.
"""
cwd = os.getcwd()
os.system("rm -rf txt_csv_files")
os.system("rm -rf we_inputs")
os.system("rm -rf dat_files")
os.system("rm -rf pickle_files")
os.system("rm -rf system_files")
os.system("mkdir txt_csv_files")
os.system("mkdir we_inputs")
os.system("mkdir dat_files")
os.system("mkdir pickle_files")
os.system("mkdir system_files")
shutil.move(
cwd + "/" + "c1_frame_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c1_frame_1d.txt",
)
shutil.move(
cwd + "/" + "c12_frame_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c12_frame_1d.txt",
)
shutil.move(
cwd + "/" + "c123_frame_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c123_frame_1d.txt",
)
shutil.move(
cwd + "/" + "c1_frame_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c1_frame_2d.txt",
)
shutil.move(
cwd + "/" + "c12_frame_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c12_frame_2d.txt",
)
shutil.move(
cwd + "/" + "c123_frame_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "c123_frame_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_arranged_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_arranged_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_arranged_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_arranged_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_arranged_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_arranged_1d.txt",
)
shutil.move(
cwd + "/" + "pA_c1_arranged_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c1_arranged_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c12_arranged_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c12_arranged_2d.txt",
)
shutil.move(
cwd + "/" + "pA_c123_arranged_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "pA_c123_arranged_2d.txt",
)
shutil.move(
cwd + "/" + "ref_1d.txt",
cwd + "/" + "txt_csv_files" + "/" + "ref_1d.txt",
)
shutil.move(
cwd + "/" + "ref_2d.txt",
cwd + "/" + "txt_csv_files" + "/" + "ref_2d.txt",
)
shutil.move(
cwd + "/" + "df_1d.csv",
cwd + "/" + "txt_csv_files" + "/" + "df_1d.csv",
)
shutil.move(
cwd + "/" + "df_2d.csv",
cwd + "/" + "txt_csv_files" + "/" + "df_2d.csv",
)
shutil.move(
cwd + "/" + "we_input_c1_1d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c1_1d.txt",
)
shutil.move(
cwd + "/" + "we_input_c12_1d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c12_1d.txt",
)
shutil.move(
cwd + "/" + "we_input_c123_1d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c123_1d.txt",
)
shutil.move(
cwd + "/" + "we_input_c1_2d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c1_2d.txt",
)
shutil.move(
cwd + "/" + "we_input_c12_2d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c12_2d.txt",
)
shutil.move(
cwd + "/" + "we_input_c123_2d.txt",
cwd + "/" + "we_inputs" + "/" + "we_input_c123_2d.txt",
)
shutil.move(
cwd + "/" + "weights.dat",
cwd + "/" + "dat_files" + "/" + "weights.txt",
)
shutil.move(
cwd + "/" + "Psi.dat", cwd + "/" + "dat_files" + "/" + "Psi.txt"
)
shutil.move(
cwd + "/" + "Phi_Psi.dat",
cwd + "/" + "dat_files" + "/" + "Phi_Psi.txt",
)
shutil.move(
cwd + "/" + "prob_c1_1d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c1_1d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c12_1d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c12_1d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c123_1d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c123_1d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c1_2d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c1_2d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c12_2d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c12_2d_list.pickle",
)
shutil.move(
cwd + "/" + "prob_c123_2d_list.pickle",
cwd + "/" + "pickle_files" + "/" + "prob_c123_2d_list.pickle",
)
shutil.move(
cwd + "/" + "pdb_1d_c1.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_1d_c1.pickle",
)
shutil.move(
cwd + "/" + "pdb_1d_c12.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_1d_c12.pickle",
)
shutil.move(
cwd + "/" + "pdb_1d_c123.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_1d_c123.pickle",
)
shutil.move(
cwd + "/" + "pdb_2d_c1.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_2d_c1.pickle",
)
shutil.move(
cwd + "/" + "pdb_2d_c12.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_2d_c12.pickle",
)
shutil.move(
cwd + "/" + "pdb_2d_c123.pickle",
cwd + "/" + "pickle_files" + "/" + "pdb_2d_c123.pickle",
)
shutil.move(
cwd + "/" + "frames_c1_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c1_1d.pickle",
)
shutil.move(
cwd + "/" + "frames_c12_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c12_1d.pickle",
)
shutil.move(
cwd + "/" + "frames_c123_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c123_1d.pickle",
)
shutil.move(
cwd + "/" + "frames_c1_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c1_2d.pickle",
)
shutil.move(
cwd + "/" + "frames_c12_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c12_2d.pickle",
)
shutil.move(
cwd + "/" + "frames_c123_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "frames_c123_2d.pickle",
)
shutil.move(
cwd + "/" + "indices_c1_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c1_1d.pickle",
)
shutil.move(
cwd + "/" + "indices_c12_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c12_1d.pickle",
)
shutil.move(
cwd + "/" + "indices_c123_1d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c123_1d.pickle",
)
shutil.move(
cwd + "/" + "indices_c1_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c1_2d.pickle",
)
shutil.move(
cwd + "/" + "indices_c12_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c12_2d.pickle",
)
shutil.move(
cwd + "/" + "indices_c123_2d.pickle",
cwd + "/" + "pickle_files" + "/" + "indices_c123_2d.pickle",
)
shutil.move(
cwd + "/" + "system_final.inpcrd",
cwd + "/" + "system_files" + "/" + "system_final.inpcrd",
)
shutil.move(
cwd + "/" + "system_final.nc",
cwd + "/" + "system_files" + "/" + "system_final.nc",
)
shutil.move(
cwd + "/" + "system_final.out",
cwd + "/" + "system_files" + "/" + "system_final.out",
)
shutil.move(
cwd + "/" + "system_final.prmtop",
cwd + "/" + "system_files" + "/" + "system_final.prmtop",
)
shutil.move(
cwd + "/" + "system_final.rst",
cwd + "/" + "system_files" + "/" + "system_final.rst",
)
shutil.move(
cwd + "/" + "gamd.log", cwd + "/" + "system_files" + "/" + "gamd.log"
)
shutil.move(
cwd + "/" + "md.in", cwd + "/" + "system_files" + "/" + "md.in"
)
shutil.move(
cwd + "/" + "mdinfo", cwd + "/" + "system_files" + "/" + "mdinfo"
)
shutil.move(
cwd + "/" + "gamd-restart.dat",
cwd + "/" + "system_files" + "/" + "gamd-restart.dat",
)
def run_reweigh():
"""
    Runs the reweighting calculations systematically
    in each of the simulation folders.
"""
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
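    # The six simulation folders analysed here correspond to the dihedral, dual
    # and total GaMD boost potentials, each at a lower and an upper threshold.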
cwd = os.getcwd()
target_dir = cwd + "/" + "gamd_simulations" + "/"
    # run reweighting and analysis in each of the simulation folders
for i in dir_list:
os.chdir(target_dir + i)
create_data_files()
reweight_1d()
reweight_2d()
save_frames()
save_we_inputs()
arrange_files()
os.chdir(cwd)
def save_westpa_inputs():
"""
Creates separate folders to initiate WE simulations.
"""
cwd = os.getcwd()
list_dir = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
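    # Each entry is one progress-coordinate expansion: a 1D or 2D reweighted
    # surface built with the c1, c12 or c123 terms (presumably the first-,
    # second- and third-order cumulant approximations used during reweighting).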
for i in list_dir:
os.chdir(cwd + "/" + "we_structures" + "/" + i)
files = os.listdir(".")
file_to_find = "*.pdb"
pdb_list = []
for x in files:
if fnmatch.fnmatch(x, file_to_find):
pdb_list.append(x)
for j in pdb_list:
fix_cap_remove_nme(j)
fix_cap_replace_nme(j)
inpcrd_file = j[:-4] + ".inpcrd"
filename = "input_" + j[:-4] + ".leap"
file = open(filename, "w")
file.write("source leaprc.protein.ff14SB" + "\n")
file.write("source leaprc.water.tip3p" + "\n")
file.write("set default FlexibleWater on" + "\n")
file.write("set default PBRadii mbondi2" + "\n")
file.write("pdb = loadpdb " + j + "\n")
file.write(
"saveamberparm pdb "
+ j[:-4]
+ ".prmtop "
+ j[:-4]
+ ".inpcrd"
+ "\n"
)
file.write("quit" + "\n")
file.close()
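        # Run tleap on every generated input script to build prmtop/inpcrd pairs,
        # then keep only the .inpcrd files as basis states and clean up the
        # intermediate leap files.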
files = os.listdir(".")
file_to_find = "*.leap"
leap_list = []
for y in files:
if fnmatch.fnmatch(y, file_to_find):
leap_list.append(y)
for k in leap_list:
command = "tleap -f {}".format(k)
os.system(command)
os.system("rm -rf leap.log")
os.system("rm -rf *prmtop*")
os.system("rm -rf *leap*")
os.system("rm -rf bstates")
os.system("mkdir bstates")
for j in pdb_list:
shutil.move(
cwd
+ "/"
+ "we_structures"
+ "/"
+ i
+ "/"
+ j[:-4]
+ ".inpcrd",
cwd
+ "/"
+ "we_structures"
+ "/"
+ i
+ "/"
+ "bstates"
+ "/"
+ j[:-4]
+ ".inpcrd",
)
os.chdir(cwd)
os.system("rm -rf westpa_inputs")
os.system("mkdir westpa_inputs")
for l in list_dir:
os.chdir(cwd + "/" + "westpa_inputs")
command = "rm -rf {}".format(l)
os.system(command)
command = "mkdir {}".format(l)
os.system(command)
shutil.move(
cwd + "/" + "we_structures" + "/" + l + "/" + "bstates",
cwd + "/" + "westpa_inputs" + "/" + l + "/" + "bstates",
)
os.chdir(cwd)
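    # Copy the matching we_input_*.txt listing into each westpa_inputs subfolder.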
    we_input_files = [
        "we_input_c1_1d.txt",
        "we_input_c12_1d.txt",
        "we_input_c123_1d.txt",
        "we_input_c1_2d.txt",
        "we_input_c12_2d.txt",
        "we_input_c123_2d.txt",
    ]
    for folder, we_input in zip(list_dir, we_input_files):
        shutil.copy(
            cwd + "/" + "we_inputs" + "/" + we_input,
            cwd + "/" + "westpa_inputs" + "/" + folder + "/" + we_input,
        )
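    # Point each listing at the generated .inpcrd files and rename it to
    # BASIS_STATES so it can serve as the WESTPA basis-state table.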
for i in list_dir:
os.chdir(cwd + "/" + "westpa_inputs" + "/" + i)
for file in os.listdir("."):
if fnmatch.fnmatch(file, "*.txt"):
file_to_rename = file
f = open(file_to_rename, "rt")
data = f.read()
data = data.replace("pdb", "inpcrd")
f.close()
f = open(file_to_rename, "wt")
f.write(data)
f.close()
os.rename(file_to_rename, "BASIS_STATES")
os.chdir(cwd)
for i in list_dir:
os.chdir(cwd + "/" + "westpa_inputs" + "/" + i)
os.mkdir("CONFIG")
shutil.copy(
cwd + "/" + "system_files" + "/" + "system_final.prmtop",
cwd
+ "/"
+ "westpa_inputs"
+ "/"
+ i
+ "/"
+ "CONFIG"
+ "/"
+ "system_final.prmtop",
)
os.chdir(cwd)
def run_westpa_inputs():
"""
    Systematically runs the save_westpa_inputs function in
    each GaMD simulation directory.
"""
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
cwd = os.getcwd()
source_dir = cwd + "/"
target_dir = cwd + "/" + "gamd_simulations" + "/"
for i in dir_list:
os.chdir(target_dir + i)
save_westpa_inputs()
os.chdir(cwd)
def transfer_files():
"""
    Deletes unnecessary files in the simulation directories and
    creates a new WE simulation folder (westpa_dir).
"""
os.system("rm -rf westpa_dir")
os.system("mkdir westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
cwd = os.getcwd()
source_dir = cwd + "/"
target_dir = cwd + "/" + "gamd_simulations" + "/"
for i in dir_list:
os.chdir(source_dir + "westpa_dir")
command = "mkdir {}".format(i)
os.system(command)
os.chdir(cwd)
for i in dir_list:
shutil.copytree(
target_dir + i + "/" + "westpa_inputs",
source_dir + "westpa_dir" + "/" + i + "/" "westpa_inputs",
)
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for j in we_list:
shutil.copytree(
source_dir
+ "westpa_dir"
+ "/"
+ i
+ "/"
+ "westpa_inputs"
+ "/"
+ j,
source_dir + "westpa_dir" + "/" + i + "/" + j,
)
dest_dir = source_dir + "westpa_dir" + "/" + i
os.chdir(dest_dir)
os.system("rm -rf westpa_inputs")
os.chdir(cwd)
os.chdir(cwd)
def add_vectors_westpa_files():
"""
    Adds box vector dimensions to the basis-state inpcrd files.
    To be used only when the box vector dimensions are not
    already present in the last line of the inpcrd file.
"""
cwd = os.getcwd()
source_dir = cwd
westpa_dir = cwd + "/" + "westpa_dir"
os.chdir(source_dir + "/" + "starting_structures")
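    # The last line of system_final.inpcrd carries the periodic box vector
    # information; it is read here and appended to every basis-state .inpcrd
    # file below.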
with open("system_final.inpcrd") as f:
for line in f:
pass
vector_information = line
print(vector_information)
os.chdir(source_dir)
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
os.chdir(westpa_dir + "/" + str(i))
for j in we_list:
os.chdir(
westpa_dir + "/" + str(i) + "/" + str(j) + "/" + "bstates"
)
files = os.listdir(".")
file_to_find = "*.inpcrd"
inpcrd_list = []
for k in files:
if fnmatch.fnmatch(k, file_to_find):
inpcrd_list.append(k)
for l in inpcrd_list:
with open(l, "a+") as f:
f.write(vector_information)
os.chdir(cwd)
def we_analysis():
"""
    Runs a short minimization/MD run for each saved inpcrd file.
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
for j in we_list:
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i))
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j))
if len(open("BASIS_STATES").readlines()) > 0:
df = pd.read_csv("BASIS_STATES", delimiter=" ", header=None)
                df.columns = ["descriptor", "probability", "file_name"]
df1 = df[["file_name"]]
inpcrd_list = df1.values.tolist()
inpcrd_list = list(itertools.chain(*inpcrd_list))
os.system("rm -rf md_sims")
os.system("mkdir md_sims")
os.chdir(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
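                # Minimal minimization input: every basis state gets a short energy
                # minimization so that unstable structures can be detected and
                # removed later (see correction_westpa).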
with open("md.in", "w") as f:
f.write(
"Run minimization followed by saving rst file" + "\n"
)
f.write("&cntrl" + "\n")
f.write(
" imin = 1, maxcyc = 10000, ntpr = 5, iwrap = 1, ntxo = 1"
+ "\n"
)
f.write("&end" + "\n")
for k in inpcrd_list:
source_dir = (
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "bstates"
)
target_dir = (
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
shutil.copy(
source_dir + "/" + str(k), target_dir + "/" + str(k)
)
source_dir = cwd + "/" + "starting_structures"
target_dir = (
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + "system_final.prmtop",
)
                for inpcrd_file in inpcrd_list:
                    base_name = inpcrd_file[:-6]
                    command = (
                        "pmemd.cuda -O -i md.in -o "
                        + base_name
                        + "out"
                        + " -p system_final.prmtop -c "
                        + inpcrd_file
                        + " -r "
                        + base_name
                        + "rst"
                    )
                    print(command)
                    os.system(command)
os.chdir(cwd)
def correction_westpa():
"""
    Eliminates all inpcrd files that crashed during the short MD
    simulation run. Also creates folders of corrected .inpcrd and
    .rst files in case they are needed for WE simulations.
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
for j in we_list:
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i))
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j))
if len(open("BASIS_STATES").readlines()) > 0:
os.chdir(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
)
files = os.listdir(".")
file_to_find = "*.out"
out_list = []
for y in files:
if fnmatch.fnmatch(y, file_to_find):
out_list.append(y)
list_failed_jobs = []
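                # Heuristic: a completed pmemd output ends with timing lines
                # prefixed by '|'; outputs whose second-to-last line lacks that
                # prefix are treated as crashed jobs.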
for out_file in out_list:
with open(out_file, "r") as f:
last_line = f.readlines()[-2]
                        if not last_line.startswith("|"):
list_failed_jobs.append(out_file)
                for failed_out in list_failed_jobs:
                    base_name = failed_out[:-3]
                    for extension in ("out", "rst", "inpcrd", "nc"):
                        os.system("rm -rf " + base_name + extension)
files = os.listdir(".")
file_to_find = "*.rst"
rst_list = []
for y in files:
if fnmatch.fnmatch(y, file_to_find):
rst_list.append(y)
rst_failed_jobs = []
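                # A restart file with NaN on its coordinate line indicates a
                # blown-up run; it is removed together with its .out, .inpcrd
                # and .nc companions.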
for rst_file in rst_list:
with open(rst_file, "r") as f:
req_line = f.readlines()[2]
if "NaN" in req_line:
rst_failed_jobs.append(rst_file)
                for failed_rst in rst_failed_jobs:
                    base_name = failed_rst[:-3]
                    for extension in ("rst", "out", "inpcrd", "nc"):
                        os.system("rm -rf " + base_name + extension)
files_2 = os.listdir(".")
file_to_find_2 = "*.rst"
rst_list_2 = []
for y in files_2:
if fnmatch.fnmatch(y, file_to_find_2):
rst_list_2.append(y)
rst_failed_jobs_2 = []
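                # A second pass drops restart files containing '*' placeholders,
                # which typically appear when coordinates overflow the
                # fixed-width restart format.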
for rst_file_2 in rst_list_2:
with open(rst_file_2, "r") as f:
lines_file = f.readlines()
for req_line in lines_file:
if "*" in req_line:
rst_failed_jobs_2.append(rst_file_2)
                for failed_rst in rst_failed_jobs_2:
                    base_name = failed_rst[:-3]
                    for extension in ("rst", "out", "inpcrd", "nc"):
                        os.system("rm -rf " + base_name + extension)
os.system("rm -rf md.in")
os.system("rm -rf system_final.prmtop")
os.system("rm -rf mdinfo")
files = os.listdir(".")
inpcrd_file_to_find = "*.inpcrd"
rst_file_to_find = "*.rst"
inpcrd_file_list = []
for y in files:
if fnmatch.fnmatch(y, inpcrd_file_to_find):
inpcrd_file_list.append(y)
rst_file_list = []
for z in files:
if fnmatch.fnmatch(z, rst_file_to_find):
rst_file_list.append(z)
os.chdir(
cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j)
)
os.system("rm -rf bstates_corrected_rst")
os.system("mkdir bstates_corrected_rst")
os.system("rm -rf bstates_corrected_inpcrd")
os.system("mkdir bstates_corrected_inpcrd")
for x in inpcrd_file_list:
shutil.copy(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
+ "/"
+ str(x),
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "bstates_corrected_inpcrd"
+ "/"
+ str(x),
)
for y in rst_file_list:
shutil.copy(
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "md_sims"
+ "/"
+ str(y),
cwd
+ "/"
+ "westpa_dir"
+ "/"
+ str(i)
+ "/"
+ str(j)
+ "/"
+ "bstates_corrected_rst"
+ "/"
+ str(y),
)
df = pd.read_csv("BASIS_STATES", sep=" ", header=None)
df.columns = ["index_df", "probability", "inpcrd"]
df = df[["probability", "inpcrd"]]
df = df[df.inpcrd.str.contains("|".join(inpcrd_file_list))]
index_row_list = []
for n in range(df.shape[0]):
index_row_list.append(n)
df = df.assign(index_=index_row_list)
df = df[["index_", "probability", "inpcrd"]]
df.to_csv(
"BASIS_STATES_CORRECTED_INPCRD",
header=False,
index=None,
sep=" ",
mode="w",
)
fin = open("BASIS_STATES_CORRECTED_INPCRD", "rt")
fout = open("BASIS_STATES_CORRECTED_RST", "wt")
for line in fin:
fout.write(line.replace("inpcrd", "rst"))
fin.close()
fout.close()
os.chdir(cwd)
def plot_contrib():
"""
    Plots to review the analysis. Plots bar graphs of the number
    of structures obtained for the WE simulations for each of the
    potential boosts applied during the GaMD simulations.
"""
cwd = os.getcwd()
os.chdir(cwd + "/" + "westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
we_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
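    # For every threshold/expansion pair, record the number of basis states
    # listed in BASIS_STATES and in BASIS_STATES_CORRECTED_RST.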
confs = []
for i in dir_list:
conf_within = []
for j in we_list:
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i))
os.chdir(cwd + "/" + "westpa_dir" + "/" + str(i) + "/" + str(j))
if len(open("BASIS_STATES").readlines()) > 0:
count1 = len(open("BASIS_STATES").readlines())
count2 = len(open("BASIS_STATES_CORRECTED_RST").readlines())
conf = str(i), str(j), count1, count2
conf_within.append(conf)
confs.append(conf_within)
print(confs)
os.chdir(cwd)
corrected_list = []
for i in range(len(confs)):
corrected_list_1 = []
for j in range(len(confs[i])):
corrected_list_1.append(confs[i][j][3])
corrected_list.append(corrected_list_1)
print(corrected_list)
expanse_list = []
for i in range(len(confs)):
expanse_list_1 = []
for j in range(len(confs[i])):
expanse_list_1.append(confs[i][j][1])
expanse_list.append(expanse_list_1)
print(expanse_list)
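    # One horizontal bar chart per GaMD threshold, showing the corrected
    # basis-state count obtained for each expansion.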
    for plot_index, threshold in enumerate(dir_list):
        x = expanse_list[plot_index]
        y = corrected_list[plot_index]
        title = "Configurations vs Different Expansions" + " for " + threshold
        print(title)
        sns.set(font_scale=1)
        plt.rcParams["figure.figsize"] = (8, 4)
        plt.rcParams["font.family"] = "serif"
        style.use("fivethirtyeight")
        g = sns.barplot(y, x, palette=("binary"))
        g.grid(False)
        g.set_title(title)
        g.set(xlabel="Configurations", ylabel="Expansion")
        ax = g
        for i, v in enumerate(y):
            ax.text(v + 1, i + 0.25, str(v), color="black", fontweight="bold")
        fig_name = threshold
        plt.savefig(fig_name, bbox_inches="tight")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
rcParams["figure.figsize"] = 30, 20
plt.rcParams["axes.grid"] = False
img_1 = mpimg.imread("dihedral_threshold_lower.png")
img_2 = mpimg.imread("dihedral_threshold_upper.png")
img_3 = mpimg.imread("dual_threshold_lower.png")
img_4 = mpimg.imread("dual_threshold_upper.png")
img_5 = mpimg.imread("total_threshold_lower.png")
img_6 = mpimg.imread("total_threshold_upper.png")
fig, ax = plt.subplots(3, 2)
fig.suptitle("")
ax[0, 1].imshow(img_1)
ax[1, 1].imshow(img_2)
ax[0, 0].imshow(img_3)
ax[1, 0].imshow(img_4)
ax[2, 0].imshow(img_5)
ax[2, 1].imshow(img_6)
plt.savefig("analysis.png")
plt.show(block=False)
plt.pause(3)
plt.close()
cwd = os.getcwd()
os.system("rm -rf analysis")
os.system("mkdir analysis")
target_dir = cwd + "/" + "analysis"
command = "mv analysis.png " + target_dir
os.system(command)
os.system("rm -rf *.png*")
def clean_for_analysis():
"""
    Restructures the entire file tree so that the reweighting
    analysis can be started again. Use only when the analysis
    needs to be rerun.
"""
os.system("rm -rf westpa_dir")
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
cwd = os.getcwd()
source_dir = cwd + "/"
target_dir = cwd + "/" + "gamd_simulations" + "/"
for i in dir_list:
os.chdir(target_dir + i)
os.system(
"rm -rf pickle_files dat_files txt_csv_files we_inputs westpa_inputs we_structures"
)
os.chdir(cwd)
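    # Move the raw GaMD outputs back out of each system_files/ folder so the
    # simulations can be re-analysed from scratch.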
    gamd_output_files = [
        "gamd.log",
        "gamd-restart.dat",
        "md.in",
        "mdinfo",
        "system_final.inpcrd",
        "system_final.nc",
        "system_final.out",
        "system_final.prmtop",
        "system_final.rst",
    ]
    for i in dir_list:
        for gamd_output in gamd_output_files:
            shutil.move(
                target_dir + i + "/" + "system_files" + "/" + gamd_output,
                target_dir + i + "/" + gamd_output,
            )
for i in dir_list:
os.chdir(target_dir + i)
os.system("rm -rf system_files")
os.chdir(cwd)
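# The commented-out calls below document the intended end-to-end order of the
# GaMD simulation, reweighting and WESTPA preparation workflow; uncomment them
# to run it in full.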
"""
prepare_alanine_dipeptide()
run_equilibration()
create_starting_structures()
add_vec_inpcrd()
add_vec_prmtop()
create_filetree()
run_simulations()
run_reweigh()
run_westpa_inputs()
transfer_files()
add_vectors_westpa_files()
we_analysis()
correction_westpa()
plot_contrib()
clean_for_analysis()
"""
|
import asyncio
import datetime
import logging
from copy import copy
from io import BytesIO
from typing import Dict, List, Literal, Optional, Tuple, Union, cast
import aiohttp
import discord
from redbot import VersionInfo, version_info
from redbot.core import Config, checks, commands
from redbot.core.bot import Red
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import (
bold,
box,
escape,
humanize_list,
humanize_number,
humanize_timedelta,
pagify,
)
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
from .converters import (
ChannelConverter,
FuzzyMember,
GuildConverter,
MultiGuildConverter,
PermissionConverter,
)
from .menus import AvatarPages, BaseMenu, GuildPages, ListPages
_ = Translator("ServerStats", __file__)
log = logging.getLogger("red.trusty-cogs.ServerStats")
@cog_i18n(_)
class ServerStats(commands.Cog):
"""
Gather useful information about servers the bot is in
A lot of commands are bot owner only
"""
__author__ = ["TrustyJAID", "Preda"]
__version__ = "1.6.9"
def __init__(self, bot):
self.bot: Red = bot
default_global: dict = {"join_channel": None}
default_guild: dict = {"last_checked": 0, "members": {}, "total": 0, "channels": {}}
self.config: Config = Config.get_conf(self, 54853421465543, force_registration=True)
self.config.register_global(**default_global)
self.config.register_guild(**default_guild)
def format_help_for_context(self, ctx: commands.Context) -> str:
"""
Thanks Sinbad!
"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nCog Version: {self.__version__}"
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
"""
Method for finding users data inside the cog and deleting it.
"""
all_guilds = await self.config.all_guilds()
for guild_id, data in all_guilds.items():
save = False
if str(user_id) in data["members"]:
del data["members"][str(user_id)]
save = True
for channel_id, chan_data in data["channels"].items():
if str(user_id) in chan_data["members"]:
del chan_data["members"][str(user_id)]
save = True
if save:
await self.config.guild_from_id(guild_id).set(data)
@commands.command()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def avatar(self, ctx: commands.Context, *, members: Optional[FuzzyMember]):
"""
        Display a user's avatar in chat
"""
if members is None:
members = [ctx.author]
await BaseMenu(
source=AvatarPages(members=members),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
).start(ctx=ctx)
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
"""Build and send a message containing serverinfo when the bot joins a new server"""
channel_id = await self.config.join_channel()
if channel_id is None:
return
channel = self.bot.get_channel(channel_id)
passed = f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
created_at = _(
"{bot} has joined a server!\n "
"That's **{num}** servers now!\n"
"That's a total of **{users}** users !\n"
"Server created on **{since}**. "
"That's over **{passed}**!"
).format(
bot=channel.guild.me.mention,
num=humanize_number(len(self.bot.guilds)),
users=humanize_number(len(self.bot.users)),
since=f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>",
passed=passed,
)
try:
em = await self.guild_embed(guild)
em.description = created_at
await channel.send(embed=em)
except Exception:
log.error(f"Error creating guild embed for new guild ID {guild.id}", exc_info=True)
async def guild_embed(self, guild: discord.Guild) -> discord.Embed:
"""
Builds the guild embed information used throughout the cog
"""
def _size(num):
for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if abs(num) < 1024.0:
return "{0:.1f}{1}".format(num, unit)
num /= 1024.0
return "{0:.1f}{1}".format(num, "YB")
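        # _size formats byte counts with binary (1024-based) units for file-size
        # limits; _bitsize below uses decimal (1000-based) units for bitrates.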
def _bitsize(num):
for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if abs(num) < 1000.0:
return "{0:.1f}{1}".format(num, unit)
num /= 1000.0
return "{0:.1f}{1}".format(num, "YB")
passed = (datetime.datetime.utcnow() - guild.created_at).days
created_at = _("Created on {date}. That's over {num}!").format(
date=bold(f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"),
num=bold(f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"),
)
total_users = humanize_number(guild.member_count)
try:
joined_at = guild.me.joined_at
except AttributeError:
joined_at = datetime.datetime.utcnow()
bot_joined = f"<t:{int(joined_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
since_joined = f"<t:{int(joined_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
joined_on = _(
"**{bot_name}** joined this server on **{bot_join}**.\n"
"That's over **{since_join}**!"
).format(bot_name=self.bot.user.name, bot_join=bot_joined, since_join=since_joined)
shard = (
_("\nShard ID: **{shard_id}/{shard_count}**").format(
shard_id=humanize_number(guild.shard_id + 1),
shard_count=humanize_number(self.bot.shard_count),
)
if self.bot.shard_count > 1
else ""
)
colour = guild.roles[-1].colour
online_stats = {
_("Humans: "): lambda x: not x.bot,
_(" • Bots: "): lambda x: x.bot,
"\N{LARGE GREEN CIRCLE}": lambda x: x.status is discord.Status.online,
"\N{LARGE ORANGE CIRCLE}": lambda x: x.status is discord.Status.idle,
"\N{LARGE RED CIRCLE}": lambda x: x.status is discord.Status.do_not_disturb,
"\N{MEDIUM WHITE CIRCLE}": lambda x: x.status is discord.Status.offline,
"\N{LARGE PURPLE CIRCLE}": lambda x: (
x.activity is not None and x.activity.type is discord.ActivityType.streaming
),
}
member_msg = _("Total Users: {}\n").format(bold(total_users))
count = 1
for emoji, value in online_stats.items():
try:
num = len([m for m in guild.members if value(m)])
except Exception as error:
print(error)
continue
else:
member_msg += f"{emoji} {bold(humanize_number(num))} " + (
"\n" if count % 2 == 0 else ""
)
count += 1
text_channels = len(guild.text_channels)
nsfw_channels = len([c for c in guild.text_channels if c.is_nsfw()])
voice_channels = len(guild.voice_channels)
vc_regions = {
"vip-us-east": _("__VIP__ US East ") + "\U0001F1FA\U0001F1F8",
"vip-us-west": _("__VIP__ US West ") + "\U0001F1FA\U0001F1F8",
"vip-amsterdam": _("__VIP__ Amsterdam ") + "\U0001F1F3\U0001F1F1",
"eu-west": _("EU West ") + "\U0001F1EA\U0001F1FA",
"eu-central": _("EU Central ") + "\U0001F1EA\U0001F1FA",
"europe": _("Europe ") + "\U0001F1EA\U0001F1FA",
"london": _("London ") + "\U0001F1EC\U0001F1E7",
"frankfurt": _("Frankfurt ") + "\U0001F1E9\U0001F1EA",
"amsterdam": _("Amsterdam ") + "\U0001F1F3\U0001F1F1",
"us-west": _("US West ") + "\U0001F1FA\U0001F1F8",
"us-east": _("US East ") + "\U0001F1FA\U0001F1F8",
"us-south": _("US South ") + "\U0001F1FA\U0001F1F8",
"us-central": _("US Central ") + "\U0001F1FA\U0001F1F8",
"singapore": _("Singapore ") + "\U0001F1F8\U0001F1EC",
"sydney": _("Sydney ") + "\U0001F1E6\U0001F1FA",
"brazil": _("Brazil ") + "\U0001F1E7\U0001F1F7",
"hongkong": _("Hong Kong ") + "\U0001F1ED\U0001F1F0",
"russia": _("Russia ") + "\U0001F1F7\U0001F1FA",
"japan": _("Japan ") + "\U0001F1EF\U0001F1F5",
"southafrica": _("South Africa ") + "\U0001F1FF\U0001F1E6",
"india": _("India ") + "\U0001F1EE\U0001F1F3",
"south-korea": _("South Korea ") + "\U0001f1f0\U0001f1f7",
} # Unicode is needed because bold() is escaping emojis for some reason in this case.
verif = {
"none": _("0 - None"),
"low": _("1 - Low"),
"medium": _("2 - Medium"),
"high": _("3 - High"),
"extreme": _("4 - Extreme"),
}
features = {
"ANIMATED_ICON": _("Animated Icon"),
"BANNER": _("Banner Image"),
"COMMERCE": _("Commerce"),
"COMMUNITY": _("Community"),
"DISCOVERABLE": _("Server Discovery"),
"FEATURABLE": _("Featurable"),
"INVITE_SPLASH": _("Splash Invite"),
"MEMBER_LIST_DISABLED": _("Member list disabled"),
"MEMBER_VERIFICATION_GATE_ENABLED": _("Membership Screening enabled"),
"MORE_EMOJI": _("More Emojis"),
"NEWS": _("News Channels"),
"PARTNERED": _("Partnered"),
"PREVIEW_ENABLED": _("Preview enabled"),
"PUBLIC_DISABLED": _("Public disabled"),
"VANITY_URL": _("Vanity URL"),
"VERIFIED": _("Verified"),
"VIP_REGIONS": _("VIP Voice Servers"),
"WELCOME_SCREEN_ENABLED": _("Welcome Screen enabled"),
}
guild_features_list = [
f"✅ {name}" for feature, name in features.items() if feature in guild.features
]
em = discord.Embed(
description=(f"{guild.description}\n\n" if guild.description else "")
+ f"{created_at}\n{joined_on}",
colour=colour,
)
em.set_author(
name=guild.name,
icon_url="https://cdn.discordapp.com/emojis/457879292152381443.png"
if "VERIFIED" in guild.features
else "https://cdn.discordapp.com/emojis/508929941610430464.png"
if "PARTNERED" in guild.features
else discord.Embed.Empty,
url=guild.icon_url
if guild.icon_url
else "https://cdn.discordapp.com/embed/avatars/1.png",
)
em.set_thumbnail(
url=guild.icon_url
if guild.icon_url
else "https://cdn.discordapp.com/embed/avatars/1.png"
)
em.add_field(name=_("Members:"), value=member_msg)
em.add_field(
name=_("Channels:"),
value=_(
"\N{SPEECH BALLOON} Text: {text}\n{nsfw}"
"\N{SPEAKER WITH THREE SOUND WAVES} Voice: {voice}"
).format(
text=bold(humanize_number(text_channels)),
nsfw=_("\N{NO ONE UNDER EIGHTEEN SYMBOL} Nsfw: {}\n").format(
bold(humanize_number(nsfw_channels))
)
if nsfw_channels
else "",
voice=bold(humanize_number(voice_channels)),
),
)
owner = guild.owner if guild.owner else await self.bot.get_or_fetch_user(guild.owner_id)
em.add_field(
name=_("Utility:"),
value=_(
"Owner: {owner_mention}\n{owner}\nRegion: {region}\nVerif. level: {verif}\nServer ID: {id}{shard}"
).format(
owner_mention=bold(str(owner.mention)),
owner=bold(str(owner)),
region=f"**{vc_regions.get(str(guild.region)) or str(guild.region)}**",
verif=bold(verif[str(guild.verification_level)]),
id=bold(str(guild.id)),
shard=shard,
),
inline=False,
)
em.add_field(
name=_("Misc:"),
value=_(
"AFK channel: {afk_chan}\nAFK timeout: {afk_timeout}\nCustom emojis: {emojis}\nRoles: {roles}"
).format(
afk_chan=bold(str(guild.afk_channel)) if guild.afk_channel else bold(_("Not set")),
afk_timeout=bold(humanize_timedelta(seconds=guild.afk_timeout)),
emojis=bold(humanize_number(len(guild.emojis))),
roles=bold(humanize_number(len(guild.roles))),
),
inline=False,
)
if guild_features_list:
em.add_field(name=_("Server features:"), value="\n".join(guild_features_list))
if guild.premium_tier != 0:
nitro_boost = _(
"Tier {boostlevel} with {nitroboosters} boosters\n"
"File size limit: {filelimit}\n"
"Emoji limit: {emojis_limit}\n"
"VCs max bitrate: {bitrate}"
).format(
boostlevel=bold(str(guild.premium_tier)),
nitroboosters=bold(humanize_number(guild.premium_subscription_count)),
filelimit=bold(_size(guild.filesize_limit)),
emojis_limit=bold(str(guild.emoji_limit)),
bitrate=bold(_bitsize(guild.bitrate_limit)),
)
em.add_field(name=_("Nitro Boost:"), value=nitro_boost)
if guild.splash:
em.set_image(url=guild.splash_url_as(format="png"))
return em
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild) -> None:
"""Build and send a message containing serverinfo when the bot leaves a server"""
channel_id = await self.config.join_channel()
if channel_id is None:
return
channel = self.bot.get_channel(channel_id)
passed = f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
created_at = _(
"{bot} has left a server!\n "
"That's **{num}** servers now!\n"
"That's a total of **{users}** users !\n"
"Server created on **{since}**. "
"That's over **{passed}**!"
).format(
bot=channel.guild.me.mention,
num=humanize_number(len(self.bot.guilds)),
users=humanize_number(len(self.bot.users)),
since=f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>",
passed=passed,
)
try:
em = await self.guild_embed(guild)
em.description = created_at
await channel.send(embed=em)
except Exception:
log.error(f"Error creating guild embed for old guild ID {guild.id}", exc_info=True)
@commands.command()
async def emoji(
self, ctx: commands.Context, emoji: Union[discord.Emoji, discord.PartialEmoji, str]
) -> None:
"""
Post a large size emojis in chat
"""
await ctx.channel.trigger_typing()
if type(emoji) in [discord.PartialEmoji, discord.Emoji]:
d_emoji = cast(discord.Emoji, emoji)
ext = "gif" if d_emoji.animated else "png"
url = "https://cdn.discordapp.com/emojis/{id}.{ext}?v=1".format(id=d_emoji.id, ext=ext)
filename = "{name}.{ext}".format(name=d_emoji.name, ext=ext)
else:
try:
"""https://github.com/glasnt/emojificate/blob/master/emojificate/filter.py"""
cdn_fmt = "https://twemoji.maxcdn.com/2/72x72/{codepoint:x}.png"
url = cdn_fmt.format(codepoint=ord(str(emoji)))
filename = "emoji.png"
except TypeError:
await ctx.send(_("That doesn't appear to be a valid emoji"))
return
try:
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
image = BytesIO(await resp.read())
except Exception:
await ctx.send(_("That doesn't appear to be a valid emoji"))
return
file = discord.File(image, filename=filename)
await ctx.send(file=file)
@commands.command()
async def botstats(self, ctx: commands.Context) -> None:
"""Display stats about the bot"""
async with ctx.typing():
servers = humanize_number(len(ctx.bot.guilds))
members = humanize_number(len(self.bot.users))
passed = f"<t:{int(ctx.me.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
since = f"<t:{int(ctx.me.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
msg = _(
"{bot} is on {servers} servers serving {members} members!\n"
"{bot} was created on **{since}**.\n"
"That's over **{passed}**!"
).format(
bot=ctx.me.mention,
servers=servers,
members=members,
since=since,
passed=passed,
)
em = discord.Embed(
description=msg, colour=await ctx.embed_colour(), timestamp=ctx.message.created_at
)
if ctx.guild:
em.set_author(
                    name=f"{ctx.me} {f'~ {ctx.me.nick}' if ctx.me.nick else ''}",
icon_url=ctx.me.avatar_url,
)
else:
em.set_author(
name=f"{ctx.me}",
icon_url=ctx.me.avatar_url,
)
em.set_thumbnail(url=ctx.me.avatar_url)
if ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send(embed=em)
else:
await ctx.send(msg)
@commands.command()
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def topic(
self, ctx: commands.Context, channel: Optional[discord.TextChannel], *, topic: str = ""
) -> None:
"""
        Sets a specified channel's topic
`channel` is optional and if not supplied will use the current channel
Note: The maximum number of characters is 1024
"""
if channel is None:
channel = ctx.channel
if not channel.permissions_for(ctx.author).manage_messages:
return
if not channel.permissions_for(ctx.me).manage_channels:
await ctx.send(
_('I require the "Manage Channels" permission to execute that command.')
)
return
await channel.edit(
topic=topic[:1024], reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channeledit(self, ctx: commands.Context) -> None:
"""Modify channel options"""
pass
@channeledit.command(name="name")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_name(
self, ctx: commands.Context, channel: Optional[ChannelConverter], *, name: str
    ) -> None:
        """Edit a channel's name"""
if not channel:
channel = ctx.channel
await channel.edit(
name=name[:100], reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="position")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_position(
self, ctx: commands.Context, channel: Optional[ChannelConverter], position: int
    ) -> None:
        """Edit a channel's position"""
if not channel:
channel = ctx.channel
try:
await channel.edit(
position=position, reason=_("Requested by {author}").format(author=ctx.author)
)
except Exception as e:
print(e)
return
await ctx.tick()
@channeledit.command(name="sync")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_sync(
self, ctx: commands.Context, channel: Optional[ChannelConverter], toggle: bool
    ) -> None:
        """Set whether or not to sync permissions with the channel's category"""
if not channel:
channel = ctx.channel
await channel.edit(
sync_permissions=toggle, reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="nsfw")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_nsfw(
self, ctx: commands.Context, toggle: bool, channel: discord.TextChannel = None
) -> None:
"""Set whether or not a channel is NSFW"""
if not channel:
channel = ctx.channel
await channel.edit(
nsfw=toggle, reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="topic")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_topic(
self, ctx: commands.Context, channel: Optional[discord.TextChannel], *, topic: str
    ) -> None:
        """Edit a channel's topic"""
if not channel:
channel = ctx.channel
await channel.edit(
topic=topic[:1024], reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="bitrate")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_bitrate(
self, ctx: commands.Context, channel: discord.VoiceChannel, bitrate: int
    ) -> None:
        """Edit a voice channel's bitrate"""
try:
await channel.edit(
bitrate=bitrate, reason=_("Requested by {author}").format(author=ctx.author)
)
except Exception:
await ctx.send(
_(
"`{bitrate}` is either too high or too low please "
"provide a number between 8000 and 96000."
).format(bitrate=bitrate)
)
return
await ctx.tick()
@channeledit.command(name="userlimit")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_userlimit(
self, ctx: commands.Context, channel: discord.VoiceChannel, limit: int
    ) -> None:
        """Edit a voice channel's user limit"""
try:
await channel.edit(
user_limit=limit, reason=_("Requested by {author}").format(author=ctx.author)
)
except Exception:
await ctx.send(
_(
"`{limit}` is either too high or too low please "
"provide a number between 0 and 99."
).format(limit=limit)
)
return
await ctx.tick()
@channeledit.command(name="permissions", aliases=["perms", "permission"])
@checks.mod_or_permissions(manage_permissions=True)
@checks.bot_has_permissions(manage_permissions=True)
async def edit_channel_perms(
self,
ctx: commands.Context,
permission: PermissionConverter,
channel: Optional[ChannelConverter],
true_or_false: Optional[bool],
*roles_or_users: Union[discord.Member, discord.Role, str],
) -> None:
"""
        Edit channel permissions for designated roles or users
`[channel]` The channel you would like to edit. If no channel is provided
the channel this command is run in will be used.
`[true_or_false]` `True` or `False` to set the permission level. If this is not
provided `None` will be used instead which signifies the default state of the permission.
`[roles_or_users]` the roles or users you want to edit this setting for.
`<permission>` Must be one of the following:
add_reactions
attach_files
connect
create_instant_invite
deafen_members
embed_links
external_emojis
manage_messages
manage_permissions
manage_roles
manage_webhooks
move_members
mute_members
priority_speaker
read_message_history
read_messages
send_messages
send_tts_messages
speak
stream
use_external_emojis
use_slash_commands
use_voice_activation
"""
if channel is None:
channel = ctx.channel
if (
not channel.permissions_for(ctx.author).manage_permissions
or not channel.permissions_for(ctx.author).manage_channels
):
return await ctx.send(
_("You do not have the correct permissions to edit {channel}.").format(
channel=channel.mention
)
)
if (
not channel.permissions_for(ctx.me).manage_permissions
            or not channel.permissions_for(ctx.me).manage_channels
):
return await ctx.send(
_("I do not have the correct permissions to edit {channel}.").format(
channel=channel.mention
)
)
targets = list(roles_or_users)
for r in roles_or_users:
if isinstance(r, str):
if r == "everyone":
targets.remove(r)
targets.append(ctx.guild.default_role)
else:
targets.remove(r)
if not targets:
return await ctx.send(
_("You need to provide a role or user you want to edit permissions for")
)
overs = channel.overwrites
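        # Merge the requested permission value into any existing overwrite for
        # each target, creating a fresh overwrite when none exists yet.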
for target in targets:
if target in overs:
overs[target].update(**{permission: true_or_false})
else:
perm = discord.PermissionOverwrite(**{permission: true_or_false})
overs[target] = perm
try:
await channel.edit(overwrites=overs)
await ctx.send(
_(
"The following roles or users have had `{perm}` "
"in {channel} set to `{perm_level}`:\n{roles_or_users}"
).format(
perm=permission,
channel=channel.mention,
perm_level=true_or_false,
roles_or_users=humanize_list([i.mention for i in targets]),
)
)
except Exception:
log.exception(f"Error editing permissions in channel {channel.name}")
return await ctx.send(_("There was an issue editing permissions on that channel."))
async def ask_for_invite(self, ctx: commands.Context) -> Optional[str]:
"""
Ask the user to provide an invite link
if reinvite is True
"""
msg_send = _(
"Please provide a reinvite link/message.\n" "Type `exit` for no invite link/message."
)
await ctx.send(msg_send)
try:
msg = await ctx.bot.wait_for(
"message", check=lambda m: m.author == ctx.message.author, timeout=30
)
except asyncio.TimeoutError:
await ctx.send(_("I Guess not."))
return None
if "exit" in msg.content:
return None
else:
return msg.content
async def get_members_since(
self,
ctx: commands.Context,
days: int,
role: Union[discord.Role, Tuple[discord.Role], None],
) -> List[discord.Member]:
now = datetime.datetime.utcnow()
after = now - datetime.timedelta(days=days)
member_list = []
if role:
if not isinstance(role, discord.Role):
for r in role:
for m in r.members:
if m.top_role < ctx.me.top_role:
member_list.append(m)
else:
member_list = [m for m in role.members if m.top_role < ctx.me.top_role]
else:
member_list = [m for m in ctx.guild.members if m.top_role < ctx.me.top_role]
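        # Walk every readable text channel's history since the cutoff and drop
        # members who have sent a message in that window.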
for channel in ctx.guild.text_channels:
if not channel.permissions_for(ctx.me).read_message_history:
continue
async for message in channel.history(limit=None, after=after):
if message.author in member_list:
member_list.remove(message.author)
return member_list
@commands.group()
@commands.guild_only()
@checks.bot_has_permissions(add_reactions=True)
async def pruneroles(self, ctx: commands.Context) -> None:
"""
Perform various actions on users who haven't spoken in x days
Note: This will only check if a user has talked in the past x days whereas
        Discord's built-in Prune checks online status
"""
pass
@pruneroles.command()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def list(self, ctx: commands.Context, days: int, role: discord.Role = None) -> None:
"""
List the users who have not talked in x days
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
member_list = await self.get_members_since(ctx, days, role)
x = [member_list[i : i + 10] for i in range(0, len(member_list), 10)]
msg_list = []
count = 1
for page in x:
if ctx.channel.permissions_for(ctx.me).embed_links:
em = discord.Embed(colour=await ctx.embed_colour())
if role:
em.add_field(name=_("Role"), value=role.mention)
else:
estimate = await ctx.guild.estimate_pruned_members(
days=days if days < 30 else 30
)
em.add_field(name=_("Discord Estimate"), value=str(estimate))
em.description = "\n".join(m.mention for m in page)
em.set_author(name=f"{ctx.guild.name}", icon_url=ctx.guild.icon_url)
em.title = _("Estimated members to be pruned ") + str(len(member_list))
em.set_footer(text="Page {} of {}".format(count, len(x)))
count += 1
msg_list.append(em)
else:
if not role:
estimate = await ctx.guild.estimate_pruned_members(days=days)
role_msg = _("Discord Estimate: {estimate}").format(estimate=estimate)
else:
role_msg = _("Role: {role.name}").format(role=role)
members = "\n".join(str(m) for m in page)
msg = _(
"Estimated members to be pruned {num_members}\n" "{role}\n{members}\n"
).format(num_members=len(member_list), role=role_msg, members=members)
msg += "Page {} of {}".format(count, len(x))
count += 1
msg_list.append(msg)
if msg_list != []:
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
else:
await ctx.send(_("No one was found to be inactive in this time."))
@pruneroles.command()
@checks.mod_or_permissions(kick_members=True)
@checks.bot_has_permissions(kick_members=True, add_reactions=True)
async def kick(
self, ctx: commands.Context, days: int, role: discord.Role = None, reinvite: bool = True
) -> None:
"""
Kick users from the server who have been inactive for x days
`days` is the number of days since last seen talking on the server
        `role` is the specified role you would like to kick; defaults to everyone
`reinvite` True/False whether to try to send the user a message before kicking
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
if role is not None and role >= ctx.me.top_role:
msg = _("That role is higher than my " "role so I cannot kick those members.")
await ctx.send(msg)
return
member_list = await self.get_members_since(ctx, days, role)
send_msg = str(len(member_list)) + _(
" estimated users to kick. " "Would you like to kick them?"
)
msg = await ctx.send(send_msg)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if pred.result is True:
link = await self.ask_for_invite(ctx)
no_invite = []
for member in member_list:
if link:
try:
await member.send(link)
except Exception:
no_invite.append(member.id)
await member.kick(reason=_("Kicked due to inactivity."))
if link and len(no_invite) > 0:
msg = str(len(no_invite)) + _(" users could not be DM'd an invite link")
await ctx.send(msg)
else:
await ctx.send(_("I guess not."), delete_after=30)
return
await ctx.send(_("Done."))
@pruneroles.command()
@checks.mod_or_permissions(manage_roles=True)
@checks.bot_has_permissions(manage_roles=True, add_reactions=True)
async def add(self, ctx: commands.Context, days: int, *new_roles: discord.Role) -> None:
"""
Give roles to users who haven't spoken in x days
`days` is the number of days since last seen talking on the server
`new_roles` The new roles to apply to a user who is inactive
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
if any([r >= ctx.me.top_role for r in new_roles]):
msg = _(
"At least one of those roles is higher than my "
"role so I cannot add those roles."
)
await ctx.send(msg)
return
member_list = await self.get_members_since(ctx, days, None)
send_msg = str(len(member_list)) + _(
" estimated users to give the role. " "Would you like to reassign their roles now?"
)
msg = await ctx.send(send_msg)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if pred.result is True:
for member in member_list:
roles = list(set(member.roles + list(new_roles)))
await member.edit(roles=roles, reason=_("Given role due to inactivity."))
else:
await ctx.send(_("I guess not."), delete_after=30)
return
await ctx.send(_("Done."))
@pruneroles.command()
@checks.mod_or_permissions(manage_roles=True)
@checks.bot_has_permissions(manage_roles=True, add_reactions=True)
async def remove(self, ctx: commands.Context, days: int, *removed_roles: discord.Role) -> None:
"""
Remove roles from users who haven't spoken in x days
`days` is the number of days since last seen talking on the server
        `removed_roles` the roles to remove from inactive users; members holding
        these roles are checked, or everyone if no role is given
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
if any([r >= ctx.me.top_role for r in removed_roles]):
msg = _(
"At least one of those roles is higher than my "
"role so I cannot remove those roles."
)
await ctx.send(msg)
return
member_list = await self.get_members_since(ctx, days, removed_roles)
send_msg = str(len(member_list)) + _(
" estimated users to remove their roles. "
"Would you like to reassign their roles now?"
)
msg = await ctx.send(send_msg)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if pred.result is True:
for member in member_list:
roles = list(set(member.roles) - set(removed_roles))
await member.edit(roles=roles, reason=_("Roles removed due to inactivity."))
else:
await ctx.send(_("I guess not."), delete_after=30)
return
await ctx.send(_("Done."))
@commands.command()
@checks.is_owner()
@commands.bot_has_permissions(embed_links=True)
async def setguildjoin(
self, ctx: commands.Context, channel: discord.TextChannel = None
) -> None:
"""
Set a channel to see new servers the bot is joining
"""
if channel is None:
channel = ctx.message.channel
await self.config.join_channel.set(channel.id)
msg = _("Posting new servers and left servers in ") + channel.mention
await ctx.send(msg)
@commands.command()
@checks.is_owner()
async def removeguildjoin(self, ctx: commands.Context) -> None:
"""
        Stop the bot's join/leave server messages
"""
await self.config.join_channel.set(None)
await ctx.send(_("No longer posting joined or left servers."))
@commands.command(hidden=True)
@checks.is_owner()
async def checkcheater(self, ctx: commands.Context, user_id: int) -> None:
"""
Checks for possible cheaters abusing the global bank and server powers
"""
is_cheater = False
msg = ""
for guild in self.bot.guilds:
if guild.owner.id == user_id:
is_cheater = True
msg += guild.owner.mention + _(" is guild owner of ") + guild.name + "\n"
if is_cheater:
for page in pagify(msg):
await ctx.maybe_send_embed(page)
if not is_cheater:
await ctx.send(_("Not a cheater"))
@commands.command()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def whois(
self, ctx: commands.Context, *, user_id: Union[int, discord.Member, discord.User]
) -> None:
"""
Display servers a user shares with the bot
        `user_id` can be a user ID or mention
"""
async with ctx.typing():
if not user_id:
return await ctx.send(_("You need to supply a user ID for this to work properly."))
if isinstance(user_id, int):
try:
member = await self.bot.fetch_user(user_id)
except AttributeError:
member = await self.bot.get_user_info(user_id)
except discord.errors.NotFound:
await ctx.send(str(user_id) + _(" doesn't seem to be a discord user."))
return
else:
member = user_id
if await self.bot.is_owner(ctx.author):
guild_list = []
async for guild in AsyncIter(self.bot.guilds, steps=100):
if m := guild.get_member(member.id):
guild_list.append(m)
else:
guild_list = []
async for guild in AsyncIter(self.bot.guilds, steps=100):
if not guild.get_member(ctx.author.id):
continue
if m := guild.get_member(member.id):
guild_list.append(m)
embed_list = []
robot = "\N{ROBOT FACE}" if member.bot else ""
if guild_list != []:
msg = f"**{member}** ({member.id}) {robot}" + _("is on:\n\n")
embed_msg = ""
for m in guild_list:
# m = guild.get_member(member.id)
is_owner = ""
nick = ""
if m.id == m.guild.owner_id:
is_owner = "\N{CROWN}"
if m.nick:
nick = f"`{m.nick}` in"
msg += f"{is_owner}{nick} __{m.guild.name}__ ({m.guild.id})\n\n"
embed_msg += f"{is_owner}{nick} __{m.guild.name}__ ({m.guild.id})\n\n"
if ctx.channel.permissions_for(ctx.me).embed_links:
for em in pagify(embed_msg, ["\n"], page_length=6000):
embed = discord.Embed()
since_created = f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
user_created = f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
public_flags = ""
if version_info >= VersionInfo.from_str("3.4.0"):
public_flags = "\n".join(
bold(i.replace("_", " ").title())
for i, v in member.public_flags
if v
)
created_on = _(
"Joined Discord on {user_created}\n"
"({since_created})\n"
"{public_flags}"
).format(
user_created=user_created,
since_created=since_created,
public_flags=public_flags,
)
embed.description = created_on
embed.set_thumbnail(url=member.avatar_url)
embed.colour = await ctx.embed_colour()
embed.set_author(
name=f"{member} ({member.id}) {robot}", icon_url=member.avatar_url
)
for page in pagify(em, ["\n"], page_length=1024):
embed.add_field(name=_("Shared Servers"), value=page)
embed_list.append(embed)
else:
for page in pagify(msg, ["\n"]):
embed_list.append(page)
else:
if ctx.channel.permissions_for(ctx.me).embed_links:
embed = discord.Embed()
since_created = (
f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
)
user_created = f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
public_flags = ""
if version_info >= VersionInfo.from_str("3.4.0"):
public_flags = "\n".join(
bold(i.replace("_", " ").title()) for i, v in member.public_flags if v
)
created_on = _(
"Joined Discord on {user_created}\n" "({since_created})\n" "{public_flags}"
).format(
user_created=user_created,
since_created=since_created,
public_flags=public_flags,
)
embed.description = created_on
embed.set_thumbnail(url=member.avatar_url)
embed.colour = await ctx.embed_colour()
embed.set_author(
name=f"{member} ({member.id}) {robot}", icon_url=member.avatar_url
)
embed_list.append(embed)
else:
msg = f"**{member}** ({member.id}) " + _("is not in any shared servers!")
embed_list.append(msg)
await BaseMenu(
source=ListPages(pages=embed_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@commands.command(hidden=True)
@checks.is_owner()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def topservers(self, ctx: commands.Context) -> None:
"""
        Lists servers by number of users and shows the member count for each
"""
guilds = sorted(list(self.bot.guilds), key=lambda s: s.member_count, reverse=True)
msg = ""
msg_list = []
count = 0
        for server in guilds:
if count == 10:
msg_list.append(msg)
msg = ""
count = 0
msg += (
f"{escape(server.name, mass_mentions=True, formatting=True)}: "
f"`{humanize_number(server.member_count)}`\n"
)
count += 1
msg_list.append(msg)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@commands.command(hidden=True)
@checks.is_owner()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def newservers(self, ctx: commands.Context) -> None:
"""
Lists servers by when the bot was added to the server
"""
guilds = sorted(list(self.bot.guilds), key=lambda s: s.me.joined_at)
msg = ""
msg_list = []
count = 0
for _, server in enumerate(guilds):
if count == 10:
msg_list.append(msg)
msg = ""
count = 0
msg += (
f"{escape(server.name, mass_mentions=True, formatting=True)}: "
f"`{humanize_number(server.member_count)}`\n"
)
count += 1
msg_list.append(msg)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
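    # guildedit and its subcommands are thin wrappers around discord.Guild.edit();
    # each one passes an audit-log reason naming the invoking user, e.g. (illustrative
    # values only, not executed here):
    #     await ctx.guild.edit(name="New name", reason="Requested by someuser#1234")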
@commands.group()
@checks.admin_or_permissions(manage_guild=True)
@commands.bot_has_permissions(manage_guild=True)
async def guildedit(self, ctx: commands.Context) -> None:
"""Edit various guild settings"""
pass
@guildedit.command(name="name")
async def guild_name(self, ctx: commands.Context, *, name: str):
"""
Change the server name
`<name>` The new name of the server.
"""
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(name=name, reason=reason)
except Exception:
log.exception("Could not edit guild name")
            return await ctx.send(_("I could not edit the server's name."))
await ctx.send(_("Server name set to {name}.").format(name=name))
@guildedit.command(name="verificationlevel", aliases=["verification"])
async def verifivation_level(self, ctx: commands.Context, *, level: str) -> None:
"""
        Modify the guild's verification level
`<level>` must be one of:
`none`, `low`, `medium`, `table flip`(`high`), or `double table flip`(`extreme`)
"""
levels = {
"none": discord.VerificationLevel.none,
"low": discord.VerificationLevel.low,
"medium": discord.VerificationLevel.medium,
"high": discord.VerificationLevel.high,
"table flip": discord.VerificationLevel.high,
"extreme": discord.VerificationLevel.extreme,
"double table flip": discord.VerificationLevel.extreme,
}
reason = _("Requested by {author}").format(author=ctx.author)
if level.lower() not in levels:
await ctx.send(_("`{}` is not a proper verification level.").format(level))
return
try:
await ctx.guild.edit(verification_level=levels[level], reason=reason)
except Exception:
log.exception("Could not edit guild verification level")
            return await ctx.send(_("I could not edit the server's verification level."))
await ctx.send(_("Server verification level set to {level}").format(level=level))
@guildedit.command(name="systemchannel", aliases=["welcomechannel"])
async def system_channel(
self, ctx: commands.Context, channel: Optional[discord.TextChannel] = None
) -> None:
"""
Change the system channel
        This is the default Discord welcome channel.
        `[channel]` The channel you want to set as the system channel.
        If not provided it will be set to `None`.
"""
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(system_channel=channel, reason=reason)
except Exception:
log.exception("Could not edit guild systemchannel")
            return await ctx.send(_("I could not edit the server's system channel."))
        channel_name = getattr(channel, "mention", "None")
        await ctx.send(_("Server system channel set to {channel}").format(channel=channel_name))
@guildedit.command(name="afkchannel")
async def afk_channel(
self, ctx: commands.Context, channel: Optional[discord.VoiceChannel] = None
) -> None:
"""
        Change the server's AFK voice channel
        `[channel]` The channel you want to set as the AFK voice channel.
        If not provided it will be set to `None`.
"""
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(afk_channel=channel, reason=reason)
except Exception:
log.exception("Could not edit guild afk channel")
            return await ctx.send(_("I could not edit the server's AFK channel."))
        channel_name = getattr(channel, "mention", "None")
        await ctx.send(_("Server AFK channel set to {channel}").format(channel=channel_name))
@guildedit.command(name="afktimeout")
async def afk_timeout(self, ctx: commands.Context, timeout: int) -> None:
"""
        Change the server's AFK timeout
`<timeout>` must be a value of 60, 300, 900, 1800, or 3600.
"""
if timeout not in [60, 300, 900, 1800, 3600]:
await ctx.send(_("`timeout` must be a value of 60, 300, 900, 1800, or 3600."))
return
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(afk_timeout=timeout, reason=reason)
except Exception:
log.exception("Could not edit guild afk timeout")
            return await ctx.send(_("I could not edit the server's AFK timeout."))
await ctx.send(_("Server AFK timeout set to {timeout} seconds.").format(timeout=timeout))
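    # topmembers sorts the member list by join date (falling back to "now" when
    # joined_at is unavailable) and slices it into pages of `number` members each.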
@commands.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def topmembers(
self, ctx: commands.Context, number: int = 10, guild: GuildConverter = None
) -> None:
"""
Lists top members on the server by join date
        `number` optional[int] the number of members to display per page, maximum of 50
`guild` can be either the server ID or name
"""
if not guild:
guild = ctx.guild
if number > 50:
number = 50
if number < 10:
number = 10
        def joined(member: discord.Member):
            # joined_at can be None for uncached members; fall back to utcnow so sorting never fails
            return getattr(member, "joined_at", None) or datetime.datetime.utcnow()
member_list = sorted(guild.members, key=joined)
is_embed = ctx.channel.permissions_for(ctx.me).embed_links
x = []
for i in range(0, len(member_list), number):
x.append(member_list[i : i + number])
await asyncio.sleep(0.2)
msg_list = []
for page in x:
header_msg = (
"__**" + _("First ") + str(number) + _(" members of ") + f"{guild.name}**__\n"
)
msg = ""
for member in page:
if is_embed:
msg += f"{member_list.index(member)+1}. {member.mention}\n"
else:
msg += f"{member_list.index(member)+1}. {member.name}\n"
if is_embed:
embed = discord.Embed(description=msg)
embed.set_author(name=guild.name + _(" first members"), icon_url=guild.icon_url)
msg_list.append(embed)
else:
msg_list.append(header_msg + msg)
await asyncio.sleep(0.1)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@commands.command()
@checks.is_owner()
async def listchannels(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
Lists channels and their position and ID for a server
`guild` can be either the server ID or name
"""
if not guild:
guild = ctx.guild
msg = "__**{}({})**__\n".format(guild.name, guild.id)
for category in guild.by_category():
if category[0] is not None:
word = _("Position")
msg += "{0} ({1}): {2} {3}\n".format(
category[0].mention, category[0].id, word, category[0].position
)
for channel in category[1]:
word = _("Position")
msg += "{0} ({1}): {2} {3}\n".format(
channel.mention, channel.id, word, channel.position
)
for page in pagify(msg, ["\n"]):
await ctx.send(page)
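    # confirm_leave_guild asks a yes/no question with MessagePredicate and only calls
    # guild.leave() when the reply is affirmative; failures are logged and reported.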
@staticmethod
async def confirm_leave_guild(ctx: commands.Context, guild) -> None:
await ctx.send(
_("Are you sure you want me to leave {guild}? (reply yes or no)").format(
guild=guild.name
)
)
pred = MessagePredicate.yes_or_no(ctx)
await ctx.bot.wait_for("message", check=pred)
if pred.result is True:
try:
await ctx.send(_("Leaving {guild}.").format(guild=guild.name))
await guild.leave()
except Exception:
log.error(
_("I couldn't leave {guild} ({g_id}).").format(
guild=guild.name, g_id=guild.id
),
exc_info=True,
)
await ctx.send(_("I couldn't leave {guild}.").format(guild=guild.name))
else:
await ctx.send(_("Okay, not leaving {guild}.").format(guild=guild.name))
@staticmethod
async def get_guild_invite(guild: discord.Guild, max_age: int = 86400) -> None:
"""Handles the reinvite logic for getting an invite
to send the newly unbanned user
:returns: :class:`Invite`
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L771
"""
my_perms: discord.Permissions = guild.me.guild_permissions
if my_perms.manage_guild or my_perms.administrator:
if "VANITY_URL" in guild.features:
# guild has a vanity url so use it as the one to send
try:
return await guild.vanity_invite()
except discord.errors.Forbidden:
invites = []
invites = await guild.invites()
else:
invites = []
for inv in invites: # Loop through the invites for the guild
if not (inv.max_uses or inv.max_age or inv.temporary):
# Invite is for the guild's default channel,
# has unlimited uses, doesn't expire, and
# doesn't grant temporary membership
# (i.e. they won't be kicked on disconnect)
return inv
else: # No existing invite found that is valid
channels_and_perms = zip(
guild.text_channels, map(guild.me.permissions_in, guild.text_channels)
)
channel = next(
(channel for channel, perms in channels_and_perms if perms.create_instant_invite),
None,
)
if channel is None:
return
try:
# Create invite that expires after max_age
return await channel.create_invite(max_age=max_age)
except discord.HTTPException:
return
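    # getguild/getguilds reuse GuildPages: for the bot owner the menu starts on the
    # page matching the requested (or current) guild's index in bot.guilds, while
    # non-owners only ever see the guild the command was invoked in.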
@commands.command()
    @commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def getguild(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
Display info about servers the bot is on
`guild_name` can be either the server ID or partial name
"""
async with ctx.typing():
if not ctx.guild and not await ctx.bot.is_owner(ctx.author):
return await ctx.send(_("This command is not available in DM."))
guilds = [ctx.guild]
page = 0
if await ctx.bot.is_owner(ctx.author):
if ctx.guild:
page = ctx.bot.guilds.index(ctx.guild)
guilds = ctx.bot.guilds
if guild:
page = ctx.bot.guilds.index(guild)
await BaseMenu(
source=GuildPages(guilds=guilds),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=page,
).start(ctx=ctx)
@commands.command()
    @checks.admin()
    @commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def getguilds(self, ctx: commands.Context, *, guilds: MultiGuildConverter) -> None:
"""
Display info about multiple servers
`guild_name` can be either the server ID or partial name
"""
async with ctx.typing():
page = 0
if not guilds:
guilds = ctx.bot.guilds
page = ctx.bot.guilds.index(ctx.guild)
await BaseMenu(
source=GuildPages(guilds=guilds),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=page,
).start(ctx=ctx)
@commands.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
async def nummembers(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
Display number of users on a server
`guild_name` can be either the server ID or partial name
"""
if not guild:
guild = ctx.guild
await ctx.send(
"{} has {} members.".format(guild.name, humanize_number(guild.member_count))
)
@commands.guild_only()
@commands.command(aliases=["rolestats"])
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def getroles(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
        Displays all roles, their IDs and number of members, in order of
hierarchy
`guild_name` can be either the server ID or partial name
"""
if not guild:
guild = ctx.guild
msg = ""
for role in sorted(guild.roles, reverse=True):
if ctx.channel.permissions_for(ctx.me).embed_links and guild is ctx.guild:
msg += f"{role.mention} ({role.id}): {len(role.members)}\n"
else:
msg += f"{role.name} ({role.id}): {len(role.members)}\n"
msg_list = []
for page in pagify(msg, ["\n"]):
if ctx.channel.permissions_for(ctx.me).embed_links:
embed = discord.Embed()
embed.description = page
embed.set_author(name=f"{guild.name} " + _("Roles"), icon_url=guild.icon_url)
msg_list.append(embed)
else:
msg_list.append(page)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
async def check_highest(self, data):
highest = 0
users = 0
for user, value in data.items():
if value > highest:
highest = value
users = user
return highest, users
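    # get_reactions walks every reaction on the target message and records one line
    # per reacting user, then wraps the result in code-block pages with a page header.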
@commands.command(name="getreactions", aliases=["getreaction"])
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def get_reactions(self, ctx: commands.Context, message: discord.Message) -> None:
"""
        Gets a list of all reactions from the specified message and displays each
        user's ID, username, discriminator, and the emoji name.
"""
async with ctx.typing():
new_msg = ""
for reaction in message.reactions:
async for user in reaction.users():
if isinstance(reaction.emoji, discord.PartialEmoji):
new_msg += "{} {}#{} {}\n".format(
user.id, user.name, user.discriminator, reaction.emoji.name
)
else:
new_msg += "{} {}#{} {}\n".format(
user.id, user.name, user.discriminator, reaction.emoji
)
temp_pages = []
pages = []
for page in pagify(new_msg, shorten_by=20):
temp_pages.append(box(page, "py"))
max_i = len(temp_pages)
i = 1
for page in temp_pages:
pages.append(f"`Page {i}/{max_i}`\n" + page)
i += 1
await BaseMenu(
source=ListPages(pages=pages),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
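    # The per-guild Config entry maintained by get_server_stats/get_channel_stats
    # mirrors the schema registered in __init__ and looks roughly like the example
    # below (IDs and counts are made up for illustration):
    #     {
    #         "last_checked": 0,
    #         "total": 4821,
    #         "members": {"123456789012345678": 321},
    #         "channels": {
    #             "234567890123456789": {
    #                 "members": {"123456789012345678": 321},
    #                 "total": 1057,
    #                 "last_checked": 845623981237645312,
    #             }
    #         },
    #     }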
async def get_server_stats(
self, guild: discord.Guild
) -> Dict[str, Union[str, Dict[str, int]]]:
"""
This is a very expensive function but handles only pulling new
data into config since the last time the command has been run.
"""
# to_return: Dict[str, Union[int, Dict[int, int]]] = {
# "last_checked": 0,
# "members": {m.id: 0 for m in guild.members},
# "total_posts": 0,
# "channels": {},
# } This is the data schema for saved data
# It's all formatted easily for end user data request and deletion
# to_return = await self.config.guild(guild).all()
async with self.config.guild(guild).all() as to_return:
for channel in guild.text_channels:
my_perms = channel.permissions_for(guild.me)
set_new_last_read = False
if str(channel.id) not in to_return["channels"]:
to_return["channels"][str(channel.id)] = {}
to_return["channels"][str(channel.id)]["members"] = {}
to_return["channels"][str(channel.id)]["total"] = 0
to_return["channels"][str(channel.id)]["last_checked"] = 0
check_after = None
else:
check_after = discord.Object(
id=to_return["channels"][str(channel.id)]["last_checked"]
)
if not my_perms.read_message_history or not my_perms.read_messages:
continue
try:
log.debug(check_after)
async for message in channel.history(
limit=None, after=check_after, oldest_first=False
):
if not set_new_last_read:
log.debug(f"Setting last_checked to {message.id}")
to_return["channels"][str(channel.id)]["last_checked"] = message.id
set_new_last_read = True
author = message.author
if author.discriminator == "0000" and author.bot:
continue
if str(author.id) not in to_return["members"]:
to_return["members"][str(author.id)] = 0
if str(author.id) not in to_return["channels"][str(channel.id)]["members"]:
to_return["channels"][str(channel.id)]["members"][str(author.id)] = 0
to_return["channels"][str(channel.id)]["members"][str(author.id)] += 1
to_return["channels"][str(channel.id)]["total"] += 1
to_return["members"][str(author.id)] += 1
to_return["total"] += 1
except (AttributeError, discord.Forbidden):
                    log.debug("Unable to read history for a channel in %s", guild.id, exc_info=True)
_ret = copy(to_return)
# copy the data to prevent context manager from removing the reference
log.debug(_ret)
return _ret
async def get_channel_stats(self, channel: discord.TextChannel) -> dict:
"""
This is another expensive function but handles only pulling
new data into config since the last time the command has been run.
"""
guild = channel.guild
async with self.config.guild(guild).all() as to_return:
my_perms = channel.permissions_for(guild.me)
set_new_last_read = False
if channel.id not in to_return["channels"]:
to_return["channels"][str(channel.id)] = {}
to_return["channels"][str(channel.id)]["members"] = {}
to_return["channels"][str(channel.id)]["total"] = 0
to_return["channels"][str(channel.id)]["last_checked"] = 0
check_after = None
else:
check_after = to_return["channels"][str(channel.id)]["last_checked"]
if not my_perms.read_message_history or not my_perms.read_messages:
return {} # we shouldn't have even reached this far before
try:
async for message in channel.history(
limit=None, after=check_after, oldest_first=False
):
if not set_new_last_read:
to_return["channels"][str(channel.id)]["last_checked"] = message.id
set_new_last_read = True
author = message.author
if author.discriminator == "0000" and author.bot:
continue
if str(author.id) not in to_return["members"]:
to_return["members"][str(author.id)] = 0
if str(author.id) not in to_return["channels"][str(channel.id)]["members"]:
to_return["channels"][str(channel.id)]["members"][str(author.id)] = 0
to_return["channels"][str(channel.id)]["members"][str(author.id)] += 1
to_return["channels"][str(channel.id)]["total"] += 1
to_return["members"][str(author.id)] += 1
to_return["total"] += 1
# we still want to update the guild totals if we happened to pull a specific channel
except (AttributeError, discord.Forbidden):
pass
_ret = copy(to_return)
return _ret
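    # serverstats/channelstats both confirm with a reaction prompt before scanning,
    # because the first run reads full channel history; later runs only fetch messages
    # newer than the stored last_checked IDs.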
@commands.command(name="serverstats")
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
@commands.guild_only()
async def server_stats(
self,
ctx: commands.Context,
) -> None:
"""
Gets total messages on the server and displays each channel
separately as well as the user who has posted the most in each channel
Note: This is a very slow function and may take some time to complete
"""
warning_msg = await ctx.send(
_(
"This can take a long time to gather all information for the first time! Are you sure you want to continue?"
)
)
pred = ReactionPredicate.yes_or_no(warning_msg, ctx.author)
start_adding_reactions(warning_msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if not pred.result:
return await ctx.send(_("Alright I will not gather data."))
async with ctx.channel.typing():
guild_data = await self.get_server_stats(ctx.guild)
channel_messages = []
member_messages = []
sorted_chans = sorted(
guild_data["channels"].items(), key=lambda x: x[1]["total"], reverse=True
)
sorted_members = sorted(
guild_data["members"].items(), key=lambda x: x[1], reverse=True
)
for member_id, value in sorted_members[:5]:
member_messages.append(f"<@!{member_id}>: {bold(humanize_number(value))}\n")
try:
most_messages_user_id = sorted_members[0][0]
except IndexError:
most_messages_user_id = None
try:
most_messages_user_num = sorted_members[0][1]
except IndexError:
most_messages_user_num = 0
new_msg = (
_("**Most posts on the server**\nTotal Messages: ")
+ bold(humanize_number(guild_data["total"]))
+ _("\nMost posts by ")
+ f"<@!{most_messages_user_id}> {bold(humanize_number(most_messages_user_num))}\n\n"
)
for channel_id, value in sorted_chans[:5]:
sorted_members = sorted(
guild_data["channels"][channel_id]["members"].items(),
key=lambda x: x[1],
reverse=True,
)
most_messages_user_id = sorted_members[0][0]
most_messages_user_num = sorted_members[0][1]
maybe_guild = f"<@!{most_messages_user_id}>: {bold(humanize_number(int(most_messages_user_num)))}\n"
channel_messages.append(
_("**Most posts in <#{}>**\nTotal Messages: ").format(channel_id)
+ bold(humanize_number(int(value["total"])))
+ _("\nMost posts by {}\n".format(maybe_guild))
)
em = discord.Embed(colour=await self.bot.get_embed_colour(ctx))
em.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
            em.description = new_msg + "".join(channel_messages)
em.add_field(name=_("Top Members"), value="".join(i for i in member_messages))
await ctx.send(embed=em)
@commands.command(name="channelstats")
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def channel_stats(
self,
ctx: commands.Context,
channel: discord.TextChannel = None,
) -> None:
"""
Gets total messages in a specific channel as well as the user who
has posted the most in that channel
        `channel` is the text channel to check; defaults to the current channel
Note: This can be a very slow function and may take some time to complete
"""
warning_msg = await ctx.send(
_(
"This can take a long time to gather all information for the first time! Are you sure you want to continue?"
)
)
pred = ReactionPredicate.yes_or_no(warning_msg, ctx.author)
start_adding_reactions(warning_msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if not pred.result:
return await ctx.send(_("Alright I will not gather data."))
if not channel:
channel = ctx.channel
async with ctx.channel.typing():
channel_data = await self.get_channel_stats(channel)
member_messages = []
sorted_members = sorted(
channel_data["channels"][str(channel.id)]["members"].items(),
key=lambda x: x[1],
reverse=True,
)
for member_id, value in sorted_members[:5]:
member_messages.append(f"<@!{member_id}>: {bold(humanize_number(value))}\n")
try:
most_messages_user_id = sorted_members[0][0]
except IndexError:
most_messages_user_id = None
try:
most_messages_user_num = sorted_members[0][1]
except IndexError:
most_messages_user_num = 0
maybe_guild = f"<@!{most_messages_user_id}>: {bold(humanize_number(int(most_messages_user_num)))}\n"
new_msg = (
_("**Most posts in <#{}>**\nTotal Messages: ").format(channel.id)
+ bold(humanize_number(int(channel_data["channels"][str(channel.id)]["total"])))
+ _("\nMost posts by {}\n".format(maybe_guild))
)
em = discord.Embed(colour=await self.bot.get_embed_colour(ctx))
em.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
em.description = f"{new_msg}"
em.add_field(name=_("Top Members"), value="".join(i for i in member_messages))
await ctx.send(embed=em)
@commands.guild_only()
@commands.command(aliases=["serveremojis"])
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def guildemojis(
self,
ctx: commands.Context,
id_emojis: Optional[bool] = False,
*,
guild: GuildConverter = None,
) -> None:
"""
Display all server emojis in a menu that can be scrolled through
        `id_emojis` return the IDs of the emojis. Defaults to False, set to True
        if you want to see emoji IDs.
`guild_name` can be either the server ID or partial name
"""
if not guild:
guild = ctx.guild
msg = ""
embed = discord.Embed(timestamp=ctx.message.created_at)
embed.set_author(name=guild.name, icon_url=guild.icon_url)
regular = []
for emoji in guild.emojis:
if id_emojis:
regular.append(
(
f"{emoji} = `:{emoji.name}:` "
                        f"`<{'a' if emoji.animated else ''}:{emoji.name}:{emoji.id}>`\n"
)
)
else:
regular.append(f"{emoji} = `:{emoji.name}:`\n")
        if regular:
            embed.description = "".join(regular)
x = [regular[i : i + 10] for i in range(0, len(regular), 10)]
emoji_embeds = []
count = 1
for page in x:
em = discord.Embed(timestamp=ctx.message.created_at)
em.set_author(name=guild.name + _(" Emojis"), icon_url=guild.icon_url)
regular = []
msg = ""
for emoji in page:
msg += emoji
em.description = msg
em.set_footer(text="Page {} of {}".format(count, len(x)))
count += 1
emoji_embeds.append(em)
if len(emoji_embeds) == 0:
await ctx.send(_("There are no emojis on {guild}.").format(guild=guild.name))
else:
await BaseMenu(
source=ListPages(pages=emoji_embeds),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
|
import asyncio
import datetime
import logging
from copy import copy
from io import BytesIO
from typing import Dict, List, Literal, Optional, Tuple, Union, cast
import aiohttp
import discord
from redbot import VersionInfo, version_info
from redbot.core import Config, checks, commands
from redbot.core.bot import Red
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import (
bold,
box,
escape,
humanize_list,
humanize_number,
humanize_timedelta,
pagify,
)
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
from .converters import (
ChannelConverter,
FuzzyMember,
GuildConverter,
MultiGuildConverter,
PermissionConverter,
)
from .menus import AvatarPages, BaseMenu, GuildPages, ListPages
_ = Translator("ServerStats", __file__)
log = logging.getLogger("red.trusty-cogs.ServerStats")
@cog_i18n(_)
class ServerStats(commands.Cog):
"""
Gather useful information about servers the bot is in
A lot of commands are bot owner only
"""
__author__ = ["TrustyJAID", "Preda"]
__version__ = "1.6.9"
def __init__(self, bot):
self.bot: Red = bot
default_global: dict = {"join_channel": None}
default_guild: dict = {"last_checked": 0, "members": {}, "total": 0, "channels": {}}
self.config: Config = Config.get_conf(self, 54853421465543, force_registration=True)
self.config.register_global(**default_global)
self.config.register_guild(**default_guild)
def format_help_for_context(self, ctx: commands.Context) -> str:
"""
Thanks Sinbad!
"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nCog Version: {self.__version__}"
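    # red_delete_data_for_user strips a user's message counts from every guild's and
    # channel's stats block so the stored statistics contain no per-user data afterwards.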
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
"""
Method for finding users data inside the cog and deleting it.
"""
all_guilds = await self.config.all_guilds()
for guild_id, data in all_guilds.items():
save = False
if str(user_id) in data["members"]:
del data["members"][str(user_id)]
save = True
for channel_id, chan_data in data["channels"].items():
if str(user_id) in chan_data["members"]:
del chan_data["members"][str(user_id)]
save = True
if save:
await self.config.guild_from_id(guild_id).set(data)
@commands.command()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def avatar(self, ctx: commands.Context, *, members: Optional[FuzzyMember]):
"""
        Display a user's avatar in chat
"""
if members is None:
members = [ctx.author]
await BaseMenu(
source=AvatarPages(members=members),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
).start(ctx=ctx)
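    # on_guild_join/on_guild_remove post the standard guild embed (see guild_embed
    # below) to the optional join_channel configured with [p]setguildjoin.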
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
"""Build and send a message containing serverinfo when the bot joins a new server"""
channel_id = await self.config.join_channel()
if channel_id is None:
return
channel = self.bot.get_channel(channel_id)
passed = f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
        created_at = _(
            "{bot} has joined a server!\n "
            "That's **{num}** servers now!\n"
            "That's a total of **{users}** users!\n"
            "Server created on **{since}**. "
            "That's over **{passed}**!"
).format(
bot=channel.guild.me.mention,
num=humanize_number(len(self.bot.guilds)),
users=humanize_number(len(self.bot.users)),
since=f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>",
passed=passed,
)
try:
em = await self.guild_embed(guild)
em.description = created_at
await channel.send(embed=em)
except Exception:
log.error(f"Error creating guild embed for new guild ID {guild.id}", exc_info=True)
async def guild_embed(self, guild: discord.Guild) -> discord.Embed:
"""
Builds the guild embed information used throughout the cog
"""
def _size(num):
for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if abs(num) < 1024.0:
return "{0:.1f}{1}".format(num, unit)
num /= 1024.0
return "{0:.1f}{1}".format(num, "YB")
def _bitsize(num):
for unit in ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if abs(num) < 1000.0:
return "{0:.1f}{1}".format(num, unit)
num /= 1000.0
return "{0:.1f}{1}".format(num, "YB")
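        # _size uses 1024-based units while _bitsize uses 1000-based units, e.g.
        # _size(5242880) -> "5.0MB" and _bitsize(96000) -> "96.0KB".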
passed = (datetime.datetime.utcnow() - guild.created_at).days
created_at = _("Created on {date}. That's over {num}!").format(
date=bold(f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"),
num=bold(f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"),
)
total_users = humanize_number(guild.member_count)
try:
joined_at = guild.me.joined_at
except AttributeError:
joined_at = datetime.datetime.utcnow()
bot_joined = f"<t:{int(joined_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
since_joined = f"<t:{int(joined_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
joined_on = _(
"**{bot_name}** joined this server on **{bot_join}**.\n"
"That's over **{since_join}**!"
).format(bot_name=self.bot.user.name, bot_join=bot_joined, since_join=since_joined)
shard = (
_("\nShard ID: **{shard_id}/{shard_count}**").format(
shard_id=humanize_number(guild.shard_id + 1),
shard_count=humanize_number(self.bot.shard_count),
)
if self.bot.shard_count > 1
else ""
)
colour = guild.roles[-1].colour
online_stats = {
_("Humans: "): lambda x: not x.bot,
_(" • Bots: "): lambda x: x.bot,
"\N{LARGE GREEN CIRCLE}": lambda x: x.status is discord.Status.online,
"\N{LARGE ORANGE CIRCLE}": lambda x: x.status is discord.Status.idle,
"\N{LARGE RED CIRCLE}": lambda x: x.status is discord.Status.do_not_disturb,
"\N{MEDIUM WHITE CIRCLE}": lambda x: x.status is discord.Status.offline,
"\N{LARGE PURPLE CIRCLE}": lambda x: (
x.activity is not None and x.activity.type is discord.ActivityType.streaming
),
}
member_msg = _("Total Users: {}\n").format(bold(total_users))
count = 1
for emoji, value in online_stats.items():
try:
num = len([m for m in guild.members if value(m)])
except Exception as error:
                log.debug("Error counting members for the guild embed: %s", error)
continue
else:
member_msg += f"{emoji} {bold(humanize_number(num))} " + (
"\n" if count % 2 == 0 else ""
)
count += 1
text_channels = len(guild.text_channels)
nsfw_channels = len([c for c in guild.text_channels if c.is_nsfw()])
voice_channels = len(guild.voice_channels)
vc_regions = {
"vip-us-east": _("__VIP__ US East ") + "\U0001F1FA\U0001F1F8",
"vip-us-west": _("__VIP__ US West ") + "\U0001F1FA\U0001F1F8",
"vip-amsterdam": _("__VIP__ Amsterdam ") + "\U0001F1F3\U0001F1F1",
"eu-west": _("EU West ") + "\U0001F1EA\U0001F1FA",
"eu-central": _("EU Central ") + "\U0001F1EA\U0001F1FA",
"europe": _("Europe ") + "\U0001F1EA\U0001F1FA",
"london": _("London ") + "\U0001F1EC\U0001F1E7",
"frankfurt": _("Frankfurt ") + "\U0001F1E9\U0001F1EA",
"amsterdam": _("Amsterdam ") + "\U0001F1F3\U0001F1F1",
"us-west": _("US West ") + "\U0001F1FA\U0001F1F8",
"us-east": _("US East ") + "\U0001F1FA\U0001F1F8",
"us-south": _("US South ") + "\U0001F1FA\U0001F1F8",
"us-central": _("US Central ") + "\U0001F1FA\U0001F1F8",
"singapore": _("Singapore ") + "\U0001F1F8\U0001F1EC",
"sydney": _("Sydney ") + "\U0001F1E6\U0001F1FA",
"brazil": _("Brazil ") + "\U0001F1E7\U0001F1F7",
"hongkong": _("Hong Kong ") + "\U0001F1ED\U0001F1F0",
"russia": _("Russia ") + "\U0001F1F7\U0001F1FA",
"japan": _("Japan ") + "\U0001F1EF\U0001F1F5",
"southafrica": _("South Africa ") + "\U0001F1FF\U0001F1E6",
"india": _("India ") + "\U0001F1EE\U0001F1F3",
"south-korea": _("South Korea ") + "\U0001f1f0\U0001f1f7",
} # Unicode is needed because bold() is escaping emojis for some reason in this case.
verif = {
"none": _("0 - None"),
"low": _("1 - Low"),
"medium": _("2 - Medium"),
"high": _("3 - High"),
"extreme": _("4 - Extreme"),
}
features = {
"ANIMATED_ICON": _("Animated Icon"),
"BANNER": _("Banner Image"),
"COMMERCE": _("Commerce"),
"COMMUNITY": _("Community"),
"DISCOVERABLE": _("Server Discovery"),
"FEATURABLE": _("Featurable"),
"INVITE_SPLASH": _("Splash Invite"),
"MEMBER_LIST_DISABLED": _("Member list disabled"),
"MEMBER_VERIFICATION_GATE_ENABLED": _("Membership Screening enabled"),
"MORE_EMOJI": _("More Emojis"),
"NEWS": _("News Channels"),
"PARTNERED": _("Partnered"),
"PREVIEW_ENABLED": _("Preview enabled"),
"PUBLIC_DISABLED": _("Public disabled"),
"VANITY_URL": _("Vanity URL"),
"VERIFIED": _("Verified"),
"VIP_REGIONS": _("VIP Voice Servers"),
"WELCOME_SCREEN_ENABLED": _("Welcome Screen enabled"),
}
guild_features_list = [
f"✅ {name}" for feature, name in features.items() if feature in guild.features
]
em = discord.Embed(
description=(f"{guild.description}\n\n" if guild.description else "")
+ f"{created_at}\n{joined_on}",
colour=colour,
)
em.set_author(
name=guild.name,
icon_url="https://cdn.discordapp.com/emojis/457879292152381443.png"
if "VERIFIED" in guild.features
else "https://cdn.discordapp.com/emojis/508929941610430464.png"
if "PARTNERED" in guild.features
else discord.Embed.Empty,
url=guild.icon_url
if guild.icon_url
else "https://cdn.discordapp.com/embed/avatars/1.png",
)
em.set_thumbnail(
url=guild.icon_url
if guild.icon_url
else "https://cdn.discordapp.com/embed/avatars/1.png"
)
em.add_field(name=_("Members:"), value=member_msg)
em.add_field(
name=_("Channels:"),
value=_(
"\N{SPEECH BALLOON} Text: {text}\n{nsfw}"
"\N{SPEAKER WITH THREE SOUND WAVES} Voice: {voice}"
).format(
text=bold(humanize_number(text_channels)),
nsfw=_("\N{NO ONE UNDER EIGHTEEN SYMBOL} Nsfw: {}\n").format(
bold(humanize_number(nsfw_channels))
)
if nsfw_channels
else "",
voice=bold(humanize_number(voice_channels)),
),
)
owner = guild.owner if guild.owner else await self.bot.get_or_fetch_user(guild.owner_id)
em.add_field(
name=_("Utility:"),
value=_(
"Owner: {owner_mention}\n{owner}\nRegion: {region}\nVerif. level: {verif}\nServer ID: {id}{shard}"
).format(
owner_mention=bold(str(owner.mention)),
owner=bold(str(owner)),
region=f"**{vc_regions.get(str(guild.region)) or str(guild.region)}**",
verif=bold(verif[str(guild.verification_level)]),
id=bold(str(guild.id)),
shard=shard,
),
inline=False,
)
em.add_field(
name=_("Misc:"),
value=_(
"AFK channel: {afk_chan}\nAFK timeout: {afk_timeout}\nCustom emojis: {emojis}\nRoles: {roles}"
).format(
afk_chan=bold(str(guild.afk_channel)) if guild.afk_channel else bold(_("Not set")),
afk_timeout=bold(humanize_timedelta(seconds=guild.afk_timeout)),
emojis=bold(humanize_number(len(guild.emojis))),
roles=bold(humanize_number(len(guild.roles))),
),
inline=False,
)
if guild_features_list:
em.add_field(name=_("Server features:"), value="\n".join(guild_features_list))
if guild.premium_tier != 0:
nitro_boost = _(
"Tier {boostlevel} with {nitroboosters} boosters\n"
"File size limit: {filelimit}\n"
"Emoji limit: {emojis_limit}\n"
"VCs max bitrate: {bitrate}"
).format(
boostlevel=bold(str(guild.premium_tier)),
nitroboosters=bold(humanize_number(guild.premium_subscription_count)),
filelimit=bold(_size(guild.filesize_limit)),
emojis_limit=bold(str(guild.emoji_limit)),
bitrate=bold(_bitsize(guild.bitrate_limit)),
)
em.add_field(name=_("Nitro Boost:"), value=nitro_boost)
if guild.splash:
em.set_image(url=guild.splash_url_as(format="png"))
return em
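    # on_guild_remove mirrors on_guild_join with a "has left a server" message.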
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild) -> None:
"""Build and send a message containing serverinfo when the bot leaves a server"""
channel_id = await self.config.join_channel()
if channel_id is None:
return
channel = self.bot.get_channel(channel_id)
passed = f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
        created_at = _(
            "{bot} has left a server!\n "
            "That's **{num}** servers now!\n"
            "That's a total of **{users}** users!\n"
            "Server created on **{since}**. "
            "That's over **{passed}**!"
).format(
bot=channel.guild.me.mention,
num=humanize_number(len(self.bot.guilds)),
users=humanize_number(len(self.bot.users)),
since=f"<t:{int(guild.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>",
passed=passed,
)
try:
em = await self.guild_embed(guild)
em.description = created_at
await channel.send(embed=em)
except Exception:
log.error(f"Error creating guild embed for old guild ID {guild.id}", exc_info=True)
@commands.command()
async def emoji(
self, ctx: commands.Context, emoji: Union[discord.Emoji, discord.PartialEmoji, str]
) -> None:
"""
        Post a large version of an emoji in chat
"""
await ctx.channel.trigger_typing()
        if isinstance(emoji, (discord.PartialEmoji, discord.Emoji)):
d_emoji = cast(discord.Emoji, emoji)
ext = "gif" if d_emoji.animated else "png"
url = "https://cdn.discordapp.com/emojis/{id}.{ext}?v=1".format(id=d_emoji.id, ext=ext)
filename = "{name}.{ext}".format(name=d_emoji.name, ext=ext)
else:
            try:
                # Codepoint URL approach from
                # https://github.com/glasnt/emojificate/blob/master/emojificate/filter.py
cdn_fmt = "https://twemoji.maxcdn.com/2/72x72/{codepoint:x}.png"
url = cdn_fmt.format(codepoint=ord(str(emoji)))
filename = "emoji.png"
except TypeError:
await ctx.send(_("That doesn't appear to be a valid emoji"))
return
try:
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
image = BytesIO(await resp.read())
except Exception:
await ctx.send(_("That doesn't appear to be a valid emoji"))
return
file = discord.File(image, filename=filename)
await ctx.send(file=file)
@commands.command()
async def botstats(self, ctx: commands.Context) -> None:
"""Display stats about the bot"""
async with ctx.typing():
servers = humanize_number(len(ctx.bot.guilds))
members = humanize_number(len(self.bot.users))
passed = f"<t:{int(ctx.me.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
since = f"<t:{int(ctx.me.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
msg = _(
"{bot} is on {servers} servers serving {members} members!\n"
"{bot} was created on **{since}**.\n"
"That's over **{passed}**!"
).format(
bot=ctx.me.mention,
servers=servers,
members=members,
since=since,
passed=passed,
)
em = discord.Embed(
description=msg, colour=await ctx.embed_colour(), timestamp=ctx.message.created_at
)
if ctx.guild:
em.set_author(
name=f"{ctx.me} {f'~ {ctx.me.nick}' if ctx.me.nick else ''}",
icon_url=ctx.me.avatar_url,
)
else:
em.set_author(
name=f"{ctx.me}",
icon_url=ctx.me.avatar_url,
)
em.set_thumbnail(url=ctx.me.avatar_url)
if ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send(embed=em)
else:
await ctx.send(msg)
@commands.command()
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def topic(
self, ctx: commands.Context, channel: Optional[discord.TextChannel], *, topic: str = ""
) -> None:
"""
        Sets a specified channel's topic
`channel` is optional and if not supplied will use the current channel
Note: The maximum number of characters is 1024
"""
if channel is None:
channel = ctx.channel
if not channel.permissions_for(ctx.author).manage_messages:
return
if not channel.permissions_for(ctx.me).manage_channels:
await ctx.send(
_('I require the "Manage Channels" permission to execute that command.')
)
return
await channel.edit(
topic=topic[:1024], reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channeledit(self, ctx: commands.Context) -> None:
"""Modify channel options"""
pass
@channeledit.command(name="name")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_name(
self, ctx: commands.Context, channel: Optional[ChannelConverter], *, name: str
    ) -> None:
        """Edit a channel's name"""
if not channel:
channel = ctx.channel
await channel.edit(
name=name[:100], reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="position")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_position(
self, ctx: commands.Context, channel: Optional[ChannelConverter], position: int
    ) -> None:
        """Edit a channel's position"""
if not channel:
channel = ctx.channel
try:
await channel.edit(
position=position, reason=_("Requested by {author}").format(author=ctx.author)
)
except Exception as e:
            log.exception("Error editing channel position")
return
await ctx.tick()
@channeledit.command(name="sync")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_sync(
self, ctx: commands.Context, channel: Optional[ChannelConverter], toggle: bool
    ) -> None:
        """Set whether or not to sync permissions with the channel's category"""
if not channel:
channel = ctx.channel
await channel.edit(
sync_permissions=toggle, reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="nsfw")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_nsfw(
self, ctx: commands.Context, toggle: bool, channel: discord.TextChannel = None
) -> None:
"""Set whether or not a channel is NSFW"""
if not channel:
channel = ctx.channel
await channel.edit(
nsfw=toggle, reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="topic")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_topic(
self, ctx: commands.Context, channel: Optional[discord.TextChannel], *, topic: str
    ) -> None:
        """Edit a channel's topic"""
if not channel:
channel = ctx.channel
await channel.edit(
topic=topic[:1024], reason=_("Requested by {author}").format(author=ctx.author)
)
await ctx.tick()
@channeledit.command(name="bitrate")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_bitrate(
self, ctx: commands.Context, channel: discord.VoiceChannel, bitrate: int
    ) -> None:
        """Edit a voice channel's bitrate"""
try:
await channel.edit(
bitrate=bitrate, reason=_("Requested by {author}").format(author=ctx.author)
)
except Exception:
await ctx.send(
_(
"`{bitrate}` is either too high or too low please "
"provide a number between 8000 and 96000."
).format(bitrate=bitrate)
)
return
await ctx.tick()
@channeledit.command(name="userlimit")
@checks.mod_or_permissions(manage_channels=True)
@checks.bot_has_permissions(manage_channels=True)
async def channel_userlimit(
self, ctx: commands.Context, channel: discord.VoiceChannel, limit: int
    ) -> None:
        """Edit a voice channel's user limit"""
try:
await channel.edit(
user_limit=limit, reason=_("Requested by {author}").format(author=ctx.author)
)
except Exception:
await ctx.send(
_(
"`{limit}` is either too high or too low please "
"provide a number between 0 and 99."
).format(limit=limit)
)
return
await ctx.tick()
@channeledit.command(name="permissions", aliases=["perms", "permission"])
@checks.mod_or_permissions(manage_permissions=True)
@checks.bot_has_permissions(manage_permissions=True)
async def edit_channel_perms(
self,
ctx: commands.Context,
permission: PermissionConverter,
channel: Optional[ChannelConverter],
true_or_false: Optional[bool],
*roles_or_users: Union[discord.Member, discord.Role, str],
) -> None:
"""
        Edit channel permissions for a designated role or user
`[channel]` The channel you would like to edit. If no channel is provided
the channel this command is run in will be used.
`[true_or_false]` `True` or `False` to set the permission level. If this is not
provided `None` will be used instead which signifies the default state of the permission.
`[roles_or_users]` the roles or users you want to edit this setting for.
`<permission>` Must be one of the following:
add_reactions
attach_files
connect
create_instant_invite
deafen_members
embed_links
external_emojis
manage_messages
manage_permissions
manage_roles
manage_webhooks
move_members
mute_members
priority_speaker
read_message_history
read_messages
send_messages
send_tts_messages
speak
stream
use_external_emojis
use_slash_commands
use_voice_activation
"""
if channel is None:
channel = ctx.channel
if (
not channel.permissions_for(ctx.author).manage_permissions
or not channel.permissions_for(ctx.author).manage_channels
):
return await ctx.send(
_("You do not have the correct permissions to edit {channel}.").format(
channel=channel.mention
)
)
if (
not channel.permissions_for(ctx.me).manage_permissions
            or not channel.permissions_for(ctx.me).manage_channels
):
return await ctx.send(
_("I do not have the correct permissions to edit {channel}.").format(
channel=channel.mention
)
)
targets = list(roles_or_users)
for r in roles_or_users:
if isinstance(r, str):
if r == "everyone":
targets.remove(r)
targets.append(ctx.guild.default_role)
else:
targets.remove(r)
if not targets:
return await ctx.send(
_("You need to provide a role or user you want to edit permissions for")
)
overs = channel.overwrites
for target in targets:
if target in overs:
overs[target].update(**{permission: true_or_false})
else:
perm = discord.PermissionOverwrite(**{permission: true_or_false})
overs[target] = perm
try:
await channel.edit(overwrites=overs)
await ctx.send(
_(
"The following roles or users have had `{perm}` "
"in {channel} set to `{perm_level}`:\n{roles_or_users}"
).format(
perm=permission,
channel=channel.mention,
perm_level=true_or_false,
roles_or_users=humanize_list([i.mention for i in targets]),
)
)
except Exception:
log.exception(f"Error editing permissions in channel {channel.name}")
return await ctx.send(_("There was an issue editing permissions on that channel."))
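    # ask_for_invite waits up to 30 seconds for the invoker to type an invite
    # link/message (or "exit") and is used by the pruneroles kick flow below.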
async def ask_for_invite(self, ctx: commands.Context) -> Optional[str]:
"""
Ask the user to provide an invite link
if reinvite is True
"""
msg_send = _(
"Please provide a reinvite link/message.\n" "Type `exit` for no invite link/message."
)
await ctx.send(msg_send)
try:
msg = await ctx.bot.wait_for(
"message", check=lambda m: m.author == ctx.message.author, timeout=30
)
except asyncio.TimeoutError:
            await ctx.send(_("I guess not."))
return None
if "exit" in msg.content:
return None
else:
return msg.content
async def get_members_since(
self,
ctx: commands.Context,
days: int,
role: Union[discord.Role, Tuple[discord.Role], None],
) -> List[discord.Member]:
now = datetime.datetime.utcnow()
after = now - datetime.timedelta(days=days)
member_list = []
if role:
if not isinstance(role, discord.Role):
for r in role:
for m in r.members:
if m.top_role < ctx.me.top_role:
member_list.append(m)
else:
member_list = [m for m in role.members if m.top_role < ctx.me.top_role]
else:
member_list = [m for m in ctx.guild.members if m.top_role < ctx.me.top_role]
for channel in ctx.guild.text_channels:
if not channel.permissions_for(ctx.me).read_message_history:
continue
async for message in channel.history(limit=None, after=after):
if message.author in member_list:
member_list.remove(message.author)
return member_list
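    # get_members_since builds the candidate list (optionally restricted to one or
    # more roles, always below the bot's top role) and then removes anyone who has
    # posted in a readable text channel within the last `days` days.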
@commands.group()
@commands.guild_only()
@checks.bot_has_permissions(add_reactions=True)
async def pruneroles(self, ctx: commands.Context) -> None:
"""
Perform various actions on users who haven't spoken in x days
Note: This will only check if a user has talked in the past x days whereas
        Discord's built-in prune checks online status
"""
pass
@pruneroles.command()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def list(self, ctx: commands.Context, days: int, role: discord.Role = None) -> None:
"""
List the users who have not talked in x days
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
member_list = await self.get_members_since(ctx, days, role)
x = [member_list[i : i + 10] for i in range(0, len(member_list), 10)]
msg_list = []
count = 1
for page in x:
if ctx.channel.permissions_for(ctx.me).embed_links:
em = discord.Embed(colour=await ctx.embed_colour())
if role:
em.add_field(name=_("Role"), value=role.mention)
else:
estimate = await ctx.guild.estimate_pruned_members(
days=days if days < 30 else 30
)
em.add_field(name=_("Discord Estimate"), value=str(estimate))
em.description = "\n".join(m.mention for m in page)
em.set_author(name=f"{ctx.guild.name}", icon_url=ctx.guild.icon_url)
em.title = _("Estimated members to be pruned ") + str(len(member_list))
em.set_footer(text="Page {} of {}".format(count, len(x)))
count += 1
msg_list.append(em)
else:
if not role:
estimate = await ctx.guild.estimate_pruned_members(days=days)
role_msg = _("Discord Estimate: {estimate}").format(estimate=estimate)
else:
role_msg = _("Role: {role.name}").format(role=role)
members = "\n".join(str(m) for m in page)
msg = _(
"Estimated members to be pruned {num_members}\n" "{role}\n{members}\n"
).format(num_members=len(member_list), role=role_msg, members=members)
msg += "Page {} of {}".format(count, len(x))
count += 1
msg_list.append(msg)
if msg_list != []:
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
else:
await ctx.send(_("No one was found to be inactive in this time."))
@pruneroles.command()
@checks.mod_or_permissions(kick_members=True)
@checks.bot_has_permissions(kick_members=True, add_reactions=True)
async def kick(
self, ctx: commands.Context, days: int, role: discord.Role = None, reinvite: bool = True
) -> None:
"""
Kick users from the server who have been inactive for x days
`days` is the number of days since last seen talking on the server
        `role` is the specified role you would like to kick; defaults to everyone
`reinvite` True/False whether to try to send the user a message before kicking
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
if role is not None and role >= ctx.me.top_role:
msg = _("That role is higher than my " "role so I cannot kick those members.")
await ctx.send(msg)
return
member_list = await self.get_members_since(ctx, days, role)
send_msg = str(len(member_list)) + _(
" estimated users to kick. " "Would you like to kick them?"
)
msg = await ctx.send(send_msg)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if pred.result is True:
link = await self.ask_for_invite(ctx)
no_invite = []
for member in member_list:
if link:
try:
await member.send(link)
except Exception:
no_invite.append(member.id)
await member.kick(reason=_("Kicked due to inactivity."))
if link and len(no_invite) > 0:
msg = str(len(no_invite)) + _(" users could not be DM'd an invite link")
await ctx.send(msg)
else:
await ctx.send(_("I guess not."), delete_after=30)
return
await ctx.send(_("Done."))
@pruneroles.command()
@checks.mod_or_permissions(manage_roles=True)
@checks.bot_has_permissions(manage_roles=True, add_reactions=True)
async def add(self, ctx: commands.Context, days: int, *new_roles: discord.Role) -> None:
"""
Give roles to users who haven't spoken in x days
`days` is the number of days since last seen talking on the server
`new_roles` The new roles to apply to a user who is inactive
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
if any([r >= ctx.me.top_role for r in new_roles]):
msg = _(
"At least one of those roles is higher than my "
"role so I cannot add those roles."
)
await ctx.send(msg)
return
member_list = await self.get_members_since(ctx, days, None)
send_msg = str(len(member_list)) + _(
" estimated users to give the role. " "Would you like to reassign their roles now?"
)
msg = await ctx.send(send_msg)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if pred.result is True:
for member in member_list:
roles = list(set(member.roles + list(new_roles)))
await member.edit(roles=roles, reason=_("Given role due to inactivity."))
else:
await ctx.send(_("I guess not."), delete_after=30)
return
await ctx.send(_("Done."))
@pruneroles.command()
@checks.mod_or_permissions(manage_roles=True)
@checks.bot_has_permissions(manage_roles=True, add_reactions=True)
async def remove(self, ctx: commands.Context, days: int, *removed_roles: discord.Role) -> None:
"""
Remove roles from users who haven't spoken in x days
`days` is the number of days since last seen talking on the server
        `removed_roles` the roles to remove from inactive users
"""
if days < 1:
return await ctx.send(_("You must provide a value of more than 0 days."))
if any([r >= ctx.me.top_role for r in removed_roles]):
msg = _(
"At least one of those roles is higher than my "
"role so I cannot remove those roles."
)
await ctx.send(msg)
return
member_list = await self.get_members_since(ctx, days, removed_roles)
send_msg = str(len(member_list)) + _(
" estimated users to remove their roles. "
"Would you like to reassign their roles now?"
)
msg = await ctx.send(send_msg)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if pred.result is True:
for member in member_list:
roles = list(set(member.roles) - set(removed_roles))
await member.edit(roles=roles, reason=_("Roles removed due to inactivity."))
else:
await ctx.send(_("I guess not."), delete_after=30)
return
await ctx.send(_("Done."))
@commands.command()
@checks.is_owner()
@commands.bot_has_permissions(embed_links=True)
async def setguildjoin(
self, ctx: commands.Context, channel: discord.TextChannel = None
) -> None:
"""
Set a channel to see new servers the bot is joining
"""
if channel is None:
channel = ctx.message.channel
await self.config.join_channel.set(channel.id)
msg = _("Posting new servers and left servers in ") + channel.mention
await ctx.send(msg)
@commands.command()
@checks.is_owner()
async def removeguildjoin(self, ctx: commands.Context) -> None:
"""
        Stop the bot's join/leave server messages
"""
await self.config.join_channel.set(None)
await ctx.send(_("No longer posting joined or left servers."))
@commands.command(hidden=True)
@checks.is_owner()
async def checkcheater(self, ctx: commands.Context, user_id: int) -> None:
"""
Checks for possible cheaters abusing the global bank and server powers
"""
is_cheater = False
msg = ""
for guild in self.bot.guilds:
if guild.owner.id == user_id:
is_cheater = True
msg += guild.owner.mention + _(" is guild owner of ") + guild.name + "\n"
if is_cheater:
for page in pagify(msg):
await ctx.maybe_send_embed(page)
if not is_cheater:
await ctx.send(_("Not a cheater"))
@commands.command()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def whois(
self, ctx: commands.Context, *, user_id: Union[int, discord.Member, discord.User]
) -> None:
"""
Display servers a user shares with the bot
`member` can be a user ID or mention
"""
async with ctx.typing():
if not user_id:
return await ctx.send(_("You need to supply a user ID for this to work properly."))
if isinstance(user_id, int):
try:
member = await self.bot.fetch_user(user_id)
except AttributeError:
member = await self.bot.get_user_info(user_id)
except discord.errors.NotFound:
await ctx.send(str(user_id) + _(" doesn't seem to be a discord user."))
return
else:
member = user_id
if await self.bot.is_owner(ctx.author):
guild_list = []
async for guild in AsyncIter(self.bot.guilds, steps=100):
if m := guild.get_member(member.id):
guild_list.append(m)
else:
guild_list = []
async for guild in AsyncIter(self.bot.guilds, steps=100):
if not guild.get_member(ctx.author.id):
continue
if m := guild.get_member(member.id):
guild_list.append(m)
embed_list = []
robot = "\N{ROBOT FACE}" if member.bot else ""
if guild_list != []:
msg = f"**{member}** ({member.id}) {robot}" + _("is on:\n\n")
embed_msg = ""
for m in guild_list:
# m = guild.get_member(member.id)
is_owner = ""
nick = ""
if m.id == m.guild.owner_id:
is_owner = "\N{CROWN}"
if m.nick:
nick = f"`{m.nick}` in"
msg += f"{is_owner}{nick} __{m.guild.name}__ ({m.guild.id})\n\n"
embed_msg += f"{is_owner}{nick} __{m.guild.name}__ ({m.guild.id})\n\n"
if ctx.channel.permissions_for(ctx.me).embed_links:
for em in pagify(embed_msg, ["\n"], page_length=6000):
embed = discord.Embed()
since_created = f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
user_created = f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
public_flags = ""
if version_info >= VersionInfo.from_str("3.4.0"):
public_flags = "\n".join(
bold(i.replace("_", " ").title())
for i, v in member.public_flags
if v
)
created_on = _(
"Joined Discord on {user_created}\n"
"({since_created})\n"
"{public_flags}"
).format(
user_created=user_created,
since_created=since_created,
public_flags=public_flags,
)
embed.description = created_on
embed.set_thumbnail(url=member.avatar_url)
embed.colour = await ctx.embed_colour()
embed.set_author(
name=f"{member} ({member.id}) {robot}", icon_url=member.avatar_url
)
for page in pagify(em, ["\n"], page_length=1024):
embed.add_field(name=_("Shared Servers"), value=page)
embed_list.append(embed)
else:
for page in pagify(msg, ["\n"]):
embed_list.append(page)
else:
if ctx.channel.permissions_for(ctx.me).embed_links:
embed = discord.Embed()
since_created = (
f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:R>"
)
user_created = f"<t:{int(member.created_at.replace(tzinfo=datetime.timezone.utc).timestamp())}:D>"
public_flags = ""
if version_info >= VersionInfo.from_str("3.4.0"):
public_flags = "\n".join(
bold(i.replace("_", " ").title()) for i, v in member.public_flags if v
)
created_on = _(
"Joined Discord on {user_created}\n" "({since_created})\n" "{public_flags}"
).format(
user_created=user_created,
since_created=since_created,
public_flags=public_flags,
)
embed.description = created_on
embed.set_thumbnail(url=member.avatar_url)
embed.colour = await ctx.embed_colour()
embed.set_author(
name=f"{member} ({member.id}) {robot}", icon_url=member.avatar_url
)
embed_list.append(embed)
else:
msg = f"**{member}** ({member.id}) " + _("is not in any shared servers!")
embed_list.append(msg)
await BaseMenu(
source=ListPages(pages=embed_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@commands.command(hidden=True)
@checks.is_owner()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def topservers(self, ctx: commands.Context) -> None:
"""
        Lists servers by number of users, largest first
"""
guilds = sorted(list(self.bot.guilds), key=lambda s: s.member_count, reverse=True)
msg = ""
msg_list = []
count = 0
        for server in guilds:
if count == 10:
msg_list.append(msg)
msg = ""
count = 0
msg += (
f"{escape(server.name, mass_mentions=True, formatting=True)}: "
f"`{humanize_number(server.member_count)}`\n"
)
count += 1
msg_list.append(msg)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@commands.command(hidden=True)
@checks.is_owner()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def newservers(self, ctx: commands.Context) -> None:
"""
Lists servers by when the bot was added to the server
"""
guilds = sorted(list(self.bot.guilds), key=lambda s: s.me.joined_at)
msg = ""
msg_list = []
count = 0
        for server in guilds:
if count == 10:
msg_list.append(msg)
msg = ""
count = 0
msg += (
f"{escape(server.name, mass_mentions=True, formatting=True)}: "
f"`{humanize_number(server.member_count)}`\n"
)
count += 1
msg_list.append(msg)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@commands.group()
@checks.admin_or_permissions(manage_guild=True)
@commands.bot_has_permissions(manage_guild=True)
async def guildedit(self, ctx: commands.Context) -> None:
"""Edit various guild settings"""
pass
@guildedit.command(name="name")
async def guild_name(self, ctx: commands.Context, *, name: str):
"""
Change the server name
`<name>` The new name of the server.
"""
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(name=name, reason=reason)
except Exception:
log.exception("Could not edit guild name")
            return await ctx.send(_("I could not edit the server's name."))
await ctx.send(_("Server name set to {name}.").format(name=name))
@guildedit.command(name="verificationlevel", aliases=["verification"])
    async def verification_level(self, ctx: commands.Context, *, level: str) -> None:
        """
        Modify the guild's verification level
`<level>` must be one of:
`none`, `low`, `medium`, `table flip`(`high`), or `double table flip`(`extreme`)
"""
levels = {
"none": discord.VerificationLevel.none,
"low": discord.VerificationLevel.low,
"medium": discord.VerificationLevel.medium,
"high": discord.VerificationLevel.high,
"table flip": discord.VerificationLevel.high,
"extreme": discord.VerificationLevel.extreme,
"double table flip": discord.VerificationLevel.extreme,
}
reason = _("Requested by {author}").format(author=ctx.author)
if level.lower() not in levels:
await ctx.send(_("`{}` is not a proper verification level.").format(level))
return
try:
await ctx.guild.edit(verification_level=levels[level], reason=reason)
except Exception:
log.exception("Could not edit guild verification level")
            return await ctx.send(_("I could not edit the server's verification level."))
await ctx.send(_("Server verification level set to {level}").format(level=level))
@guildedit.command(name="systemchannel", aliases=["welcomechannel"])
async def system_channel(
self, ctx: commands.Context, channel: Optional[discord.TextChannel] = None
) -> None:
"""
Change the system channel
This is the default discord welcome channel.
`[channel]` The channel you want to set as the system channel.
If not provided will be set to `None`.
"""
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(system_channel=channel, reason=reason)
except Exception:
log.exception("Could not edit guild systemchannel")
            return await ctx.send(_("I could not edit the server's system channel."))
        channel_name = getattr(channel, "mention", "None")
        await ctx.send(_("Server system channel set to {channel}").format(channel=channel_name))
@guildedit.command(name="afkchannel")
async def afk_channel(
self, ctx: commands.Context, channel: Optional[discord.VoiceChannel] = None
) -> None:
"""
        Change the server's AFK voice channel
        `[channel]` The channel you want to set as the AFK channel.
If not provided will be set to `None`.
"""
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(afk_channel=channel, reason=reason)
except Exception:
log.exception("Could not edit guild afk channel")
            return await ctx.send(_("I could not edit the server's AFK channel."))
channel_name = getattr(channel, "mention", "None")
await ctx.send(_("Server afk channel set to {channel}").format(channel=channel_name))
@guildedit.command(name="afktimeout")
async def afk_timeout(self, ctx: commands.Context, timeout: int) -> None:
"""
        Change the server's AFK timeout
`<timeout>` must be a value of 60, 300, 900, 1800, or 3600.
"""
if timeout not in [60, 300, 900, 1800, 3600]:
await ctx.send(_("`timeout` must be a value of 60, 300, 900, 1800, or 3600."))
return
reason = _("Requested by {author}").format(author=ctx.author)
try:
await ctx.guild.edit(afk_timeout=timeout, reason=reason)
except Exception:
log.exception("Could not edit guild afk timeout")
            return await ctx.send(_("I could not edit the server's AFK timeout."))
await ctx.send(_("Server AFK timeout set to {timeout} seconds.").format(timeout=timeout))
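    # Illustrative invocations of the `guildedit` group above (hypothetical prefix
    # `[p]` and example values; the actual prefix depends on the bot's configuration):
    #     [p]guildedit name My Cool Server
    #     [p]guildedit verificationlevel high
    #     [p]guildedit afkchannel "AFK Lounge"
    #     [p]guildedit afktimeout 300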
@commands.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def topmembers(
self, ctx: commands.Context, number: int = 10, guild: GuildConverter = None
) -> None:
"""
Lists top members on the server by join date
        `number` optional[int] number of members to display per page; clamped between 10 and 50
`guild` can be either the server ID or name
"""
if not guild:
guild = ctx.guild
if number > 50:
number = 50
if number < 10:
number = 10
def joined(member: discord.Member):
return getattr(member, "joined_at", datetime.datetime.utcnow())
member_list = sorted(guild.members, key=joined)
is_embed = ctx.channel.permissions_for(ctx.me).embed_links
x = []
for i in range(0, len(member_list), number):
x.append(member_list[i : i + number])
await asyncio.sleep(0.2)
msg_list = []
for page in x:
header_msg = (
"__**" + _("First ") + str(number) + _(" members of ") + f"{guild.name}**__\n"
)
msg = ""
for member in page:
if is_embed:
msg += f"{member_list.index(member)+1}. {member.mention}\n"
else:
msg += f"{member_list.index(member)+1}. {member.name}\n"
if is_embed:
embed = discord.Embed(description=msg)
embed.set_author(name=guild.name + _(" first members"), icon_url=guild.icon_url)
msg_list.append(embed)
else:
msg_list.append(header_msg + msg)
await asyncio.sleep(0.1)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@commands.command()
@checks.is_owner()
async def listchannels(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
Lists channels and their position and ID for a server
`guild` can be either the server ID or name
"""
if not guild:
guild = ctx.guild
msg = "__**{}({})**__\n".format(guild.name, guild.id)
for category in guild.by_category():
if category[0] is not None:
word = _("Position")
msg += "{0} ({1}): {2} {3}\n".format(
category[0].mention, category[0].id, word, category[0].position
)
for channel in category[1]:
word = _("Position")
msg += "{0} ({1}): {2} {3}\n".format(
channel.mention, channel.id, word, channel.position
)
for page in pagify(msg, ["\n"]):
await ctx.send(page)
@staticmethod
async def confirm_leave_guild(ctx: commands.Context, guild) -> None:
await ctx.send(
_("Are you sure you want me to leave {guild}? (reply yes or no)").format(
guild=guild.name
)
)
pred = MessagePredicate.yes_or_no(ctx)
await ctx.bot.wait_for("message", check=pred)
if pred.result is True:
try:
await ctx.send(_("Leaving {guild}.").format(guild=guild.name))
await guild.leave()
except Exception:
log.error(
_("I couldn't leave {guild} ({g_id}).").format(
guild=guild.name, g_id=guild.id
),
exc_info=True,
)
await ctx.send(_("I couldn't leave {guild}.").format(guild=guild.name))
else:
await ctx.send(_("Okay, not leaving {guild}.").format(guild=guild.name))
@staticmethod
async def get_guild_invite(guild: discord.Guild, max_age: int = 86400) -> None:
"""Handles the reinvite logic for getting an invite
to send the newly unbanned user
:returns: :class:`Invite`
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py#L771
"""
my_perms: discord.Permissions = guild.me.guild_permissions
if my_perms.manage_guild or my_perms.administrator:
if "VANITY_URL" in guild.features:
# guild has a vanity url so use it as the one to send
try:
return await guild.vanity_invite()
except discord.errors.Forbidden:
invites = []
invites = await guild.invites()
else:
invites = []
for inv in invites: # Loop through the invites for the guild
if not (inv.max_uses or inv.max_age or inv.temporary):
# Invite is for the guild's default channel,
# has unlimited uses, doesn't expire, and
# doesn't grant temporary membership
# (i.e. they won't be kicked on disconnect)
return inv
else: # No existing invite found that is valid
channels_and_perms = zip(
guild.text_channels, map(guild.me.permissions_in, guild.text_channels)
)
channel = next(
(channel for channel, perms in channels_and_perms if perms.create_instant_invite),
None,
)
if channel is None:
return
try:
# Create invite that expires after max_age
return await channel.create_invite(max_age=max_age)
except discord.HTTPException:
return
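    # A minimal sketch of how `get_guild_invite` might be used elsewhere in this cog
    # (hypothetical; `member` is assumed to be a discord.Member or discord.User):
    #     invite = await self.get_guild_invite(guild)
    #     if invite is not None:
    #         await member.send(invite.url)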
@commands.command()
@commands.bot_has_permissions(embed_links=True)
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def getguild(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
Display info about servers the bot is on
`guild_name` can be either the server ID or partial name
"""
async with ctx.typing():
if not ctx.guild and not await ctx.bot.is_owner(ctx.author):
return await ctx.send(_("This command is not available in DM."))
guilds = [ctx.guild]
page = 0
if await ctx.bot.is_owner(ctx.author):
if ctx.guild:
page = ctx.bot.guilds.index(ctx.guild)
guilds = ctx.bot.guilds
if guild:
page = ctx.bot.guilds.index(guild)
await BaseMenu(
source=GuildPages(guilds=guilds),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=page,
).start(ctx=ctx)
@commands.command()
@commands.bot_has_permissions(embed_links=True)
@checks.admin()
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def getguilds(self, ctx: commands.Context, *, guilds: MultiGuildConverter) -> None:
"""
Display info about multiple servers
`guild_name` can be either the server ID or partial name
"""
async with ctx.typing():
page = 0
if not guilds:
guilds = ctx.bot.guilds
page = ctx.bot.guilds.index(ctx.guild)
await BaseMenu(
source=GuildPages(guilds=guilds),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=page,
).start(ctx=ctx)
@commands.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
async def nummembers(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
Display number of users on a server
`guild_name` can be either the server ID or partial name
"""
if not guild:
guild = ctx.guild
await ctx.send(
"{} has {} members.".format(guild.name, humanize_number(guild.member_count))
)
@commands.guild_only()
@commands.command(aliases=["rolestats"])
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def getroles(self, ctx: commands.Context, *, guild: GuildConverter = None) -> None:
"""
        Displays all roles, their IDs, and member counts in order of
        hierarchy
`guild_name` can be either the server ID or partial name
"""
if not guild:
guild = ctx.guild
msg = ""
for role in sorted(guild.roles, reverse=True):
if ctx.channel.permissions_for(ctx.me).embed_links and guild is ctx.guild:
msg += f"{role.mention} ({role.id}): {len(role.members)}\n"
else:
msg += f"{role.name} ({role.id}): {len(role.members)}\n"
msg_list = []
for page in pagify(msg, ["\n"]):
if ctx.channel.permissions_for(ctx.me).embed_links:
embed = discord.Embed()
embed.description = page
embed.set_author(name=f"{guild.name} " + _("Roles"), icon_url=guild.icon_url)
msg_list.append(embed)
else:
msg_list.append(page)
await BaseMenu(
source=ListPages(pages=msg_list),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
async def check_highest(self, data):
highest = 0
users = 0
for user, value in data.items():
if value > highest:
highest = value
users = user
return highest, users
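    # Worked example for `check_highest` (hypothetical data: user id -> post count):
    #     await self.check_highest({111: 5, 222: 12})  # -> (12, 222)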
@commands.command(name="getreactions", aliases=["getreaction"])
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(read_message_history=True, add_reactions=True)
async def get_reactions(self, ctx: commands.Context, message: discord.Message) -> None:
"""
        Gets a list of all reactions on the specified message and displays each
        user's ID, username, discriminator, and the emoji name.
"""
async with ctx.typing():
new_msg = ""
for reaction in message.reactions:
async for user in reaction.users():
if isinstance(reaction.emoji, discord.PartialEmoji):
new_msg += "{} {}#{} {}\n".format(
user.id, user.name, user.discriminator, reaction.emoji.name
)
else:
new_msg += "{} {}#{} {}\n".format(
user.id, user.name, user.discriminator, reaction.emoji
)
temp_pages = []
pages = []
for page in pagify(new_msg, shorten_by=20):
temp_pages.append(box(page, "py"))
max_i = len(temp_pages)
i = 1
for page in temp_pages:
pages.append(f"`Page {i}/{max_i}`\n" + page)
i += 1
await BaseMenu(
source=ListPages(pages=pages),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
async def get_server_stats(
self, guild: discord.Guild
) -> Dict[str, Union[str, Dict[str, int]]]:
"""
This is a very expensive function but handles only pulling new
data into config since the last time the command has been run.
"""
# to_return: Dict[str, Union[int, Dict[int, int]]] = {
# "last_checked": 0,
# "members": {m.id: 0 for m in guild.members},
# "total_posts": 0,
# "channels": {},
# } This is the data schema for saved data
# It's all formatted easily for end user data request and deletion
# to_return = await self.config.guild(guild).all()
async with self.config.guild(guild).all() as to_return:
for channel in guild.text_channels:
my_perms = channel.permissions_for(guild.me)
set_new_last_read = False
if str(channel.id) not in to_return["channels"]:
to_return["channels"][str(channel.id)] = {}
to_return["channels"][str(channel.id)]["members"] = {}
to_return["channels"][str(channel.id)]["total"] = 0
to_return["channels"][str(channel.id)]["last_checked"] = 0
check_after = None
else:
check_after = discord.Object(
id=to_return["channels"][str(channel.id)]["last_checked"]
)
if not my_perms.read_message_history or not my_perms.read_messages:
continue
try:
log.debug(check_after)
async for message in channel.history(
limit=None, after=check_after, oldest_first=False
):
if not set_new_last_read:
log.debug(f"Setting last_checked to {message.id}")
to_return["channels"][str(channel.id)]["last_checked"] = message.id
set_new_last_read = True
author = message.author
if author.discriminator == "0000" and author.bot:
continue
if str(author.id) not in to_return["members"]:
to_return["members"][str(author.id)] = 0
if str(author.id) not in to_return["channels"][str(channel.id)]["members"]:
to_return["channels"][str(channel.id)]["members"][str(author.id)] = 0
to_return["channels"][str(channel.id)]["members"][str(author.id)] += 1
to_return["channels"][str(channel.id)]["total"] += 1
to_return["members"][str(author.id)] += 1
to_return["total"] += 1
except (AttributeError, discord.Forbidden):
log.debug("the heck", exc_info=True)
pass
_ret = copy(to_return)
# copy the data to prevent context manager from removing the reference
log.debug(_ret)
return _ret
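    # Rough shape of the dict returned by `get_server_stats`/`get_channel_stats`
    # (hypothetical IDs and counts, mirroring the schema comment above):
    #     {
    #         "total": 42,
    #         "members": {"123456789": 30, "987654321": 12},
    #         "channels": {
    #             "111111111": {"total": 42, "last_checked": 222222222, "members": {...}},
    #         },
    #     }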
async def get_channel_stats(self, channel: discord.TextChannel) -> dict:
"""
This is another expensive function but handles only pulling
new data into config since the last time the command has been run.
"""
guild = channel.guild
async with self.config.guild(guild).all() as to_return:
my_perms = channel.permissions_for(guild.me)
set_new_last_read = False
if channel.id not in to_return["channels"]:
to_return["channels"][str(channel.id)] = {}
to_return["channels"][str(channel.id)]["members"] = {}
to_return["channels"][str(channel.id)]["total"] = 0
to_return["channels"][str(channel.id)]["last_checked"] = 0
check_after = None
else:
check_after = to_return["channels"][str(channel.id)]["last_checked"]
if not my_perms.read_message_history or not my_perms.read_messages:
return {} # we shouldn't have even reached this far before
try:
async for message in channel.history(
limit=None, after=check_after, oldest_first=False
):
if not set_new_last_read:
to_return["channels"][str(channel.id)]["last_checked"] = message.id
set_new_last_read = True
author = message.author
if author.discriminator == "0000" and author.bot:
continue
if str(author.id) not in to_return["members"]:
to_return["members"][str(author.id)] = 0
if str(author.id) not in to_return["channels"][str(channel.id)]["members"]:
to_return["channels"][str(channel.id)]["members"][str(author.id)] = 0
to_return["channels"][str(channel.id)]["members"][str(author.id)] += 1
to_return["channels"][str(channel.id)]["total"] += 1
to_return["members"][str(author.id)] += 1
to_return["total"] += 1
# we still want to update the guild totals if we happened to pull a specific channel
except (AttributeError, discord.Forbidden):
pass
_ret = copy(to_return)
return _ret
@commands.command(name="serverstats")
@checks.mod_or_permissions(manage_messages=True)
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
@commands.guild_only()
async def server_stats(
self,
ctx: commands.Context,
) -> None:
"""
Gets total messages on the server and displays each channel
separately as well as the user who has posted the most in each channel
Note: This is a very slow function and may take some time to complete
"""
warning_msg = await ctx.send(
_(
"This can take a long time to gather all information for the first time! Are you sure you want to continue?"
)
)
pred = ReactionPredicate.yes_or_no(warning_msg, ctx.author)
start_adding_reactions(warning_msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if not pred.result:
return await ctx.send(_("Alright I will not gather data."))
async with ctx.channel.typing():
guild_data = await self.get_server_stats(ctx.guild)
channel_messages = []
member_messages = []
sorted_chans = sorted(
guild_data["channels"].items(), key=lambda x: x[1]["total"], reverse=True
)
sorted_members = sorted(
guild_data["members"].items(), key=lambda x: x[1], reverse=True
)
for member_id, value in sorted_members[:5]:
member_messages.append(f"<@!{member_id}>: {bold(humanize_number(value))}\n")
try:
most_messages_user_id = sorted_members[0][0]
except IndexError:
most_messages_user_id = None
try:
most_messages_user_num = sorted_members[0][1]
except IndexError:
most_messages_user_num = 0
new_msg = (
_("**Most posts on the server**\nTotal Messages: ")
+ bold(humanize_number(guild_data["total"]))
+ _("\nMost posts by ")
+ f"<@!{most_messages_user_id}> {bold(humanize_number(most_messages_user_num))}\n\n"
)
for channel_id, value in sorted_chans[:5]:
sorted_members = sorted(
guild_data["channels"][channel_id]["members"].items(),
key=lambda x: x[1],
reverse=True,
)
most_messages_user_id = sorted_members[0][0]
most_messages_user_num = sorted_members[0][1]
maybe_guild = f"<@!{most_messages_user_id}>: {bold(humanize_number(int(most_messages_user_num)))}\n"
channel_messages.append(
_("**Most posts in <#{}>**\nTotal Messages: ").format(channel_id)
+ bold(humanize_number(int(value["total"])))
+ _("\nMost posts by {}\n".format(maybe_guild))
)
em = discord.Embed(colour=await self.bot.get_embed_colour(ctx))
em.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
em.description = f"{new_msg}{''.join(i for i in channel_messages)}"
em.add_field(name=_("Top Members"), value="".join(i for i in member_messages))
await ctx.send(embed=em)
@commands.command(name="channelstats")
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def channel_stats(
self,
ctx: commands.Context,
channel: discord.TextChannel = None,
) -> None:
"""
Gets total messages in a specific channel as well as the user who
has posted the most in that channel
Note: This can be a very slow function and may take some time to complete
"""
warning_msg = await ctx.send(
_(
"This can take a long time to gather all information for the first time! Are you sure you want to continue?"
)
)
pred = ReactionPredicate.yes_or_no(warning_msg, ctx.author)
start_adding_reactions(warning_msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await self.bot.wait_for("reaction_add", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("I guess not."), delete_after=30)
return
if not pred.result:
return await ctx.send(_("Alright I will not gather data."))
if not channel:
channel = ctx.channel
async with ctx.channel.typing():
channel_data = await self.get_channel_stats(channel)
member_messages = []
sorted_members = sorted(
channel_data["channels"][str(channel.id)]["members"].items(),
key=lambda x: x[1],
reverse=True,
)
for member_id, value in sorted_members[:5]:
member_messages.append(f"<@!{member_id}>: {bold(humanize_number(value))}\n")
try:
most_messages_user_id = sorted_members[0][0]
except IndexError:
most_messages_user_id = None
try:
most_messages_user_num = sorted_members[0][1]
except IndexError:
most_messages_user_num = 0
maybe_guild = f"<@!{most_messages_user_id}>: {bold(humanize_number(int(most_messages_user_num)))}\n"
new_msg = (
_("**Most posts in <#{}>**\nTotal Messages: ").format(channel.id)
+ bold(humanize_number(int(channel_data["channels"][str(channel.id)]["total"])))
+ _("\nMost posts by {}\n".format(maybe_guild))
)
em = discord.Embed(colour=await self.bot.get_embed_colour(ctx))
em.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
em.description = f"{new_msg}"
em.add_field(name=_("Top Members"), value="".join(i for i in member_messages))
await ctx.send(embed=em)
@commands.guild_only()
@commands.command(aliases=["serveremojis"])
@commands.bot_has_permissions(read_message_history=True, add_reactions=True, embed_links=True)
async def guildemojis(
self,
ctx: commands.Context,
id_emojis: Optional[bool] = False,
*,
guild: GuildConverter = None,
) -> None:
"""
Display all server emojis in a menu that can be scrolled through
        `id_emojis` whether to include emoji IDs. Defaults to False; set to True
        to also show each emoji's ID.
`guild_name` can be either the server ID or partial name
"""
if not guild:
guild = ctx.guild
msg = ""
embed = discord.Embed(timestamp=ctx.message.created_at)
embed.set_author(name=guild.name, icon_url=guild.icon_url)
regular = []
for emoji in guild.emojis:
if id_emojis:
regular.append(
(
f"{emoji} = `:{emoji.name}:` "
f"`<{'a' if emoji.animated else ''}:{emoji.name}:{emoji.id}>`\n"
)
)
else:
regular.append(f"{emoji} = `:{emoji.name}:`\n")
        if regular:
            embed.description = "".join(regular)
x = [regular[i : i + 10] for i in range(0, len(regular), 10)]
emoji_embeds = []
count = 1
for page in x:
em = discord.Embed(timestamp=ctx.message.created_at)
em.set_author(name=guild.name + _(" Emojis"), icon_url=guild.icon_url)
regular = []
msg = ""
for emoji in page:
msg += emoji
em.description = msg
em.set_footer(text="Page {} of {}".format(count, len(x)))
count += 1
emoji_embeds.append(em)
if len(emoji_embeds) == 0:
await ctx.send(_("There are no emojis on {guild}.").format(guild=guild.name))
else:
await BaseMenu(
source=ListPages(pages=emoji_embeds),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
|
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_table as dt
import fire
import pandas as pd
from dash.dependencies import Input, Output
from codecarbon.viz.components import Components
from codecarbon.viz.data import Data
def render_app(df: pd.DataFrame):
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.COSMO])
components = Components()
header = components.get_header()
net_summary = components.get_net_summary()
project_dropdown = components.get_project_dropdown(df)
project_details = components.get_project_details()
exemplary_equivalents = components.get_exemplary_equivalents()
_hidden_project_data = components.get_hidden_project_data()
_hidden_project_summary = components.get_hidden_project_summary()
cloud_emissions_comparison = components.get_cloud_emissions_comparison()
global_comparison = components.get_global_comparison()
regional_comparison = components.get_regional_emissions_comparison()
project_time_series = components.get_project_time_series()
project_emissions_bar_chart = components.get_project_emissions_bar_chart()
references = components.get_references()
data = Data()
app.layout = dbc.Container(
[
header,
net_summary,
project_dropdown,
project_details,
exemplary_equivalents,
cloud_emissions_comparison,
global_comparison,
regional_comparison,
project_time_series,
project_emissions_bar_chart,
references,
_hidden_project_data,
_hidden_project_summary,
],
style={"padding-top": "50px"},
)
@app.callback(
[
Output(component_id="hidden_project_data", component_property="children"),
Output(component_id="hidden_project_summary", component_property="data"),
Output(component_id="net_power_consumption", component_property="children"),
Output(component_id="net_carbon_equivalent", component_property="children"),
Output(
component_id="project_infrastructure_location",
component_property="children",
),
Output(
component_id="project_power_consumption", component_property="children"
),
Output(
component_id="project_carbon_equivalent", component_property="children"
),
Output(
component_id="last_run_power_consumption", component_property="children"
),
Output(
component_id="last_run_carbon_equivalent", component_property="children"
),
],
[Input(component_id="project_name", component_property="value")],
)
def update_project_data(project_name: str):
project_data = data.get_project_data(df, project_name)
project_summary = data.get_project_summary(project_data.data)
        net_power_consumption = f"{'{:.1f}'.format(sum(df['energy_consumed']))} kWh"
        net_carbon_equivalent = f"{'{:.1f}'.format(sum(df['emissions']))} kg"
        if project_summary["region"] == "":
            project_infrastructure_location = f"{project_summary['country_name']}"
        else:
            project_infrastructure_location = (
                f"{project_summary['region']}, {project_summary['country_name']}"
            )
        project_power_consumption = (
            f"{round(project_summary['total']['energy_consumed'],1)} kWh"
        )
        project_carbon_equivalent = (
            f"{round(project_summary['total']['emissions'],1)} kg"
        )
        last_run_power_consumption = (
            f"{project_summary['last_run']['energy_consumed']} kWh"
        )
        last_run_carbon_equivalent = f"{project_summary['last_run']['emissions']} kg"
return (
project_data,
project_summary,
net_power_consumption,
net_carbon_equivalent,
project_infrastructure_location,
project_power_consumption,
project_carbon_equivalent,
last_run_power_consumption,
last_run_carbon_equivalent,
)
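    # The two hidden components above act as shared state: the callbacks below read
    # the project summary back out of the dcc.Store ("data" property) and the project
    # rows out of the hidden DataTable ("children" property) instead of recomputing them.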
@app.callback(
[
Output(component_id="house_icon", component_property="src"),
Output(component_id="car_icon", component_property="src"),
Output(component_id="tv_icon", component_property="src"),
Output(component_id="car_miles", component_property="children"),
Output(component_id="tv_time", component_property="children"),
Output(component_id="household_fraction", component_property="children"),
],
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_exemplary_equivalents(hidden_project_summary: dcc.Store):
project_carbon_equivalent = hidden_project_summary["total"]["emissions"]
house_icon = app.get_asset_url("house_icon.png")
car_icon = app.get_asset_url("car_icon.png")
tv_icon = app.get_asset_url("tv_icon.png")
car_miles = f"{data.get_car_miles(project_carbon_equivalent)} miles"
tv_time = data.get_tv_time(project_carbon_equivalent)
household_fraction = (
f"{data.get_household_fraction(project_carbon_equivalent)} %"
)
return house_icon, car_icon, tv_icon, car_miles, tv_time, household_fraction
@app.callback(
[
Output(
component_id="global_emissions_choropleth", component_property="figure"
),
Output(
component_id="global_energy_mix_choropleth", component_property="figure"
),
],
[
Input(component_id="hidden_project_summary", component_property="data"),
Input(component_id="energy_type", component_property="value"),
],
)
def update_global_comparisons(hidden_project_summary: dcc.Store, energy_type: str):
net_energy_consumed = hidden_project_summary["total"]["energy_consumed"]
global_emissions_choropleth_data = data.get_global_emissions_choropleth_data(
net_energy_consumed
)
return (
components.get_global_emissions_choropleth_figure(
global_emissions_choropleth_data
),
components.get_global_energy_mix_choropleth_figure(
energy_type, global_emissions_choropleth_data
),
)
@app.callback(
Output(
component_id="regional_emissions_comparison_component",
component_property="style",
),
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_show_regional_comparison(hidden_project_summary: dcc.Store):
country_iso_code = hidden_project_summary["country_iso_code"]
# add country codes here to render for different countries
if country_iso_code.upper() in ["USA", "CAN"]:
return {"display": "block"}
else:
return {"display": "none"}
@app.callback(
[
Output(component_id="country_name", component_property="children"),
Output(
component_id="regional_emissions_comparison_choropleth",
component_property="figure",
),
],
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_regional_comparison_choropleth(hidden_project_summary: dcc.Store):
country_name = hidden_project_summary["country_name"]
country_iso_code = hidden_project_summary["country_iso_code"]
net_energy_consumed = hidden_project_summary["total"]["energy_consumed"]
regional_emissions_choropleth_data = (
data.get_regional_emissions_choropleth_data(
net_energy_consumed, country_iso_code
)
)
return (
country_name,
components.get_regional_emissions_choropleth_figure(
regional_emissions_choropleth_data, country_iso_code
),
)
@app.callback(
Output(component_id="project_time_series", component_property="figure"),
[Input(component_id="hidden_project_data", component_property="children")],
)
def update_project_time_series(hidden_project_data: dt.DataTable):
return components.get_project_time_series_figure(
hidden_project_data["props"]["data"]
)
@app.callback(
Output(component_id="project_emissions_bar_chart", component_property="figure"),
[Input(component_id="hidden_project_data", component_property="children")],
)
    def update_project_emissions_bar_chart(hidden_project_data: dt.DataTable):
return components.get_project_emissions_bar_chart_figure(
hidden_project_data["props"]["data"]
)
@app.callback(
Output(
component_id="cloud_emissions_comparison_component",
component_property="style",
),
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_on_cloud(hidden_project_summary: dcc.Store):
on_cloud = hidden_project_summary["on_cloud"]
if on_cloud == "Y":
return {"display": "block"}
else:
return {"display": "none"}
@app.callback(
[
Output(component_id="cloud_provider_name", component_property="children"),
Output(
component_id="cloud_emissions_barchart", component_property="figure"
),
Output(component_id="cloud_recommendation", component_property="children"),
],
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_cloud_emissions_barchart(hidden_project_summary: dcc.Store):
on_cloud = hidden_project_summary["on_cloud"]
net_energy_consumed = hidden_project_summary["total"]["energy_consumed"]
cloud_provider = hidden_project_summary["cloud_provider"]
cloud_region = hidden_project_summary["cloud_region"]
(
cloud_provider_name,
cloud_emissions_barchart_data,
) = data.get_cloud_emissions_barchart_data(
net_energy_consumed, on_cloud, cloud_provider, cloud_region
)
return (
cloud_provider_name,
components.get_cloud_emissions_barchart_figure(
cloud_emissions_barchart_data
),
components.get_cloud_recommendation(
on_cloud, cloud_provider_name, cloud_emissions_barchart_data
),
)
return app
def viz(filepath: str, port: int = 8050, debug: bool = False) -> None:
df = pd.read_csv(filepath)
app = render_app(df)
app.run_server(port=port, debug=debug)
def main():
fire.Fire(viz)
if __name__ == "__main__":
main()
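# Example invocation (a sketch; assumes this module is saved as dashboard.py and that
# an emissions CSV produced by codecarbon exists at the given path):
#     python dashboard.py emissions.csv --port=8050 --debug=True
# fire.Fire(viz) maps the `viz` signature onto the command-line arguments shown above.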
|
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_table as dt
import fire
import pandas as pd
from dash.dependencies import Input, Output
from codecarbon.viz.components import Components
from codecarbon.viz.data import Data
def render_app(df: pd.DataFrame):
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.COSMO])
components = Components()
header = components.get_header()
net_summary = components.get_net_summary()
project_dropdown = components.get_project_dropdown(df)
project_details = components.get_project_details()
exemplary_equivalents = components.get_exemplary_equivalents()
_hidden_project_data = components.get_hidden_project_data()
_hidden_project_summary = components.get_hidden_project_summary()
cloud_emissions_comparison = components.get_cloud_emissions_comparison()
global_comparison = components.get_global_comparison()
regional_comparison = components.get_regional_emissions_comparison()
project_time_series = components.get_project_time_series()
project_emissions_bar_chart = components.get_project_emissions_bar_chart()
references = components.get_references()
data = Data()
app.layout = dbc.Container(
[
header,
net_summary,
project_dropdown,
project_details,
exemplary_equivalents,
cloud_emissions_comparison,
global_comparison,
regional_comparison,
project_time_series,
project_emissions_bar_chart,
references,
_hidden_project_data,
_hidden_project_summary,
],
style={"padding-top": "50px"},
)
@app.callback(
[
Output(component_id="hidden_project_data", component_property="children"),
Output(component_id="hidden_project_summary", component_property="data"),
Output(component_id="net_power_consumption", component_property="children"),
Output(component_id="net_carbon_equivalent", component_property="children"),
Output(
component_id="project_infrastructure_location",
component_property="children",
),
Output(
component_id="project_power_consumption", component_property="children"
),
Output(
component_id="project_carbon_equivalent", component_property="children"
),
Output(
component_id="last_run_power_consumption", component_property="children"
),
Output(
component_id="last_run_carbon_equivalent", component_property="children"
),
],
[Input(component_id="project_name", component_property="value")],
)
def update_project_data(project_name: str):
project_data = data.get_project_data(df, project_name)
project_summary = data.get_project_summary(project_data.data)
net_power_consumption = f"{'{:.1f}'.format(sum(df['energy_consumed']))} kWh"
net_carbon_equivalent = f"{'{:.1f}'.format(sum(df['emissions']))} kg"
        if project_summary["region"] == "":
project_infrastructure_location = f"{project_summary['country_name']}"
else:
project_infrastructure_location = (
f"{project_summary['region']}, {project_summary['country_name']}"
)
project_power_consumption = (
f"{round(project_summary['total']['energy_consumed'],1)} kWh"
)
project_carbon_equivalent = (
f"{round(project_summary['total']['emissions'],1)} kg"
)
last_run_power_consumption = (
f"{project_summary['last_run']['energy_consumed']} kWh"
)
last_run_carbon_equivalent = f"{project_summary['last_run']['emissions']} kg"
return (
project_data,
project_summary,
net_power_consumption,
net_carbon_equivalent,
project_infrastructure_location,
project_power_consumption,
project_carbon_equivalent,
last_run_power_consumption,
last_run_carbon_equivalent,
)
@app.callback(
[
Output(component_id="house_icon", component_property="src"),
Output(component_id="car_icon", component_property="src"),
Output(component_id="tv_icon", component_property="src"),
Output(component_id="car_miles", component_property="children"),
Output(component_id="tv_time", component_property="children"),
Output(component_id="household_fraction", component_property="children"),
],
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_exemplary_equivalents(hidden_project_summary: dcc.Store):
project_carbon_equivalent = hidden_project_summary["total"]["emissions"]
house_icon = app.get_asset_url("house_icon.png")
car_icon = app.get_asset_url("car_icon.png")
tv_icon = app.get_asset_url("tv_icon.png")
car_miles = f"{data.get_car_miles(project_carbon_equivalent)} miles"
tv_time = data.get_tv_time(project_carbon_equivalent)
household_fraction = (
f"{data.get_household_fraction(project_carbon_equivalent)} %"
)
return house_icon, car_icon, tv_icon, car_miles, tv_time, household_fraction
@app.callback(
[
Output(
component_id="global_emissions_choropleth", component_property="figure"
),
Output(
component_id="global_energy_mix_choropleth", component_property="figure"
),
],
[
Input(component_id="hidden_project_summary", component_property="data"),
Input(component_id="energy_type", component_property="value"),
],
)
def update_global_comparisons(hidden_project_summary: dcc.Store, energy_type: str):
net_energy_consumed = hidden_project_summary["total"]["energy_consumed"]
global_emissions_choropleth_data = data.get_global_emissions_choropleth_data(
net_energy_consumed
)
return (
components.get_global_emissions_choropleth_figure(
global_emissions_choropleth_data
),
components.get_global_energy_mix_choropleth_figure(
energy_type, global_emissions_choropleth_data
),
)
@app.callback(
Output(
component_id="regional_emissions_comparison_component",
component_property="style",
),
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_show_regional_comparison(hidden_project_summary: dcc.Store):
country_iso_code = hidden_project_summary["country_iso_code"]
# add country codes here to render for different countries
if country_iso_code.upper() in ["USA", "CAN"]:
return {"display": "block"}
else:
return {"display": "none"}
@app.callback(
[
Output(component_id="country_name", component_property="children"),
Output(
component_id="regional_emissions_comparison_choropleth",
component_property="figure",
),
],
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_regional_comparison_choropleth(hidden_project_summary: dcc.Store):
country_name = hidden_project_summary["country_name"]
country_iso_code = hidden_project_summary["country_iso_code"]
net_energy_consumed = hidden_project_summary["total"]["energy_consumed"]
regional_emissions_choropleth_data = (
data.get_regional_emissions_choropleth_data(
net_energy_consumed, country_iso_code
)
)
return (
country_name,
components.get_regional_emissions_choropleth_figure(
regional_emissions_choropleth_data, country_iso_code
),
)
@app.callback(
Output(component_id="project_time_series", component_property="figure"),
[Input(component_id="hidden_project_data", component_property="children")],
)
def update_project_time_series(hidden_project_data: dt.DataTable):
return components.get_project_time_series_figure(
hidden_project_data["props"]["data"]
)
@app.callback(
Output(component_id="project_emissions_bar_chart", component_property="figure"),
[Input(component_id="hidden_project_data", component_property="children")],
)
    def update_project_emissions_bar_chart(hidden_project_data: dt.DataTable):
return components.get_project_emissions_bar_chart_figure(
hidden_project_data["props"]["data"]
)
@app.callback(
Output(
component_id="cloud_emissions_comparison_component",
component_property="style",
),
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_on_cloud(hidden_project_summary: dcc.Store):
on_cloud = hidden_project_summary["on_cloud"]
if on_cloud == "Y":
return {"display": "block"}
else:
return {"display": "none"}
@app.callback(
[
Output(component_id="cloud_provider_name", component_property="children"),
Output(
component_id="cloud_emissions_barchart", component_property="figure"
),
Output(component_id="cloud_recommendation", component_property="children"),
],
[Input(component_id="hidden_project_summary", component_property="data")],
)
def update_cloud_emissions_barchart(hidden_project_summary: dcc.Store):
on_cloud = hidden_project_summary["on_cloud"]
net_energy_consumed = hidden_project_summary["total"]["energy_consumed"]
cloud_provider = hidden_project_summary["cloud_provider"]
cloud_region = hidden_project_summary["cloud_region"]
(
cloud_provider_name,
cloud_emissions_barchart_data,
) = data.get_cloud_emissions_barchart_data(
net_energy_consumed, on_cloud, cloud_provider, cloud_region
)
return (
cloud_provider_name,
components.get_cloud_emissions_barchart_figure(
cloud_emissions_barchart_data
),
components.get_cloud_recommendation(
on_cloud, cloud_provider_name, cloud_emissions_barchart_data
),
)
return app
def viz(filepath: str, port: int = 8050, debug: bool = False) -> None:
df = pd.read_csv(filepath)
app = render_app(df)
app.run_server(port=port, debug=debug)
def main():
fire.Fire(viz)
if __name__ == "__main__":
main()
|
from collections import namedtuple
from datetime import datetime
import numbers
import sys
from ..time import Resolution, Frequency
from .base import Series
from .timeseries import TimeseriesList, Timeseries, Value
class Period(namedtuple("Period", ("begin", "end", "value"))):
"""
A period for a period-based series.
Implemented as a namedtuple of (begin, end, value).
.. py:attribute:: begin
The begin date-time
.. py:attribute:: end
The end date-time
.. py:attribute:: value
The numeric value
"""
def validate(self):
"""
Check if this period is in a valid format.
:raises ValueError: When this period tuple is invalid
"""
if not isinstance(self.begin, datetime):
raise ValueError("Period.begin is not a datetime")
if not isinstance(self.end, datetime):
raise ValueError("Period.end is not a datetime")
if not (self.value is None or isinstance(self.value, (numbers.Number))):
raise ValueError("Period.value must either be a number or None")
def print(self, file=sys.stdout):
"""
Print this period-based series value.
"""
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
print(f"{d0}–{d1}\t{v:13.2f}", file=file)
def to_dict(self):
"""
Convert this period value to a dict in the same short
format as the API response.
:return: A dict of this period in the same format as the API response
:rtype: dict
"""
return {
"begin": self.begin.isoformat(sep=' '),
"end": self.end.isoformat(sep=' '),
"v": self.value
}
def __str__(self):
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
return f"<Period: begin={d0}, end={d1}, value={v}>"
def __repr__(self):
return str(self)
def is_active(self, date):
return self.begin <= date < self.end
def is_empty_or_invalid(self):
return self.begin >= self.end
def is_date_before(self, date):
return date < self.begin
def is_date_after(self, date):
return date >= self.end
def is_interval_before(self, begin, end):
return end <= self.begin
def is_interval_after(self, begin, end):
return begin >= self.end
def is_overlayed(self, begin, end):
return end > self.begin and begin < self.end
def is_covered(self, begin, end):
return self.begin <= begin and self.end >= end
def get_duration_seconds(self, begin, end):
d0 = max(self.begin, begin)
d1 = min(self.end, end)
return (d1 - d0).total_seconds()
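# A minimal sketch of constructing and inspecting a Period (hypothetical dates/value):
#
#     from datetime import datetime
#     p = Period(begin=datetime(2020, 1, 1), end=datetime(2020, 1, 2), value=42.0)
#     p.validate()
#     p.to_dict()  # -> {"begin": "2020-01-01 00:00:00", "end": "2020-01-02 00:00:00", "v": 42.0}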
class CapacityPeriod(namedtuple("Period", ("begin", "end", "value", "installed"))):
"""
A period for a period-based series. Includes the installed capacity for
the period (which may differ from the currently available capacity given in
the *value* attribute).
Implemented as a namedtuple of (begin, end, value).
.. py:attribute:: begin
The begin date-time
.. py:attribute:: end
The end date-time
.. py:attribute:: value
The numeric value
.. py:attribute:: installed
The total installed capacity
"""
def validate(self):
"""
Check if this period is in a valid format.
:raises ValueError: When this period tuple is invalid
"""
if not isinstance(self.begin, datetime):
raise ValueError("Period.begin is not a datetime")
if not isinstance(self.end, datetime):
raise ValueError("Period.end is not a datetime")
if not (self.value is None or isinstance(self.value, (numbers.Number))):
raise ValueError("Period.value must either be a number or None")
if not (self.installed is None or isinstance(self.installed, (numbers.Number))):
raise ValueError("Period.installed must either be a number or None")
def print(self, file=sys.stdout):
"""
Print this period-based series value.
"""
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
c = self.installed
print(f"{d0}–{d1}\t{v:13.2f}\t{c:13.2f}", file=file)
def to_dict(self):
"""
Convert this period value to a dict in the same short
format as the API response.
:return: A dict of this period in the same format as the API response
:rtype: dict
"""
return {
"begin": self.begin.isoformat(sep=' '),
"end": self.end.isoformat(sep=' '),
"v": self.value,
"c": self.installed,
}
def __str__(self):
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
c = self.installed
return f"<Period: begin={d0}, end={d1}, value={v}, installed={c}>"
def __repr__(self):
return str(self)
def is_active(self, date):
return self.begin <= date < self.end
def is_empty_or_invalid(self):
return self.begin >= self.end
def is_date_before(self, date):
return date < self.begin
def is_date_after(self, date):
return date >= self.end
def is_interval_before(self, begin, end):
return end <= self.begin
def is_interval_after(self, begin, end):
return begin >= self.end
def is_overlayed(self, begin, end):
return end > self.begin and begin < self.end
def is_covered(self, begin, end):
return self.begin <= begin and self.end >= end
def get_duration_seconds(self, begin, end):
d0 = max(self.begin, begin)
d1 = min(self.end, end)
return (d1 - d0).total_seconds()
class Periodseries(Series):
"""
A period-based series with metadata.
:param curve: The curve, defaults to None
:type curve: Curve, optional
:param resolution: The resolution of the time series, defaults to None
:type resolution: Resolution, optional
:param instance: The instance, defaults to None
:type instance: Instance, optional
:param data: A list of periods (Period or CapacityPeriod)
:type data: list[]
"""
def __init__(self, data=None, **kwargs):
super().__init__(**kwargs)
assert isinstance(self.resolution, Resolution), (
"Periodseries.resolution is required"
)
self.data = data or []
def __str__(self):
items = []
items.append(f"resolution={self.resolution}")
items.append(f"curve=\"{self.curve}\"")
if self.instance:
items.append(f"instance={self.instance}")
if self.has_data():
            items.append(f"begin=\"{self.begin().isoformat(sep=' ')}\"")
            items.append(f"end=\"{self.end().isoformat(sep=' ')}\"")
        else:
            items.append("EMPTY")
        return f"<Periodseries: {', '.join(items)}>"
def __repr__(self):
return str(self)
def begin(self):
if self.data:
return self.data[0].begin
else:
raise ValueError("Periodseries has no values")
def end(self):
if self.data:
return self.data[-1].end
else:
raise ValueError("Periodseries has no values")
def to_timeseries(self, frequency=None):
"""
Convert this period-based series to a regular time series.
When periods overlap the same step in the resulting time series,
a weighted average is calculated down to second-precision.
:param frequency: The frequency of the resulting time series
:type frequency: Frequency, required
:return: A time series
:rtype: Timeseries
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Prepare conversion
resolution = Resolution(frequency, self.resolution.timezone)
if not self.has_data():
return Timeseries(
curve=self.curve,
resolution=resolution,
instance=self.instance,
data=[]
)
begin = resolution.floor(self.begin())
end = self.end()
iterator = _PeriodsToTimeseriesIterator(
periods=self.data,
resolution=resolution,
begin=begin,
end=end
)
# Convert
data = [Value(dt, value) for dt, value in iterator]
timeseries = Timeseries(
curve=self.curve,
resolution=resolution,
instance=self.instance,
data=data
)
timeseries.set_name(self._name)
return timeseries
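    # Sketch of a typical conversion (hypothetical; assumes `ps` is a populated
    # Periodseries and that the Frequency enum in ..time has a daily member, e.g. P1D):
    #     ts = ps.to_timeseries(frequency=Frequency.P1D)
    #     df = ps.to_dataframe(frequency=Frequency.P1D, name="value")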
def to_df(self, frequency=None, name=None):
"""
Alias for :meth:`Periodseries.to_dataframe`.
        Convert this period-based series to a ``pandas.DataFrame`` as a time series
in the given frequency.
Using :py:meth:`Periodseries.to_timeseries` to convert this
period-based series to a regular time series first. When periods
overlap the same step in the resulting time series, a weighted
average is calculated down to second-precision.
:param frequency: The frequency of the resulting ``pandas.DataFrame``\
time series
:type frequency: Frequency, required
:param name: Set a name for the column in the ``pandas.DataFrame``,\
defaults to ``value``
:type name: str, optional
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
return self.to_dataframe(frequency=frequency, name=name)
def to_dataframe(self, frequency=None, name=None):
"""
        Convert this period-based series to a ``pandas.DataFrame`` as a time series
in the given frequency.
Using :py:meth:`Periodseries.to_timeseries` to convert this
period-based series to a regular time series first. When periods
overlap the same step in the resulting time series, a weighted
average is calculated down to second-precision.
:param frequency: The frequency of the resulting ``pandas.DataFrame``\
time series
:type frequency: Frequency, required
:param name: Set a name for the column in the ``pandas.DataFrame``,\
defaults to ``value``
:type name: str, optional
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Conversion
timeseries = self.to_timeseries(frequency=frequency)
df = timeseries.to_dataframe(name=name)
return df
def print(self, file=sys.stdout):
"""
Utility method to print a period-based series to any file handle
(defaults to stdout).
"""
print(f"Periodseries:", file=file)
if self.curve:
print(f" Curve: {repr(self.curve)}", file=file)
if self.instance:
print(f" Instance: {self.instance}", file=file)
print(f" Resolution: {self.resolution}", file=file)
print(f"", file=file)
for d in self.data:
d.print(file=file)
class _PeriodsToTimeseriesIterator:
"""
A period-based series iterator used for conversions to Timeseries objects.
"""
def __init__(self, periods=None, resolution=None, begin=None, end=None):
self.periods = [p for p in periods if p.end > begin and p.begin < end]
self.resolution = resolution
self.begin = begin
self.end = end
# Iterator stuff
self.d = None
self.p = None
def __iter__(self):
# No periods available
if not self.periods:
            return iter([])
# Get first period
self.d = self.begin
self.p = self.periods.pop(0)
return self
def __next__(self):
# Get dates and current period
p = self.p
d0 = self.d
d1 = self.d = self.resolution >> d0
# We're done
if d0 >= self.end:
raise StopIteration
else:
return self._find_next_value(p, d0, d1)
def _find_next_value(self, p, d0, d1):
# No more periods
if not p:
return (d0, None)
# Period covers the entire time interval
if p.is_covered(d0, d1):
return (d0, p.value)
# We do not have any values for given date
if p.is_interval_before(d0, d1):
return (d0, None)
# We are past current period
if p.is_interval_after(d0, d1):
p = self.p = self.periods.pop(0) if self.periods else None
return self._find_next_value(p, d0, d1)
# Overlapping, but not covering – find all periods covering interval
overlapping = self._get_overlayed_periods(d0, d1)
# Current period starts in the middle of the interval
if not overlapping:
return (d0, None)
# More than one period – check if they are connected (no gaps)
if self._is_covering_interval(p, overlapping, d0, d1):
# No gaps – generate a mean value
mean = self._calc_mean_periods([p] + overlapping, d0, d1)
return (d0, mean)
# There are gaps, so we do not have a value
return (d0, None)
def _get_overlayed_periods(self, begin, end):
# Find other periods overlapping current interval
periods = []
for p in self.periods:
if p.is_overlayed(begin, end):
periods.append(p)
else:
break
return periods
def _is_covering_interval(self, current, periods, begin, end):
# Check if the first period starts somewhere in the interval
if current.begin > begin:
return False
# Check for gaps between periods
previous = current
for p in periods:
if previous.end != p.begin:
return False
previous = p
# Check if the last element stops somewhere in the interval
if previous.end < end:
return False
# All covered
return True
def _calc_mean_periods(self, periods, begin, end):
# Get value and duration in interval for each period
available_weights = [
(p.value, p.get_duration_seconds(begin, end))
for p in periods
]
# Get sum of weight
sum_weights = 1.0 * sum(weight for avail, weight in available_weights)
# Get the mean value
return (
sum(avail * weight for avail, weight in available_weights)
/ sum_weights
)
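    # Worked example of the duration-weighted mean above (hypothetical numbers):
    # a step covered by value 10.0 for 3600 s and value 20.0 for 1800 s gives
    #     (10.0 * 3600 + 20.0 * 1800) / (3600 + 1800) = 13.33...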
# Period-based series list with helpers
class PeriodseriesList(list):
"""
A list of Periodseries objects. Have methods for converting them to a
:py:class:`TimeseriesList` or a `pandas.DataFrame`.
:param iterable: Any iterable of `Periodseries`
:type iterable: iterable
"""
def __init__(self, iterable=()):
# Initialize list
super().__init__(iterable)
# Asserts
_validate_periodseries_list(iterable)
def to_timeseries(self, frequency=None):
"""
Convert all period-based series in this list to time series.
When periods overlap the same step in the resulting time series,
a weighted average is calculated down to second-precision.
:param frequency: The frequency of the resulting time series
:type frequency: Frequency, required
:return: A list of time series
:rtype: TimeseriesList
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Convert all period-based series to time series
return TimeseriesList(
periodseries.to_timeseries(frequency=frequency)
for periodseries in self
)
def to_df(self, frequency=None):
"""
        Alias for :meth:`PeriodseriesList.to_dataframe`.
        Convert this PeriodseriesList to a ``pandas.DataFrame`` where each time
        series is placed in its own column and lined up with the date-time
        as index.
:param frequency: The frequency of the resulting time series'
:type frequency: Frequency, required
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
return self.to_dataframe(frequency=frequency)
def to_dataframe(self, frequency=None):
"""
        Convert this PeriodseriesList to a ``pandas.DataFrame`` where each time
        series is placed in its own column and lined up with the date-time
        as index.
:param frequency: The frequency of the resulting time series'
:type frequency: Frequency, required
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Convert to time series then to data frame
timeseries_list = self.to_timeseries(frequency=frequency)
return timeseries_list.to_dataframe()
def append(self, periodseries):
_validate_periodseries(periodseries)
return super().append(periodseries)
def extend(self, iterable):
# Asserts
_validate_periodseries_list(iterable)
# Perform operation
return super().extend(iterable)
def insert(self, index, periodseries):
# Asserts
_validate_periodseries(periodseries)
# Perform operation
return super().insert(index, periodseries)
def __add__(self, rhs):
_validate_periodseries_list(rhs)
return PeriodseriesList(list.__add__(self, rhs))
def __iadd__(self, rhs):
_validate_periodseries_list(rhs)
return PeriodseriesList(list.__iadd__(self, rhs))
def __setitem__(self, key, periodseries):
_validate_periodseries(periodseries)
return super().__setitem__(key, periodseries)
def __mul__(self, rhs):
raise NotImplementedError("PeriodseriesList does not support multiply")
def __rmul__(self, rhs):
raise NotImplementedError("PeriodseriesList does not support multiply")
def __imul__(self, rhs):
raise NotImplementedError("PeriodseriesList does not support multiply")
def copy(self):
return PeriodseriesList(self)
def __getitem__(self, item):
result = list.__getitem__(self, item)
if isinstance(result, list):
return PeriodseriesList(result)
else:
return result
def _validate_periodseries(periodseries):
assert isinstance(periodseries, Periodseries), (
f"Element is not a Periodseries. Expects all "
f"elements to be Periodseries objects."
)
def _validate_periodseries_list(periodseries_list):
for index, periodseries in enumerate(periodseries_list):
assert isinstance(periodseries, Periodseries), (
f"Element {index} is not a Periodseries. Expects all "
f"elements to be Periodseries objects."
)
|
from collections import namedtuple
from datetime import datetime
import numbers
import sys
from ..time import Resolution, Frequency
from .base import Series
from .timeseries import TimeseriesList, Timeseries, Value
class Period(namedtuple("Period", ("begin", "end", "value"))):
"""
A period for a period-based series.
Implemented as a namedtuple of (begin, end, value).
.. py:attribute:: begin
The begin date-time
.. py:attribute:: end
The end date-time
.. py:attribute:: value
The numeric value
"""
def validate(self):
"""
Check if this period is in a valid format.
:raises ValueError: When this period tuple is invalid
"""
if not isinstance(self.begin, datetime):
raise ValueError("Period.begin is not a datetime")
if not isinstance(self.end, datetime):
raise ValueError("Period.end is not a datetime")
if not (self.value is None or isinstance(self.value, (numbers.Number))):
raise ValueError("Period.value must either be a number or None")
def print(self, file=sys.stdout):
"""
Print this period-based series value.
"""
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
print(f"{d0}–{d1}\t{v:13.2f}", file=file)
def to_dict(self):
"""
Convert this period value to a dict in the same short
format as the API response.
:return: A dict of this period in the same format as the API response
:rtype: dict
"""
return {
"begin": self.begin.isoformat(sep=' '),
"end": self.end.isoformat(sep=' '),
"v": self.value
}
def __str__(self):
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
return f"<Period: begin={d0}, end={d1}, value={v}>"
def __repr__(self):
return str(self)
def is_active(self, date):
return self.begin <= date < self.end
def is_empty_or_invalid(self):
return self.begin >= self.end
def is_date_before(self, date):
return date < self.begin
def is_date_after(self, date):
return date >= self.end
def is_interval_before(self, begin, end):
return end <= self.begin
def is_interval_after(self, begin, end):
return begin >= self.end
def is_overlayed(self, begin, end):
return end > self.begin and begin < self.end
def is_covered(self, begin, end):
return self.begin <= begin and self.end >= end
def get_duration_seconds(self, begin, end):
d0 = max(self.begin, begin)
d1 = min(self.end, end)
return (d1 - d0).total_seconds()
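# Illustrative sketch (editor's addition, not part of the library): shows how the
# Period helpers above behave for a simple case. Only Period and datetime, both
# already available in this module, are used; the expected values in the comments
# follow directly from the method definitions above.
def _example_period_usage():
    noon = datetime(2020, 1, 1, 12, 0)
    one = datetime(2020, 1, 1, 13, 0)
    two = datetime(2020, 1, 1, 14, 0)
    p = Period(begin=noon, end=one, value=42.0)
    assert p.is_active(noon) is True          # begin is inclusive
    assert p.is_active(one) is False          # end is exclusive
    assert p.is_overlayed(noon, two) is True  # intervals share at least a moment
    assert p.is_covered(noon, one) is True    # period spans the whole interval
    # Durations are clipped to the requested interval (here: the last 30 minutes)
    assert p.get_duration_seconds(datetime(2020, 1, 1, 12, 30), two) == 1800.0
    return p.to_dict()  # {"begin": "2020-01-01 12:00:00", "end": "...", "v": 42.0}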
class CapacityPeriod(namedtuple("Period", ("begin", "end", "value", "installed"))):
"""
A period for a period-based series. Includes the installed capacity for
the period (which may differ from the currently available capacity given in
the *value* attribute).
Implemented as a namedtuple of (begin, end, value, installed).
.. py:attribute:: begin
The begin date-time
.. py:attribute:: end
The end date-time
.. py:attribute:: value
The numeric value
.. py:attribute:: installed
The total installed capacity
"""
def validate(self):
"""
Check if this period is in a valid format.
:raises ValueError: When this period tuple is invalid
"""
if not isinstance(self.begin, datetime):
raise ValueError("Period.begin is not a datetime")
if not isinstance(self.end, datetime):
raise ValueError("Period.end is not a datetime")
if not (self.value is None or isinstance(self.value, (numbers.Number))):
raise ValueError("Period.value must either be a number or None")
if not (self.installed is None or isinstance(self.installed, (numbers.Number))):
raise ValueError("Period.installed must either be a number or None")
def print(self, file=sys.stdout):
"""
Print this period-based series value.
"""
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
c = self.installed
print(f"{d0}–{d1}\t{v:13.2f}\t{c:13.2f}", file=file)
def to_dict(self):
"""
Convert this period value to a dict in the same short
format as the API response.
:return: A dict of this period in the same format as the API response
:rtype: dict
"""
return {
"begin": self.begin.isoformat(sep=' '),
"end": self.end.isoformat(sep=' '),
"v": self.value,
"c": self.installed,
}
def __str__(self):
d0 = self.begin.isoformat(sep=' ')
d1 = self.end.isoformat(sep=' ')
v = self.value
c = self.installed
return f"<Period: begin={d0}, end={d1}, value={v}, installed={c}>"
def __repr__(self):
return str(self)
def is_active(self, date):
return self.begin <= date < self.end
def is_empty_or_invalid(self):
return self.begin >= self.end
def is_date_before(self, date):
return date < self.begin
def is_date_after(self, date):
return date >= self.end
def is_interval_before(self, begin, end):
return end <= self.begin
def is_interval_after(self, begin, end):
return begin >= self.end
def is_overlayed(self, begin, end):
return end > self.begin and begin < self.end
def is_covered(self, begin, end):
return self.begin <= begin and self.end >= end
def get_duration_seconds(self, begin, end):
d0 = max(self.begin, begin)
d1 = min(self.end, end)
return (d1 - d0).total_seconds()
class Periodseries(Series):
"""
A period-based series with metadata.
:param curve: The curve, defaults to None
:type curve: Curve, optional
:param resolution: The resolution of the time series, defaults to None
:type resolution: Resolution, optional
:param instance: The instance, defaults to None
:type instance: Instance, optional
:param data: A list of periods (Period or CapacityPeriod)
:type data: list[Period]
"""
def __init__(self, data=None, **kwargs):
super().__init__(**kwargs)
assert isinstance(self.resolution, Resolution), (
"Periodseries.resolution is required"
)
self.data = data or []
def __str__(self):
items = []
items.append(f"resolution={self.resolution}")
items.append(f"curve=\"{self.curve}\"")
if self.instance:
items.append(f"instance={self.instance}")
if self.has_data():
items.append(f"begin=\"{self.begin().isoformat(sep=' ')}\"")
items.append(f"end=\"{self.end().isoformat(sep=' ')}\"")
else:
items.append("EMPTY")
return f"<Periodseries: {', '.join(items)}>"
def __repr__(self):
return str(self)
def begin(self):
if self.data:
return self.data[0].begin
else:
raise ValueError("Periodseries has no values")
def end(self):
if self.data:
return self.data[-1].end
else:
raise ValueError("Periodseries has no values")
def to_timeseries(self, frequency=None):
"""
Convert this period-based series to a regular time series.
When periods overlap the same step in the resulting time series,
a weighted average is calculated down to second-precision.
:param frequency: The frequency of the resulting time series
:type frequency: Frequency, required
:return: A time series
:rtype: Timeseries
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Prepare conversion
resolution = Resolution(frequency, self.resolution.timezone)
if not self.has_data():
return Timeseries(
curve=self.curve,
resolution=resolution,
instance=self.instance,
data=[]
)
begin = resolution.floor(self.begin())
end = self.end()
iterator = _PeriodsToTimeseriesIterator(
periods=self.data,
resolution=resolution,
begin=begin,
end=end
)
# Convert
data = [Value(dt, value) for dt, value in iterator]
timeseries = Timeseries(
curve=self.curve,
resolution=resolution,
instance=self.instance,
data=data
)
timeseries.set_name(self._name)
return timeseries
def to_df(self, frequency=None, name=None):
"""
Alias for :meth:`Periodseries.to_dataframe`.
Convert this period-based series to a ``pandas.DataFrame`` as a time
series in the given frequency.
Uses :py:meth:`Periodseries.to_timeseries` to convert this
period-based series to a regular time series first. When periods
overlap the same step in the resulting time series, a weighted
average is calculated down to second-precision.
:param frequency: The frequency of the resulting ``pandas.DataFrame``\
time series
:type frequency: Frequency, required
:param name: Set a name for the column in the ``pandas.DataFrame``,\
defaults to ``value``
:type name: str, optional
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
return self.to_dataframe(frequency=frequency, name=name)
def to_dataframe(self, frequency=None, name=None):
"""
Convert this period-based series to a ``pandas.DataFrame`` as a time
series in the given frequency.
Uses :py:meth:`Periodseries.to_timeseries` to convert this
period-based series to a regular time series first. When periods
overlap the same step in the resulting time series, a weighted
average is calculated down to second-precision.
:param frequency: The frequency of the resulting ``pandas.DataFrame``\
time series
:type frequency: Frequency, required
:param name: Set a name for the column in the ``pandas.DataFrame``,\
defaults to ``value``
:type name: str, optional
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Conversion
timeseries = self.to_timeseries(frequency=frequency)
df = timeseries.to_dataframe(name=name)
return df
def print(self, file=sys.stdout):
"""
Utility method to print a period-based series to any file handle
(defaults to stdout).
"""
print(f"Periodseries:", file=file)
if self.curve:
print(f" Curve: {repr(self.curve)}", file=file)
if self.instance:
print(f" Instance: {self.instance}", file=file)
print(f" Resolution: {self.resolution}", file=file)
print(f"", file=file)
for d in self.data:
d.print(file=file)
class _PeriodsToTimeseriesIterator:
"""
A period-based series iterator used for conversions to Timeseries objects.
"""
def __init__(self, periods=None, resolution=None, begin=None, end=None):
self.periods = [p for p in periods if p.end > begin and p.begin < end]
self.resolution = resolution
self.begin = begin
self.end = end
# Iterator stuff
self.d = None
self.p = None
def __iter__(self):
# No periods available
if not self.periods:
return iter([])  # __iter__ must return an iterator, not a plain list
# Get first period
self.d = self.begin
self.p = self.periods.pop(0)
return self
def __next__(self):
# Get dates and current period
p = self.p
d0 = self.d
d1 = self.d = self.resolution >> d0
# We're done
if d0 >= self.end:
raise StopIteration
else:
return self._find_next_value(p, d0, d1)
def _find_next_value(self, p, d0, d1):
# No more periods
if not p:
return (d0, None)
# Period covers the entire time interval
if p.is_covered(d0, d1):
return (d0, p.value)
# We do not have any values for given date
if p.is_interval_before(d0, d1):
return (d0, None)
# We are past current period
if p.is_interval_after(d0, d1):
p = self.p = self.periods.pop(0) if self.periods else None
return self._find_next_value(p, d0, d1)
# Overlapping, but not covering – find all periods covering interval
overlapping = self._get_overlayed_periods(d0, d1)
# Current period starts in the middle of the interval
if not overlapping:
return (d0, None)
# More than one period – check if they are connected (no gaps)
if self._is_covering_interval(p, overlapping, d0, d1):
# No gaps – generate a mean value
mean = self._calc_mean_periods([p] + overlapping, d0, d1)
return (d0, mean)
# There are gaps, so we do not have a value
return (d0, None)
def _get_overlayed_periods(self, begin, end):
# Find other periods overlapping current interval
periods = []
for p in self.periods:
if p.is_overlayed(begin, end):
periods.append(p)
else:
break
return periods
def _is_covering_interval(self, current, periods, begin, end):
# Check if the first period starts somewhere in the interval
if current.begin > begin:
return False
# Check for gaps between periods
previous = current
for p in periods:
if previous.end != p.begin:
return False
previous = p
# Check if the last element stops somewhere in the interval
if previous.end < end:
return False
# All covered
return True
def _calc_mean_periods(self, periods, begin, end):
# Get value and duration in interval for each period
available_weights = [
(p.value, p.get_duration_seconds(begin, end))
for p in periods
]
# Get sum of weight
sum_weights = 1.0 * sum(weight for avail, weight in available_weights)
# Get the mean value
return (
sum(avail * weight for avail, weight in available_weights)
/ sum_weights
)
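# Illustrative sketch (editor's addition): reproduces the weighted-average rule
# used by _calc_mean_periods above for one hour covered by two periods. The helper
# name is hypothetical; only Period and datetime from this module are used.
def _example_weighted_mean():
    begin = datetime(2020, 1, 1, 12, 0)
    end = datetime(2020, 1, 1, 13, 0)
    p1 = Period(begin, datetime(2020, 1, 1, 12, 40), 10.0)  # 2400 s at value 10.0
    p2 = Period(datetime(2020, 1, 1, 12, 40), end, 20.0)    # 1200 s at value 20.0
    weights = [(p.value, p.get_duration_seconds(begin, end)) for p in (p1, p2)]
    total = sum(w for _, w in weights)
    # (10.0 * 2400 + 20.0 * 1200) / 3600 = 13.33..., i.e. each period contributes
    # in proportion to the seconds it covers within the interval.
    return sum(v * w for v, w in weights) / total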
# Period-based series list with helpers
class PeriodseriesList(list):
"""
A list of Periodseries objects. Has methods for converting them to a
:py:class:`TimeseriesList` or a `pandas.DataFrame`.
:param iterable: Any iterable of `Periodseries`
:type iterable: iterable
"""
def __init__(self, iterable=()):
# Initialize list
super().__init__(iterable)
# Asserts
_validate_periodseries_list(iterable)
def to_timeseries(self, frequency=None):
"""
Convert all period-based series in this list to time series.
When periods overlap the same step in the resulting time series,
a weighted average is calculated down to second-precision.
:param frequency: The frequency of the resulting time series
:type frequency: Frequency, required
:return: A list of time series
:rtype: TimeseriesList
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Convert all period-based series to time series
return TimeseriesList(
periodseries.to_timeseries(frequency=frequency)
for periodseries in self
)
def to_df(self, frequency=None):
"""
Alias for :meth:`PeriodseriesList.to_dataframe`.
Convert this PeriodseriesList to a ``pandas.DataFrame`` where all time
series are placed in its own column and are lined up with the date-time
as index.
:param frequency: The frequency of the resulting time series
:type frequency: Frequency, required
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
return self.to_dataframe(frequency=frequency)
def to_dataframe(self, frequency=None):
"""
Convert this PeriodseriesList to a ``pandas.DataFrame`` where all time
series are placed in its own column and are lined up with the date-time
as index.
:param frequency: The frequency of the resulting time series
:type frequency: Frequency, required
:return: A DataFrame
:rtype: pandas.DataFrame
:raises ImportError: When pandas is not installed on the system
"""
# Verify parameters
assert isinstance(frequency, Frequency), "Must be a frequency"
# Convert to time series then to data frame
timeseries_list = self.to_timeseries(frequency=frequency)
return timeseries_list.to_dataframe()
def append(self, periodseries):
_validate_periodseries(periodseries)
return super().append(periodseries)
def extend(self, iterable):
# Asserts
_validate_periodseries_list(iterable)
# Perform operation
return super().extend(iterable)
def insert(self, index, periodseries):
# Asserts
_validate_periodseries(periodseries)
# Perform operation
return super().insert(index, periodseries)
def __add__(self, rhs):
_validate_periodseries_list(rhs)
return PeriodseriesList(list.__add__(self, rhs))
def __iadd__(self, rhs):
_validate_periodseries_list(rhs)
return PeriodseriesList(list.__iadd__(self, rhs))
def __setitem__(self, key, periodseries):
_validate_periodseries(periodseries)
return super().__setitem__(key, periodseries)
def __mul__(self, rhs):
raise NotImplementedError("PeriodseriesList does not support multiply")
def __rmul__(self, rhs):
raise NotImplementedError("PeriodseriesList does not support multiply")
def __imul__(self, rhs):
raise NotImplementedError("PeriodseriesList does not support multiply")
def copy(self):
return PeriodseriesList(self)
def __getitem__(self, item):
result = list.__getitem__(self, item)
if isinstance(result, list):
return PeriodseriesList(result)
else:
return result
def _validate_periodseries(periodseries):
assert isinstance(periodseries, Periodseries), (
f"Element is not a Periodseries. Expects all "
f"elements to be Periodseries objects."
)
def _validate_periodseries_list(periodseries_list):
for index, periodseries in enumerate(periodseries_list):
assert isinstance(periodseries, Periodseries), (
f"Element {index} is not a Periodseries. Expects all "
f"elements to be Periodseries objects."
)
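# Illustrative sketch (editor's addition): PeriodseriesList validates element
# types on construction and on every call that adds elements, so objects that are
# not Periodseries instances are rejected with an AssertionError instead of
# silently ending up in the list.
def _example_periodseries_list_validation():
    try:
        PeriodseriesList(["not a Periodseries"])
    except AssertionError as error:
        return str(error)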
|
#!/usr/bin/env python3
"""
Extract audio from a video input file and convert it to the desired format.
"""
import asyncio
import websockets
import sys
import json
import hashlib
import os
import argparse
from common.tools import compute_digest
def parse_options(argv):
parser = argparse.ArgumentParser(
description='Extract audio from video.'
)
parser.add_argument(
'input',
# dest='input_fn',
type=str,
help='input video file'
)
parser.add_argument(
'--output',
dest='output',
required=False,
help='output audio file'
)
parser.add_argument(
'--output-format',
dest='output_format',
required=False,
help='output format'
)
return parser.parse_args()
def extract_audio(input_fn, output_fn=None, output_format='mp3'):
input_fn = os.path.abspath(input_fn)
input_root, input_ext = os.path.splitext(input_fn)
if not output_fn:
output_fn = os.path.join('/', 'tmp', 'output', f'audio.{output_format}')
output_fn = os.path.abspath(output_fn)
if not os.path.exists(os.path.dirname(output_fn)):
    os.makedirs(os.path.dirname(output_fn))
if not output_fn.endswith(output_format):
raise Exception(f"'output_fn' must end with '.{output_format}'.")
if output_format == 'wav':
os.system(f"ffmpeg -i '{input_fn}' -acodec pcm_s16le -ac 2 '{output_fn}'")
elif output_format == 'mp3':
os.system(f"ffmpeg -i '{input_fn}' -b:a 192K -vn '{output_fn}'")
else:
raise Exception(f"Unsupported output format '{output_format}'")
extract_event = {
"input": {
"filename": input_fn,
"digest": compute_digest(input_fn)
},
"output": {
"filename": output_fn,
"format": output_format,
"digest": compute_digest(output_fn)
}
}
return extract_event
if __name__ == "__main__":
# input_fn = sys.argv[1]
options = parse_options(sys.argv)
input_fn = options.input
print(f'input: {input_fn}')
extract_event = extract_audio(input_fn)
# print(json.dumps(extract_event, indent=4, sort_keys=True))
print(f"input: {extract_event["input"]["filename"]}")
print(f"output: {extract_event["output"]["filename"]}")
|
#!/usr/bin/env python3
"""
Extract audio from a video input file and convert it to the desired format.
"""
import asyncio
import websockets
import sys
import json
import hashlib
import os
import argparse
from common.tools import compute_digest
def parse_options(argv):
parser = argparse.ArgumentParser(
description='Extract audio from video.'
)
parser.add_argument(
'input',
# dest='input_fn',
type=str,
help='input video file'
)
parser.add_argument(
'--output',
dest='output',
required=False,
help='output audio file'
)
parser.add_argument(
'--output-format',
dest='output_format',
required=False,
help='output format'
)
return parser.parse_args()
def extract_audio(input_fn, output_fn=None, output_format='mp3'):
input_fn = os.path.abspath(input_fn)
input_root, input_ext = os.path.splitext(input_fn)
if not output_fn:
output_fn = os.path.join('/', 'tmp', 'output', f'audio.{output_format}')
output_fn = os.path.abspath(output_fn)
if not os.path.exists(os.path.dirname(output_fn)):
    os.makedirs(os.path.dirname(output_fn))
if not output_fn.endswith(output_format):
raise Exception(f"'output_fn' must end with '.{output_format}'.")
if output_format == 'wav':
os.system(f"ffmpeg -i '{input_fn}' -acodec pcm_s16le -ac 2 '{output_fn}'")
elif output_format == 'mp3':
os.system(f"ffmpeg -i '{input_fn}' -b:a 192K -vn '{output_fn}'")
else:
raise Exception(f"Unsupported output format '{output_format}'")
extract_event = {
"input": {
"filename": input_fn,
"digest": compute_digest(input_fn)
},
"output": {
"filename": output_fn,
"format": output_format,
"digest": compute_digest(output_fn)
}
}
return extract_event
if __name__ == "__main__":
# input_fn = sys.argv[1]
options = parse_options(sys.argv)
input_fn = options.input
print(f'input: {input_fn}')
extract_event = extract_audio(input_fn)
# print(json.dumps(extract_event, indent=4, sort_keys=True))
print(f"input: {extract_event['input']['filename']}")
print(f"output: {extract_event['output']['filename']}")
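# Editor's sketch (not part of this script): the same ffmpeg calls could be made
# with subprocess.run and an argument list, which avoids the shell-quoting issues
# of os.system when filenames contain quotes or spaces. The function name and the
# '-y' overwrite flag are illustrative choices, not taken from the original code.
def _run_ffmpeg_sketch(input_fn, output_fn, output_format='mp3'):
    import subprocess
    if output_format == 'wav':
        args = ['ffmpeg', '-y', '-i', input_fn, '-acodec', 'pcm_s16le', '-ac', '2', output_fn]
    else:
        args = ['ffmpeg', '-y', '-i', input_fn, '-b:a', '192K', '-vn', output_fn]
    subprocess.run(args, check=True)  # raises CalledProcessError if ffmpeg fails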
|
import json
import os
import unittest
import warnings
import yaml
from checkov.terraform import checks
from checkov.common.checks_infra.checks_parser import NXGraphCheckParser
from checkov.common.checks_infra.registry import Registry
from checkov.common.models.enums import CheckResult
from typing import List
from pathlib import Path
from checkov.terraform.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestYamlPolicies(unittest.TestCase):
def setUp(self) -> None:
os.environ['UNIQUE_TAG'] = ''
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def test_VPCHasFlowLog(self):
self.go("VPCHasFlowLog")
def test_VPCHasRestrictedSG(self):
self.go("VPCHasRestrictedSG")
def test_APIGWLoggingLevelsDefinedProperly(self):
self.go("APIGWLoggingLevelsDefinedProperly")
def test_GuardDutyIsEnabled(self):
self.go("GuardDutyIsEnabled")
def test_SGAttachedToResource(self):
self.go("SGAttachedToResource")
def test_StorageContainerActivityLogsNotPublic(self):
self.go("StorageContainerActivityLogsNotPublic")
def test_StorageCriticalDataEncryptedCMK(self):
self.go("StorageCriticalDataEncryptedCMK")
def test_VAconfiguredToSendReports(self):
self.go("VAconfiguredToSendReports")
def test_VAconfiguredToSendReportsToAdmins(self):
self.go("VAconfiguredToSendReportsToAdmins")
def test_VAisEnabledInStorageAccount(self):
self.go("VAisEnabledInStorageAccount")
def test_VAsetPeriodicScansOnSQL(self):
self.go("VAsetPeriodicScansOnSQL")
def test_CloudFrontHasSecurityHeadersPolicy(self):
self.go("CloudFrontHasSecurityHeadersPolicy")
def test_CloudtrailHasCloudwatch(self):
self.go("CloudtrailHasCloudwatch")
def test_S3BucketHasPublicAccessBlock(self):
self.go("S3BucketHasPublicAccessBlock")
def test_AccessToPostgreSQLFromAzureServicesIsDisabled(self):
self.go("AccessToPostgreSQLFromAzureServicesIsDisabled")
def test_AzureActiveDirectoryAdminIsConfigured(self):
self.go("AzureActiveDirectoryAdminIsConfigured")
def test_DisableAccessToSqlDBInstanceForRootUsersWithoutPassword(self):
self.go("DisableAccessToSqlDBInstanceForRootUsersWithoutPassword")
def test_GCPProjectHasNoLegacyNetworks(self):
self.go("GCPProjectHasNoLegacyNetworks")
def test_AzureDataFactoriesEncryptedWithCustomerManagedKey(self):
self.go("AzureDataFactoriesEncryptedWithCustomerManagedKey")
def test_AzureUnattachedDisksAreEncrypted(self):
self.go("AzureUnattachedDisksAreEncrypted")
def test_AzureNetworkInterfacePublicIPAddressId(self):
self.go("AzureNetworkInterfacePublicIPAddressId")
def test_AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs(self):
self.go("AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs")
def test_ALBRedirectsHTTPToHTTPS(self):
self.go("ALBRedirectsHTTPToHTTPS")
def test_GCPLogBucketsConfiguredUsingLock(self):
self.go("GCPLogBucketsConfiguredUsingLock")
def test_GCPAuditLogsConfiguredForAllServicesAndUsers(self):
self.go("GCPAuditLogsConfiguredForAllServicesAndUsers")
def test_GCPKMSCryptoKeysAreNotPubliclyAccessible(self):
self.go("GCPKMSCryptoKeysAreNotPubliclyAccessible")
def test_VirtualMachinesUtilizingManagedDisks(self):
self.go("VirtualMachinesUtilizingManagedDisks")
def test_RDSClusterHasBackupPlan(self):
self.go("RDSClusterHasBackupPlan")
def test_EBSAddedBackup(self):
self.go("EBSAddedBackup")
def test_AMRClustersNotOpenToInternet(self):
self.go("AMRClustersNotOpenToInternet")
def test_AutoScallingEnabledELB(self):
self.go("AutoScallingEnabledELB")
def test_IAMGroupHasAtLeastOneUser(self):
self.go("IAMGroupHasAtLeastOneUser")
def test_IAMUserHasNoConsoleAccess(self):
self.go("IAMUserHasNoConsoleAccess")
def test_IAMUsersAreMembersAtLeastOneGroup(self):
self.go("IAMUsersAreMembersAtLeastOneGroup")
def test_DataExplorerEncryptionUsesCustomKey(self):
self.go("DataExplorerEncryptionUsesCustomKey")
def test_MSQLenablesCustomerManagedKey(self):
self.go("MSQLenablesCustomerManagedKey")
def test_PGSQLenablesCustomerManagedKey(self):
self.go("PGSQLenablesCustomerManagedKey")
def test_StorageLoggingIsEnabledForBlobService(self):
self.go("StorageLoggingIsEnabledForBlobService")
def test_StorageLoggingIsEnabledForTableService(self):
self.go("StorageLoggingIsEnabledForTableService")
def test_VMHasBackUpMachine(self):
self.go("VMHasBackUpMachine")
def test_SubnetHasACL(self):
self.go("SubnetHasACL")
def test_GKEClustersAreNotUsingDefaultServiceAccount(self):
self.go("GKEClustersAreNotUsingDefaultServiceAccount")
def test_AzureStorageAccountsUseCustomerManagedKeyForEncryption(self):
self.go("AzureStorageAccountsUseCustomerManagedKeyForEncryption")
def test_AzureMSSQLServerHasSecurityAlertPolicy(self):
self.go("AzureMSSQLServerHasSecurityAlertPolicy")
def test_AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached(self):
self.go("AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached")
def test_EncryptedEBSVolumeOnlyConnectedToEC2s(self):
self.go("EncryptedEBSVolumeOnlyConnectedToEC2s")
def test_ServiceAccountHasGCPmanagedKey(self):
self.go("ServiceAccountHasGCPmanagedKey")
def test_AutoScalingEnableOnDynamoDBTables(self):
self.go("AutoScalingEnableOnDynamoDBTables")
def test_EIPAllocatedToVPCAttachedEC2(self):
self.go("EIPAllocatedToVPCAttachedEC2")
def test_EFSAddedBackup(self):
self.go("EFSAddedBackup")
def test_EFSAddedBackupSuppress(self):
self.go("EFSAddedBackupSuppress", "EFSAddedBackup")
def test_Route53ARecordAttachedResource(self):
self.go("Route53ARecordAttachedResource")
def test_PostgresRDSHasQueryLoggingEnabled(self):
self.go("PostgresRDSHasQueryLoggingEnabled")
def test_PostgresDBHasQueryLoggingEnabled(self):
self.go("PostgresDBHasQueryLoggingEnabled")
def test_ALBProtectedByWAF(self):
self.go("ALBProtectedByWAF")
def test_APIProtectedByWAF(self):
self.go("APIProtectedByWAF")
def test_SQLServerAuditingEnabled(self):
self.go("SQLServerAuditingEnabled")
def test_WAF2HasLogs(self):
self.go("WAF2HasLogs")
def test_AppSyncProtectedByWAF(self):
self.go("AppSyncProtectedByWAF")
def test_SQLServerAuditingRetention90Days(self):
self.go("SQLServerAuditingRetention90Days")
def test_AWSSSMParameterShouldBeEncrypted(self):
self.go("AWSSSMParametershouldbeEncrypted", "AWSSSMParameterShouldBeEncrypted")
def test_AWSNATGatewaysshouldbeutilized(self):
self.go("AWSNATGatewaysshouldbeutilized")
def test_registry_load(self):
registry = Registry(parser=NXGraphCheckParser(), checks_dir=str(
Path(__file__).parent.parent.parent.parent.parent / "checkov" / "terraform" / "checks" / "graph_checks"))
registry.load_checks()
self.assertGreater(len(registry.checks), 0)
def go(self, dir_name, check_name=None):
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
f"resources/{dir_name}")
assert os.path.exists(dir_path)
policy_dir_path = os.path.dirname(checks.__file__)
assert os.path.exists(policy_dir_path)
found = False
for root, d_names, f_names in os.walk(policy_dir_path):
for f_name in f_names:
check_name = dir_name if check_name is None else check_name
if f_name == f"{check_name}.yaml":
found = True
policy = load_yaml_data(f_name, root)
assert policy is not None
expected = load_yaml_data("expected.yaml", dir_path)
assert expected is not None
report = get_policy_results(dir_path, policy)
expected_to_fail = expected.get('fail', [])
expected_to_pass = expected.get('pass', [])
expected_to_skip = expected.get('skip', [])
self.assert_entities(expected_to_pass, report.passed_checks, True)
self.assert_entities(expected_to_fail, report.failed_checks, False)
self.assert_entities(expected_to_skip, report.skipped_checks, True)
assert found
def assert_entities(self, expected_entities: List[str], results: List[CheckResult], assertion: bool):
self.assertEqual(len(expected_entities), len(results),
f"mismatch in number of results in {"passed" if assertion else "failed"}, "
f"expected: {len(expected_entities)}, got: {len(results)}")
for expected_entity in expected_entities:
found = False
for check_result in results:
entity_id = check_result.resource
if entity_id == expected_entity:
found = True
break
self.assertTrue(found, f"expected to find entity {expected_entity}, {"passed" if assertion else "failed"}")
def get_policy_results(root_folder, policy):
check_id = policy['metadata']['id']
graph_runner = Runner()
report = graph_runner.run(root_folder, runner_filter=RunnerFilter(checks=[check_id]))
return report
def wrap_policy(policy):
policy['query'] = policy['definition']
del policy['definition']
def load_yaml_data(source_file_name, dir_path):
expected_path = os.path.join(dir_path, source_file_name)
if not os.path.exists(expected_path):
return None
with open(expected_path, "r") as f:
expected_data = yaml.safe_load(f)
return json.loads(json.dumps(expected_data))
|
import json
import os
import unittest
import warnings
import yaml
from checkov.terraform import checks
from checkov.common.checks_infra.checks_parser import NXGraphCheckParser
from checkov.common.checks_infra.registry import Registry
from checkov.common.models.enums import CheckResult
from typing import List
from pathlib import Path
from checkov.terraform.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestYamlPolicies(unittest.TestCase):
def setUp(self) -> None:
os.environ['UNIQUE_TAG'] = ''
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def test_VPCHasFlowLog(self):
self.go("VPCHasFlowLog")
def test_VPCHasRestrictedSG(self):
self.go("VPCHasRestrictedSG")
def test_APIGWLoggingLevelsDefinedProperly(self):
self.go("APIGWLoggingLevelsDefinedProperly")
def test_GuardDutyIsEnabled(self):
self.go("GuardDutyIsEnabled")
def test_SGAttachedToResource(self):
self.go("SGAttachedToResource")
def test_StorageContainerActivityLogsNotPublic(self):
self.go("StorageContainerActivityLogsNotPublic")
def test_StorageCriticalDataEncryptedCMK(self):
self.go("StorageCriticalDataEncryptedCMK")
def test_VAconfiguredToSendReports(self):
self.go("VAconfiguredToSendReports")
def test_VAconfiguredToSendReportsToAdmins(self):
self.go("VAconfiguredToSendReportsToAdmins")
def test_VAisEnabledInStorageAccount(self):
self.go("VAisEnabledInStorageAccount")
def test_VAsetPeriodicScansOnSQL(self):
self.go("VAsetPeriodicScansOnSQL")
def test_CloudFrontHasSecurityHeadersPolicy(self):
self.go("CloudFrontHasSecurityHeadersPolicy")
def test_CloudtrailHasCloudwatch(self):
self.go("CloudtrailHasCloudwatch")
def test_S3BucketHasPublicAccessBlock(self):
self.go("S3BucketHasPublicAccessBlock")
def test_AccessToPostgreSQLFromAzureServicesIsDisabled(self):
self.go("AccessToPostgreSQLFromAzureServicesIsDisabled")
def test_AzureActiveDirectoryAdminIsConfigured(self):
self.go("AzureActiveDirectoryAdminIsConfigured")
def test_DisableAccessToSqlDBInstanceForRootUsersWithoutPassword(self):
self.go("DisableAccessToSqlDBInstanceForRootUsersWithoutPassword")
def test_GCPProjectHasNoLegacyNetworks(self):
self.go("GCPProjectHasNoLegacyNetworks")
def test_AzureDataFactoriesEncryptedWithCustomerManagedKey(self):
self.go("AzureDataFactoriesEncryptedWithCustomerManagedKey")
def test_AzureUnattachedDisksAreEncrypted(self):
self.go("AzureUnattachedDisksAreEncrypted")
def test_AzureNetworkInterfacePublicIPAddressId(self):
self.go("AzureNetworkInterfacePublicIPAddressId")
def test_AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs(self):
self.go("AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs")
def test_ALBRedirectsHTTPToHTTPS(self):
self.go("ALBRedirectsHTTPToHTTPS")
def test_GCPLogBucketsConfiguredUsingLock(self):
self.go("GCPLogBucketsConfiguredUsingLock")
def test_GCPAuditLogsConfiguredForAllServicesAndUsers(self):
self.go("GCPAuditLogsConfiguredForAllServicesAndUsers")
def test_GCPKMSCryptoKeysAreNotPubliclyAccessible(self):
self.go("GCPKMSCryptoKeysAreNotPubliclyAccessible")
def test_VirtualMachinesUtilizingManagedDisks(self):
self.go("VirtualMachinesUtilizingManagedDisks")
def test_RDSClusterHasBackupPlan(self):
self.go("RDSClusterHasBackupPlan")
def test_EBSAddedBackup(self):
self.go("EBSAddedBackup")
def test_AMRClustersNotOpenToInternet(self):
self.go("AMRClustersNotOpenToInternet")
def test_AutoScallingEnabledELB(self):
self.go("AutoScallingEnabledELB")
def test_IAMGroupHasAtLeastOneUser(self):
self.go("IAMGroupHasAtLeastOneUser")
def test_IAMUserHasNoConsoleAccess(self):
self.go("IAMUserHasNoConsoleAccess")
def test_IAMUsersAreMembersAtLeastOneGroup(self):
self.go("IAMUsersAreMembersAtLeastOneGroup")
def test_DataExplorerEncryptionUsesCustomKey(self):
self.go("DataExplorerEncryptionUsesCustomKey")
def test_MSQLenablesCustomerManagedKey(self):
self.go("MSQLenablesCustomerManagedKey")
def test_PGSQLenablesCustomerManagedKey(self):
self.go("PGSQLenablesCustomerManagedKey")
def test_StorageLoggingIsEnabledForBlobService(self):
self.go("StorageLoggingIsEnabledForBlobService")
def test_StorageLoggingIsEnabledForTableService(self):
self.go("StorageLoggingIsEnabledForTableService")
def test_VMHasBackUpMachine(self):
self.go("VMHasBackUpMachine")
def test_SubnetHasACL(self):
self.go("SubnetHasACL")
def test_GKEClustersAreNotUsingDefaultServiceAccount(self):
self.go("GKEClustersAreNotUsingDefaultServiceAccount")
def test_AzureStorageAccountsUseCustomerManagedKeyForEncryption(self):
self.go("AzureStorageAccountsUseCustomerManagedKeyForEncryption")
def test_AzureMSSQLServerHasSecurityAlertPolicy(self):
self.go("AzureMSSQLServerHasSecurityAlertPolicy")
def test_AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached(self):
self.go("AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached")
def test_EncryptedEBSVolumeOnlyConnectedToEC2s(self):
self.go("EncryptedEBSVolumeOnlyConnectedToEC2s")
def test_ServiceAccountHasGCPmanagedKey(self):
self.go("ServiceAccountHasGCPmanagedKey")
def test_AutoScalingEnableOnDynamoDBTables(self):
self.go("AutoScalingEnableOnDynamoDBTables")
def test_EIPAllocatedToVPCAttachedEC2(self):
self.go("EIPAllocatedToVPCAttachedEC2")
def test_EFSAddedBackup(self):
self.go("EFSAddedBackup")
def test_EFSAddedBackupSuppress(self):
self.go("EFSAddedBackupSuppress", "EFSAddedBackup")
def test_Route53ARecordAttachedResource(self):
self.go("Route53ARecordAttachedResource")
def test_PostgresRDSHasQueryLoggingEnabled(self):
self.go("PostgresRDSHasQueryLoggingEnabled")
def test_PostgresDBHasQueryLoggingEnabled(self):
self.go("PostgresDBHasQueryLoggingEnabled")
def test_ALBProtectedByWAF(self):
self.go("ALBProtectedByWAF")
def test_APIProtectedByWAF(self):
self.go("APIProtectedByWAF")
def test_SQLServerAuditingEnabled(self):
self.go("SQLServerAuditingEnabled")
def test_WAF2HasLogs(self):
self.go("WAF2HasLogs")
def test_AppSyncProtectedByWAF(self):
self.go("AppSyncProtectedByWAF")
def test_SQLServerAuditingRetention90Days(self):
self.go("SQLServerAuditingRetention90Days")
def test_AWSSSMParameterShouldBeEncrypted(self):
self.go("AWSSSMParametershouldbeEncrypted", "AWSSSMParameterShouldBeEncrypted")
def test_AWSNATGatewaysshouldbeutilized(self):
self.go("AWSNATGatewaysshouldbeutilized")
def test_registry_load(self):
registry = Registry(parser=NXGraphCheckParser(), checks_dir=str(
Path(__file__).parent.parent.parent.parent.parent / "checkov" / "terraform" / "checks" / "graph_checks"))
registry.load_checks()
self.assertGreater(len(registry.checks), 0)
def go(self, dir_name, check_name=None):
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
f"resources/{dir_name}")
assert os.path.exists(dir_path)
policy_dir_path = os.path.dirname(checks.__file__)
assert os.path.exists(policy_dir_path)
found = False
for root, d_names, f_names in os.walk(policy_dir_path):
for f_name in f_names:
check_name = dir_name if check_name is None else check_name
if f_name == f"{check_name}.yaml":
found = True
policy = load_yaml_data(f_name, root)
assert policy is not None
expected = load_yaml_data("expected.yaml", dir_path)
assert expected is not None
report = get_policy_results(dir_path, policy)
expected_to_fail = expected.get('fail', [])
expected_to_pass = expected.get('pass', [])
expected_to_skip = expected.get('skip', [])
self.assert_entities(expected_to_pass, report.passed_checks, True)
self.assert_entities(expected_to_fail, report.failed_checks, False)
self.assert_entities(expected_to_skip, report.skipped_checks, True)
assert found
def assert_entities(self, expected_entities: List[str], results: List[CheckResult], assertion: bool):
self.assertEqual(len(expected_entities), len(results),
f"mismatch in number of results in {'passed' if assertion else 'failed'}, "
f"expected: {len(expected_entities)}, got: {len(results)}")
for expected_entity in expected_entities:
found = False
for check_result in results:
entity_id = check_result.resource
if entity_id == expected_entity:
found = True
break
self.assertTrue(found, f"expected to find entity {expected_entity}, {'passed' if assertion else 'failed'}")
def get_policy_results(root_folder, policy):
check_id = policy['metadata']['id']
graph_runner = Runner()
report = graph_runner.run(root_folder, runner_filter=RunnerFilter(checks=[check_id]))
return report
def wrap_policy(policy):
policy['query'] = policy['definition']
del policy['definition']
def load_yaml_data(source_file_name, dir_path):
expected_path = os.path.join(dir_path, source_file_name)
if not os.path.exists(expected_path):
return None
with open(expected_path, "r") as f:
expected_data = yaml.safe_load(f)
return json.loads(json.dumps(expected_data))
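# Editor's sketch (illustrative, not part of the test suite): the shape of an
# expected.yaml file consumed by go() and assert_entities() above. The keys
# pass/fail/skip map to lists of Terraform resource IDs that are compared against
# check_result.resource; the concrete resource names below are made up.
_EXAMPLE_EXPECTED_YAML = {
    "pass": ["aws_s3_bucket.good"],
    "fail": ["aws_s3_bucket.bad"],
    "skip": [],
}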
|
import os, re, copy, json, subprocess
from random import randrange, randint, choice
from threading import Thread
from couchbase_helper.cluster import Cluster
from membase.helper.rebalance_helper import RebalanceHelper
from couchbase_helper.documentgenerator import BlobGenerator, DocumentGenerator
from ent_backup_restore.enterprise_backup_restore_base import EnterpriseBackupRestoreBase
from ent_backup_restore.backup_service_upgrade import BackupServiceHook
from membase.api.rest_client import RestConnection, RestHelper, Bucket
from membase.helper.bucket_helper import BucketOperationHelper
from pytests.query_tests_helper import QueryHelperTests
#from lib.membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteUtilHelper, RemoteMachineShellConnection
from security.auditmain import audit
from security.rbac_base import RbacBase
from upgrade.newupgradebasetest import NewUpgradeBaseTest
from couchbase.bucket import Bucket
from couchbase_helper.document import View
from eventing.eventing_base import EventingBaseTest
from tasks.future import Future, TimeoutError
from xdcr.xdcrnewbasetests import NodeHelper
from couchbase_helper.stats_tools import StatsCommon
from testconstants import COUCHBASE_DATA_PATH, WIN_COUCHBASE_DATA_PATH, \
COUCHBASE_FROM_4DOT6, ENT_BKRS, ENT_BKRS_FTS
AUDITBACKUPID = 20480
AUDITRESTOREID = 20485
SOURCE_CB_PARAMS = {
"authUser": "default",
"authPassword": "",
"authSaslUser": "",
"authSaslPassword": "",
"clusterManagerBackoffFactor": 0,
"clusterManagerSleepInitMS": 0,
"clusterManagerSleepMaxMS": 20000,
"dataManagerBackoffFactor": 0,
"dataManagerSleepInitMS": 0,
"dataManagerSleepMaxMS": 20000,
"feedBufferSizeBytes": 0,
"feedBufferAckThreshold": 0
}
INDEX_DEFINITION = {
"type": "fulltext-index",
"name": "",
"uuid": "",
"params": {},
"sourceType": "couchbase",
"sourceName": "default",
"sourceUUID": "",
"sourceParams": SOURCE_CB_PARAMS,
"planParams": {}
}
class EnterpriseBackupRestoreTest(EnterpriseBackupRestoreBase, NewUpgradeBaseTest):
def setUp(self):
super().setUp()
self.users_check_restore = \
self.input.param("users-check-restore", '').replace("ALL", "*").split(";")
if '' in self.users_check_restore:
self.users_check_restore.remove('')
for server in [self.backupset.backup_host, self.backupset.restore_cluster_host]:
conn = RemoteMachineShellConnection(server)
conn.extract_remote_info()
conn.terminate_processes(conn.info, ["cbbackupmgr"])
conn.disconnect()
self.bucket_helper = BucketOperationHelper()
def tearDown(self):
super(EnterpriseBackupRestoreTest, self).tearDown()
def test_backup_create(self):
self.backup_create_validate()
def test_backup_restore_sanity(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Perform updates and create backups for specified number of times (test param number_of_backups)
3. Perform restores for the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("*** start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", self.expires)
self.log.info("*** done to load items to all buckets")
self.ops_type = self.input.param("ops-type", "update")
self.expected_error = self.input.param("expected_error", None)
if self.auto_failover:
self.log.info("Enabling auto failover on " + str(self.backupset.cluster_host))
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)
self.backup_create_validate()
for i in range(1, self.backupset.number_of_backups + 1):
if self.ops_type == "update":
self.log.info("*** start to update items in all buckets")
self._load_all_buckets(self.master, gen, "update", self.expires)
self.log.info("*** done update items in all buckets")
elif self.ops_type == "delete":
self.log.info("*** start to delete items in all buckets")
self._load_all_buckets(self.master, gen, "delete", self.expires)
self.log.info("*** done to delete items in all buckets")
self.sleep(10)
self.log.info("*** start to validate backup cluster")
self.backup_cluster_validate()
self.targetMaster = True
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.log.info("*** start to restore cluster")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
if self.reset_restore_cluster:
self.log.info("*** start to reset cluster")
self.backup_reset_clusters(self.cluster_to_restore)
if self.same_cluster:
self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
else:
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
rest = RestConnection(self.backupset.restore_cluster_host)
rest.force_eject_node()
rest.init_node()
self.log.info("Done reset cluster")
self.sleep(10)
""" Add built-in user cbadminbucket to second cluster """
self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])
self.backupset.start = start
self.backupset.end = end
self.log.info("*** start restore validation")
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=",
expected_error=self.expected_error)
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def test_backup_restore_after_rebalance(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
rebalance.result()
self.backup_cluster_validate()
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
def test_backup_restore_with_rebalance(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup while rebalance is going on
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster while rebalance is going on
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
self.sleep(10)
count = 0
while rebalance.state != "FINISHED":
if count == 0:
self.backup_cluster_validate()
count += 1
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
self.sleep(10)
count = 0
while rebalance.state != "FINISHED":
if count == 0:
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
count += 1
def test_backup_restore_with_ops(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Perform the specified ops (test param ops-type) and create backups for specified number of times
(test param number_of_backups)
3. Perform restores for the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
initial_gen = copy.deepcopy(gen)
initial_keys = []
for x in initial_gen:
initial_keys.append(x[0])
self.log.info("Start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.ops_type = self.input.param("ops-type", "update")
self.log.info("Create backup repo ")
self.backup_create()
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
if self.compact_backup and self.ops_type == "delete":
self.log.info("Start to compact backup ")
self.backup_compact_validate()
self.log.info("Validate deleted keys")
self.backup_compact_deleted_keys_validation(initial_keys)
self.log.info("start restore cluster ")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
self.backupset.start = start
self.backupset.end = end
self._backup_restore_with_ops(backup=False, compare_function=">=")
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def _backup_restore_with_ops(self, exp=0, backup=True, compare_uuid=False,
compare_function="==", replicas=False,
mode="memory", node=None, repeats=0,
validate_directory_structure=True):
self.ops_type = self.input.param("ops-type", "update")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self.log.info("Start doing ops: %s " % self.ops_type)
if node is None:
node = self.master
self._load_all_buckets(node, gen, self.ops_type, exp)
if backup:
self.backup_cluster_validate(repeats=repeats,
validate_directory_structure=validate_directory_structure)
else:
self.backup_restore_validate(compare_uuid=compare_uuid,
seqno_compare_function=compare_function,
replicas=replicas, mode=mode)
def test_backup_list(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_list_validate()
def test_backup_list_optional_switches(self):
"""
1. Creates specified buckets on the cluster and loads it with given number of items
Note: this test should be run with 2 buckets
2. Creates two backupsets
3. Creates two backups on each of the backupset
4. Executes list command with --name and validates
5. Executes list command with --name and --incr-backup and validates
6. Executes list command with --name, --incr-backup and --bucket-backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.name = "backup2"
self.backup_create(del_old_backup=False)
self._take_n_backups(n=2)
incr_names = 0
backup_name = False
warning_msg = "is either empty or it got interrupted"
self.backupset.backup_list_name = "backup"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warning_msg in line:
continue
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[0] in line:
incr_names += 1
if self.backups[1] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
incr_names = 0
backup_name = False
self.backupset.backup_list_name = "backup2"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warning_msg in line:
continue
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[2] in line:
incr_names += 1
if self.backups[3] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
buckets = 0
name = False
self.backupset.backup_list_name = "backup"
self.backupset.backup_incr_backup = self.backups[0]
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warning_msg in line:
continue
if self.backupset.backup_incr_backup in line:
name = True
if self.buckets[0].name in line:
buckets += 1
if self.buckets[1].name in line:
buckets += 1
self.assertTrue(name, "Expected incremental backup name not found in output")
self.log.info("Expected incremental backup name found in output")
self.assertEqual(buckets, 2, "Expected buckets were not listed for --incr-backup option")
self.log.info("Expected buckets were listed for --incr-backup option")
name = False
items = 0
self.backupset.backup_list_name = "backup2"
self.backupset.backup_incr_backup = self.backups[2]
self.backupset.bucket_backup = self.buckets[0].name
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
output = json.loads(output[0])
if self.buckets[0].name == output["name"]:
name = True
items = output["items"]
self.assertTrue(name, "Expected bucket not listed for --bucket-backup option")
self.log.info("Expected bucket listed for --bucket-backup option")
self.assertEqual(items, self.num_items, "Mismatch in items for --bucket-backup option")
self.log.info("Expected number of items for --bucket-backup option")
def test_list_with_large_number_of_backups(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a large number of backups
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=25)
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]["backups"]
else:
return False, "No output content"
self.assertEqual(len(bk_info), len(self.backups),
"Number of backups did not match. In repo: {0} != in bk: {1}"\
.format(len(bk_info), len(self.backups)))
for backup in bk_info:
if backup["date"] not in self.backups:
self.fail("backup date does not match")
self.log.info("Number of backups matched")
def _take_n_backups(self, n=1, validate=False):
for i in range(1, n + 1):
if validate:
self.backup_cluster_validate()
else:
self.backup_cluster()
def test_backup_info_with_start_end_flag(self):
"""
1. Create default bucket and load items to bucket
2. Run a number of backups, set by the test param number_of_backups=x
3. Run subcommand info with random start and end values. Value could be an index, a date or a backup name
4. conf file name: bkrs-info-with-start-end-flag.conf
"""
if self.bkinfo_date_start_ago:
conn = RemoteMachineShellConnection(self.backupset.backup_host)
start_date_cmd = "date --date=\"{} days ago\" '+%d-%m-%Y' "\
.format(self.bkinfo_date_start_ago)
output, error = conn.execute_command(start_date_cmd)
start_date = output[0]
end_date_cmd = "date '+%d-%m-%Y' "
output, error = conn.execute_command(end_date_cmd)
end_date = output[0]
conn.disconnect()
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
initial_gen = copy.deepcopy(gen)
self.log.info("Start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Create backup repo ")
self.backup_create()
for i in range(1, self.backupset.number_of_backups + 1):
self.backup_cluster()
self.log.info("done running backup")
if self.bkinfo_start_end_with_bkname:
bkname_start_index = int(self.bkinfo_start_end_with_bkname.split(":")[0])
bkname_start = self.backups[bkname_start_index]
bkname_end_index = int(self.bkinfo_start_end_with_bkname.split(":")[1])
bkname_end = self.backups[bkname_end_index]
if self.bkinfo_date_start_ago:
o, e = self.backup_info(start=start_date,end=end_date)
elif self.bkinfo_start_end_with_bkname:
o, e = self.backup_info(start=bkname_start,end=bkname_end)
else:
o, e = self.backup_info(start=self.bkinfo_start,end=self.bkinfo_end)
if o and o[0]:
bk_info = json.loads(o[0])
bk_info = bk_info["backups"]
if self.debug_logs:
print("\nbk info : ", bk_info)
print("\n bkinfo len: ", len(bk_info))
print("\nbk info date : ", bk_info[0]["date"])
print("\nbk info type : ", bk_info[0]["type"])
print("\nnubmer backup : ", self.backups)
if self.bkinfo_start == 1 and self.bkinfo_end == 1:
if "FULL" not in bk_info[0]["type"]:
self.fail("First backup is not full backup")
elif self.bkinfo_start > 1 and self.bkinfo_end > 1:
if "INCR" not in bk_info[0]["type"]:
self.fail("> 0th backup is not incr backup")
if self.bkinfo_date_start_ago:
if len(bk_info) != len(self.backups):
self.fail("bkrs info failed to show all backups today")
elif self.bkinfo_start_end_with_bkname:
if len(bk_info) != (bkname_end_index - bkname_start_index + 1):
self.fail("bkrs info does not show correct nubmer of backups with backup name")
elif len(bk_info) != (self.bkinfo_end - self.bkinfo_start + 1):
self.fail("bkrs info does not show correct nubmer of backups")
def test_backup_compact(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact_validate()
def test_backup_with_purge_interval_set_to_float(self):
"""
cbbackupmgr should handle case with purge interval set to float number
return: None
"""
purgeInterval = 1.5
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Set purge interval to float value '%s'" % purgeInterval)
rest = RestConnection(self.backupset.cluster_host)
status, content = rest.set_purge_interval_and_parallel_compaction(purgeInterval)
if status:
self.log.info("Done set purge interval value '%s'" % purgeInterval)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
else:
self.fail("Failed to set purgeInterval value")
def test_restore_from_compacted_backup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset
4. Restores from the compacted backup and validates it
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact()
self.backup_restore_validate()
def test_backup_with_compress_flag(self):
"""
1. Load docs into bucket
2. Backup without compress flag
3. Get backup data size
4. Delete backup repo
5. Do backup again with compress flag
6. Compare those data if it flag works
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backupset.backup_compressed = False
self.backup_cluster()
no_compression = self.get_database_file_info()
self.log.info("\nDelete old backup and do backup again with compress flag")
self.backup_create()
self.backupset.backup_compressed = self.input.param("backup-compressed", False)
self.backup_cluster()
with_compression = self.get_database_file_info()
self.validate_backup_compressed_file(no_compression, with_compression)
def test_backup_restore_with_credentials_env(self):
"""
password will pass as in env variable
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
output, error = self.backup_cluster()
if output and not self._check_output("Backup completed successfully", output):
self.fail("Failed to run with password env %s " % output)
self.backup_cluster_validate(skip_backup=True)
self.backup_list()
self.backup_restore_validate()
def test_backup_with_update_on_disk_of_snapshot_markers(self):
"""
This test is for MB-25727 (using cbbackupwrapper)
        Remove this test once cbbackupwrapper is dropped.
No default bucket, default_bucket=false
Create bucket0
Load 100K items to bucket0
Stop persistence on server via cbepctl
Load another 100K items.
Run full backup with cbbackupwrapper
Load another 100K items.
Run diff backup. Backup process will hang with error in memcached as shown above
:return: None
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if version[:5] == "6.5.0":
self.log.info("\n\n******* Due to issue in MB-36904, \
\nthis test will be skipped in 6.5.0 ********\n")
return
gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=100000)
gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=100000)
gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size, end=100000)
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.create_bucket(bucket="bucket0", ramQuotaMB=1024)
self.buckets = rest_conn.get_buckets()
authentication = "-u Administrator -p password"
self._load_all_buckets(self.master, gen1, "create", 0)
self.log.info("Stop persistent")
cluster_nodes = rest_conn.get_nodes()
clusters = copy.deepcopy(cluster_nodes)
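        # Stop persistence for bucket0 on every data node so the next batch of mutations stays in memory only.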
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for node in clusters:
shell.execute_command("%scbepctl%s %s:11210 -b %s stop %s" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
"bucket0",
authentication))
shell.disconnect()
self.log.info("Load 2nd batch docs")
self._load_all_buckets(self.master, gen2, "create", 0)
self.log.info("Run full backup with cbbackupwrapper")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
backup_dir = self.tmp_path + "backup" + self.master.ip
shell.execute_command("rm -rf %s" % backup_dir)
shell.execute_command("mkdir %s" % backup_dir)
shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
self.log.info("Load 3rd batch docs")
self._load_all_buckets(self.master, gen3, "create", 0)
self.log.info("Run diff backup with cbbackupwrapper")
output, _ = shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
if output and "SUCCESSFULLY COMPLETED" not in output[1]:
self.fail("Failed to backup as the fix in MB-25727")
shell.disconnect()
def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):
"""
This test is for MB-25809
Set default_bucket=False
Create bucket with 1 replica
Load 10K items to bucket
Backup data from bucket
Create other bucket with 2 replicas in other cluster
Restore data to bucket with 2 replicas
Verify data and bucket setting. It must retain 2 replicas
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=10000)
if not self.new_replicas:
self.fail("This test needs to pass param 'new-replicas' to run")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Start backup cluster")
self.backup_cluster_validate()
self.backup_restore_validate()
self.log.info("replicas from backup bucket: {0}".format(self.num_replicas))
self.log.info("replica in restore bucket should be {0} after restore"\
.format(self.new_replicas))
rest_r = RestConnection(self.backupset.restore_cluster_host)
for bucket in self.buckets:
bucket_stats = rest_r.get_bucket_json(bucket.name)
if self.new_replicas != bucket_stats["replicaNumber"]:
self.fail("replia number in bucket {0} did change after restore"\
.format(bucket.name))
self.log.info("Verified replica in bucket {0}: {1}"\
.format(bucket.name,
bucket_stats["replicaNumber"]))
def test_restore_with_invalid_bucket_config_json(self):
"""
        When bucket-config.json in the latest backup is corrupted,
        merging the backups should fail.
1. Create a bucket and load docs into it.
2. Create a backup and validate it.
3. Run full backup
4. Load more docs into bucket
5. Run backup (incremental) and verify.
        6. Modify bucket-config.json to make its content invalid JSON
        7. Run merge of the backups; the merge should fail with an error
"""
gen = BlobGenerator("ent-backup_1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
error_msg = "Error merging data: Unable to read bucket settings because bucket-config.json is corrupt"
if not status:
self.fail(message)
backup_count = 0
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}",
line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
backup_bucket_config_path = self.backupset.directory + "/backup" + \
"/" + self.backups[self.backupset.number_of_backups - 1] + \
"/" + self.buckets[0].name + "-*" \
"/bucket-config.json"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.log.info("Remove } in bucket-config.json to make it invalid json ")
remote_client.execute_command("sed -i 's/}//' %s " % backup_bucket_config_path)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1,
self.backupset.number_of_backups + 1)
result, output, _ = self.backup_merge()
if result:
self.log.info("Here is the output from command %s " % output[0])
if not self._check_output(error_msg, output):
self.fail("read bucket config should fail since bucket-config.json is invalid")
remote_client.disconnect()
def test_restore_with_non_exist_bucket(self):
"""
1. Create a bucket A
2. Load docs to bucket A
3. Do backup bucket A
4. Delete bucket A
5. Restore to bucket A (non exist bucket)
6. Expect errors throw out
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
output, _ = self.backup_restore()
if output and "Error restoring cluster" not in output[0]:
self.fail("Restore to non exist bucket should fail")
def test_merge_backup_from_old_and_new_bucket(self):
"""
1. Create a bucket A
2. Load docs with key 1
3. Do backup
4. Delete bucket A
5. Re-create bucket A
6. Load docs with key 2
7. Do backup
8. Do merge backup. Verify backup only contain docs key 2
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
if self.bucket_delete:
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
elif self.bucket_flush:
self.log.info("Start to flush bucket")
self._all_buckets_flush()
gen = BlobGenerator("ent-backup2_", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Start to load bucket again with different key")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_cluster()
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = self.backupset.number_of_backups
self.merged = True
        result, output, _ = self.backup_merge()
        if not result:
            self.fail("Merge of backups failed: {0}".format(output))
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, "ent-backup1")
self.backup_cluster_validate(skip_backup=True)
def test_merge_backup_with_merge_kill_and_re_merge(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup
6. Merge backup
7. Kill merge process
8. Merge backup again
Result: 2nd merge should run ok
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 2
self.merged = True
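        # Run the merge in one thread while a second thread kills the cbbackupmgr process mid-merge.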
merge_threads = []
merge_thread = Thread(target=self.backup_merge)
merge_threads.append(merge_thread)
merge_thread.start()
merge_kill_thread = Thread(target=self._kill_cbbackupmgr)
merge_threads.append(merge_kill_thread)
merge_kill_thread.start()
for merge_thread in merge_threads:
merge_thread.join()
status, output, message = self.backup_list()
if not status:
self.fail(message)
        result, output, _ = self.backup_merge()
        if not result:
            self.fail("Re-merge after killing the first merge failed: {0}".format(output))
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_merge_backup_with_partial_backup(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup and kill backup process
6. Merge backup. Merge should fail
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
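        # Start another backup and kill cbbackupmgr partway through, leaving an incomplete backup behind.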
backup_threads = []
backup_thread = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread)
backup_thread.start()
backup_kill_thread = Thread(target=self._kill_cbbackupmgr)
backup_threads.append(backup_kill_thread)
backup_kill_thread.start()
for backup_thread in backup_threads:
backup_thread.join()
self.backupset.number_of_backups += 1
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 3
self.merged = True
status, output, error = self.backup_merge()
if status:
self.fail("This merge should fail due to last backup killed, not complete yet")
elif "Merging backup failed" in error:
self.log.info("Test failed as expected as last backup failed to complete")
status, output, message = self.backup_list()
if not status:
self.fail(message)
def _kill_cbbackupmgr(self):
"""
kill all cbbackupmgr processes
"""
self.sleep(1, "times need for cbbackupmgr process run")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
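        # On Linux/macOS find the cbbackupmgr PID with ps and kill -9 it; on Windows use tasklist/taskkill.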
if self.os_name != "windows":
cmd = "ps aux | grep cbbackupmgr | gawk '{print $2}' | xargs kill -9"
output, _ = shell.execute_command(cmd)
else:
cmd = "tasklist | grep cbbackupmgr | gawk '{printf$2}'"
output, _ = shell.execute_command(cmd)
if output:
kill_cmd = "taskkill /F /T /pid %d " % int(output[0])
output, _ = shell.execute_command(kill_cmd)
if output and "SUCCESS" not in output[0]:
self.fail("Failed to kill cbbackupmgr on windows")
shell.disconnect()
def test_merge_backup_with_purge_deleted_keys(self):
"""
1. Load 100K docs to a bucket A with key 1
2. Delete 50K docs from bucket A
3. Load 50K docs with key 2 to bucket A
4. Take backup
5. Run compaction on each vbucket to purge all delete keys
6. Load again 25K docs with key 3
7. Run backup again
8. Load another 25K docs with key 4
9. Run backup. It should not fail
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.log.info("Delete half docs of 1st batch")
delete_gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items // 2)
self._load_all_buckets(self.master, delete_gen, "delete", 0)
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items // 2)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
nodes = []
upto_seq = 100000
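        # Compact each vbucket so tombstones of the deleted keys are purged before the next backups are taken.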
self.log.info("Start compact each vbucket in bucket")
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in RestConnection(self.master).get_buckets():
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if found:
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
shell.compact_vbuckets(len(bucket.vbuckets), cluster_nodes, upto_seq)
shell.disconnect()
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if not found:
self.log.info("Load another docs to bucket %s " % bucket.name)
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items // 4)
self._load_bucket(bucket, self.master, create_gen3, "create",
self.expire_time)
self.backup_cluster()
create_gen4 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items // 4)
self._load_bucket(bucket, self.master, create_gen4, "create",
self.expire_time)
self.backup_cluster()
self.backupset.end = 3
status, output, message = self.backup_merge()
if not status:
self.fail(message)
else:
self.fail("cbcompact failed to purge deleted key")
def test_merge_backup_with_failover_logs(self):
"""
1. Load 100K docs into bucket.
2. Wait for all docs persisted.
3. Stop persistence.
4. Load another 100K docs to bucket.
5. Kill memcached will generate about 4 failover logs.
./cbstats localhost:11210 -u username -p pass failovers | grep num_entries
6. Take backup.
7. Load another 100K docs
8. Take backup again.
Verify:
Only 1st backup is full backup
All backup after would be incremental backup
          In 4.5.1, all backups would be full backups
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
failed_persisted_bucket = []
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in self.buckets:
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
bucket.name, 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append(bucket.name)
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.log.info("Stop persistence at each node")
clusters = copy.deepcopy(cluster_nodes)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for bucket in self.buckets:
for node in clusters:
shell.execute_command("%scbepctl%s %s:11210 -b %s stop" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
bucket.name))
shell.disconnect()
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.sleep(5)
self.log.info("Crash cluster via kill memcached")
for node in clusters:
for server in self.servers:
if node.ip == server.ip:
num_entries = 4
reach_num_entries = False
while not reach_num_entries:
shell = RemoteMachineShellConnection(server)
shell.kill_memcached()
ready = False
while not ready:
if not RestHelper(RestConnection(server)).is_ns_server_running():
self.sleep(10)
else:
ready = True
cmd = "%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries " \
"| gawk%s '{printf $2}' | grep -m 5 '4\|5\|6\|7'" \
% (self.cli_command_location, self.cmd_ext, server.ip,
"cbadminbucket", "password", self.cmd_ext)
output, error = shell.execute_command(cmd)
shell.disconnect()
if output:
self.log.info("number failover logs entries reached. %s " % output)
reach_num_entries = True
self.backup_create()
self.log.info("Start backup data")
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Load 3rd batch docs")
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen3, "create", 0)
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_backupmgr_with_short_option(self):
"""
Test short option flags at each option
"""
cmd = "%scbbackupmgr%s " % (self.cli_command_location, self.cmd_ext)
cmd += "%s " % self.input.param("command", "backup")
options = " -%s %s " % (self.input.param("repo", "-repo"),
self.backupset.name)
options += " -%s %s" % (self.input.param("archive", "-archive"),
self.backupset.directory)
if self.input.param("command", "backup") != "list":
options += " -%s http://%s:%s" % (self.input.param("cluster", "-cluster"),
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
options += " -%s Administrator" % self.input.param("bkusername", "-username")
options += " -%s password" % self.input.param("bkpassword", "-password")
self.backup_create()
shell = RemoteMachineShellConnection(self.backupset.backup_host)
output, error = shell.execute_command("%s %s " % (cmd, options))
shell.log_command_output(output, error)
shell.disconnect()
if error:
self.fail("There is a error in %s " % error)
def test_backupmgr_help_display(self):
"""
        Test that the help manual is displayed for each option.
        We do not compare the whole content, only the first
        few lines, to make sure the manual page is displayed.
"""
display_option = self.input.param("display", "-h")
if self.input.param("subcommand", None) is None:
subcommand = ""
else:
subcommand = self.input.param("subcommand", None)
if subcommand == "list":
subcommand = "info"
cmd = "{0}cbbackupmgr{1} ".format(self.cli_command_location, self.cmd_ext)
if display_option == "--help":
display_option = self.long_help_flag
elif display_option == "-h":
self.long_help_flag = self.short_help_flag
cmd += " {0} {1} ".format(subcommand, display_option)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
output, error = shell.execute_command("{0} ".format(cmd))
self.log.info("Verify print out help message")
if display_option == "-h":
if subcommand == "":
content = ['cbbackupmgr [<command>] [<args>]', '',
' backup Backup a Couchbase cluster']
elif subcommand == "help":
content = ['cbbackupmgr help [<command>] [<args>]', '',
' archivelayout View the archive directory layout structure']
else:
content = ['cbbackupmgr {0} [<args>]'.format(subcommand), '',
'Required Flags:']
self.validate_help_content(output[:3], content)
elif display_option == "--help":
content = None
if subcommand == "":
content = \
['CBBACKUPMGR(1) Couchbase Server Manual CBBACKUPMGR(1)']
self.validate_help_content(output, content)
else:
subcmd_cap = subcommand.upper()
content = \
['CBBACKUPMGR-{0}(1) Couchbase Server Manual CBBACKUPMGR-{1}(1)'\
.format(subcmd_cap, subcmd_cap)]
self.validate_help_content(output, content)
if self.bkrs_flag is not None:
self.assertTrue(self._check_output(self.bkrs_flag, output),
"Missing flag {0} in help content".format(self.bkrs_flag))
shell.disconnect()
def test_cbbackupmgr_help_contains_objstore_info(self):
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
supports_read_only = ['restore']
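        # Every cloud-capable subcommand must document the object-store flags;
        # only 'restore' additionally documents --obj-read-only.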
for sub_command in ['backup', 'collect-logs', 'config', 'examine', 'info', 'remove', 'restore']:
output, error = remote_client.execute_command(f"{self.cli_command_location}/cbbackupmgr {sub_command} -h")
if error:
self.fail(f"Expected to be able to get help for {sub_command}")
arguments = ['--obj-access-key-id', '--obj-cacert', '--obj-endpoint', '--obj-no-ssl-verify',
'--obj-region', '--obj-secret-access-key', '--obj-staging-dir', '--s3-force-path-style',
'--obj-log-level']
if sub_command in supports_read_only:
arguments.append('--obj-read-only')
for argument in arguments:
found = False
for line in output:
found = found or argument in line
self.assertTrue(found, f"Expected to find help about {argument}")
def test_backup_restore_with_optional_flags(self):
"""
1. Create a bucket
2. Load docs to bucket
3. Backup with optional flags like no-ssl-verify, secure-conn
4. Verify backup data in backup file
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.backup_create()
verify_data = True
output, error = self.backup_cluster()
if self.backupset.secure_conn:
if self.backupset.bk_no_cert:
if self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
elif self._check_output("Error", output):
verify_data = False
else:
if not self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
if verify_data:
self.validate_backup_data(self.backupset.backup_host,
self.servers[:self.nodes_init],
"ent-backup", False, False, "memory",
self.num_items, None)
if self.do_restore:
self.log.info("Restore with secure connection")
self.backup_restore()
def test_restore_with_filter_regex(self):
"""
1. Create a bucket
2. Load docs to bucket with key patterned
3. Backup docs
4. Delete bucket
5. Restore docs with regex
6. Verify only key or value in regex restored to bucket
NOTE: This test requires a specific config/ini to run correctly; if provided with an incorrect config
testrunner will restore data into the bucket that was backed up on the same cluster without performing a
flush. This will mean cbbackupmgr will restore with conflict resolution enabled and the validation will find
an unexpected amount of keys (all of them) in the target bucket.
"""
key_name = "ent-backup"
if self.backupset.random_keys:
key_name = "random_keys"
self.validate_keys = self.input.param("validate_keys", False)
if self.validate_keys:
gen = BlobGenerator(key_name, "ent-backup-", self.value_size,
end=self.num_items)
else:
gen = DocumentGenerator('random_keys', '{{"age": {0}}}', list(range(100)),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
self.backup_restore()
self.merged = False
regex_check = self.backupset.filter_keys
if not self.backupset.filter_keys:
regex_check = self.backupset.filter_values
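        # Verify that only the keys/values matching the filter regex made it into the restored bucket.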
self.validate_backup_data(self.backupset.backup_host,
[self.backupset.restore_cluster_host],
key_name, False, False, "memory",
self.num_items, None,
validate_keys=self.validate_keys,
regex_pattern=regex_check)
def test_backup_with_rbac(self):
"""
1. Create a cluster
        2. Create a bucket and load data
3. Create a user with specific role
param in conf: new_user
param in conf: new_role
Roles:
admin, ro_admin, cluster_admin, bucket_full_access[*], bucket_admin[*],
views_admin[*],
replication_admin, roadmin_no_access, cluster_admin_no_access,
bucket_admin_no_access, view_admin_no_access, replication_admin_no_access,
view_replication_admin, replication_ro_admin, bucket_view_replication_admin,
4. Run backup with new user created
5. Verify if backup command handles user role correctly
"""
all_buckets = self.input.param("all_buckets", False)
backup_failed = False
if self.create_fts_index:
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)), start=0,
end=self.num_items)
index_definition = INDEX_DEFINITION
index_name = index_definition['name'] = "age"
fts_server = self.get_nodes_from_services_map(service_type="fts")
rest_fts = RestConnection(fts_server)
try:
self.log.info("Create fts index")
rest_fts.create_fts_index(index_name, index_definition)
except Exception as ex:
self.fail(ex)
else:
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_views:
self._create_views()
self.backup_create()
if all_buckets:
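            # Scope each role to all buckets by appending '[*]'; cluster-wide admin roles must not carry the wildcard.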
if "-" in self.cluster_new_role:
self.cluster_new_role = "[*],".join(self.cluster_new_role.split("-")) + "[*]"
else:
self.cluster_new_role = self.cluster_new_role + "[*]"
admin_roles = ["cluster_admin", "eventing_admin"]
for role in admin_roles:
if role in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace(role + "[*]", role)
self.log.info("\n***** Create new user: {0} with role: {1} to do backup *****"\
.format(self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "{0}".format(self.cluster_new_user),
"name": "{0}".format(self.cluster_new_user),
"password": "password"}]
rolelist = [{"id": "{0}".format(self.cluster_new_user),
"name": "{0}".format(self.cluster_new_user),
"roles": "{0}".format(self.cluster_new_role)}]
users_can_backup_all = ["admin", "bucket_full_access[*]",
"data_backup[*]", "eventing_admin",
"cluster_admin", "backup_admin"]
users_can_not_backup_all = ["views_admin[*]", "replication_admin",
"replication_target[*]", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"ro_admin", "bucket_admin[*]", "cluster_admin"]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: {0} with role: {1} " \
.format(self.cluster_new_user,
self.cluster_new_role))
output, error = self.backup_cluster()
success_msg = 'Backup completed successfully'
fail_msg = ["Error backing up cluster:"]
for bucket in self.buckets:
fail_msg.append('Backed up bucket "{0}" failed'.format(bucket.name))
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
rest_bk = RestConnection(self.backupset.cluster_host)
eventing_service_in = False
bk_cluster_services = list(rest_bk.get_nodes_services().values())
for srv in bk_cluster_services:
if "eventing" in srv:
eventing_service_in = True
eventing_err = ["Invalid permissions to backup eventing data",
"cluster.eventing.functions!manage"]
if eventing_service_in and self._check_output(eventing_err, output) and \
("admin" not in self.cluster_new_role or \
"eventing_admin" not in self.cluster_new_role):
self.log.info("Only admin or eventing_admin role could backup eventing service")
else:
self.fail("User {0} failed to backup data.\n"
.format(self.cluster_new_role) + \
"Here is the output {0} ".format(output))
elif self.cluster_new_role in users_can_not_backup_all:
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to backup")
else:
backup_failed = True
status, _, message = self.backup_list()
if not status:
self.fail(message)
if self.do_verify and not backup_failed:
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup,
self.buckets,
self.skip_consistency,
self.per_node)
self.log.info("*** Start to validate data in merge backup ")
result = self.validate_backup_data(self.backupset.backup_host,
[self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.validate_backup_views()
except Exception as e:
if e:
print(("Exception error: ", e))
if self.cluster_new_role in users_can_not_backup_all:
error_found = False
error_messages = ["Error backing up cluster: Forbidden",
"Could not find file shard_0.sqlite",
"Error backing up cluster: Invalid permissions",
"Database file is empty",
"Error backing up cluster: Unable to find the latest vbucket",
"Failed to backup bucket"]
if self.do_verify:
if str(e) in error_messages or backup_failed:
error_found = True
if not error_found:
raise Exception("cbbackupmgr does not block user role: {0} to backup" \
.format(self.cluster_new_role))
if self.cluster_new_role == "views_admin[*]" and self.create_views:
status, mesg = self.validate_backup_views(self.backupset.backup_host)
if not status:
raise Exception(mesg)
if "Expected error message not thrown" in str(e):
raise Exception("cbbackupmgr does not block user role: {0} to backup" \
.format(self.cluster_new_role))
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
self.fail(e)
finally:
if backup_failed:
self.log.info("cbbackupmgr blocked user: {0} to backup"\
.format(self.cluster_new_role))
self.log.info("Delete new create user: {0} ".format(self.cluster_new_user))
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
cmd = "{0}curl{1} -g -X {2} -u {3}:{4} http://{5}:8091/settings/rbac/users/local/{6}"\
.format(curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_restore_with_rbac(self):
"""
1. Create a backupdata set.
2. Setup cluster.
3. Restore data back to cluster
Important:
        This test needs to copy entbackup-mh.tgz
to /root or /cygdrive/c/Users/Administrator in backup host.
Files location: 172.23.121.227:/root/entba*.tgz
"""
all_buckets = self.input.param("all_buckets", False)
self.log.info("Copy backup dataset to tmp dir")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
# Since we are just wiping out the archive here, we can just run the object store teardown
if self.objstore_provider:
self.objstore_provider.teardown(shell.extract_remote_info().type.lower(), shell)
else:
shell.execute_command("rm -rf {0} ".format(self.backupset.directory))
shell.execute_command("rm -rf {0} ".format(self.backupset.directory.split("_")[0]))
backup_file = ENT_BKRS
backup_dir_found = False
backup_dir = "entbackup_{0}".format(self.master.ip)
output, error = shell.execute_command("ls | grep entbackup")
self.log.info("check if %s dir exists on this server " % backup_dir)
if output:
for x in output:
if x == backup_dir:
backup_dir_found = True
if not backup_dir_found:
self.log.info("%s dir does not exist on this server. Downloading.. "
% backup_dir)
shell.execute_command("{0} -q {1} --no-check-certificate -O {2}.tgz "
.format(self.wget, backup_file, backup_dir))
shell.execute_command("tar -zxvf {0}.tgz ".format(backup_dir))
shell.execute_command("mv {0} {1}".format(backup_dir.split("_")[0], backup_dir))
if "-" in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("-", ",")
if self.objstore_provider and self.objstore_provider.schema_prefix() == "s3://":
command = ""
if self.backupset.objstore_region or self.backupset.objstore_access_key_id or self.backupset.objstore_secret_access_key:
command += "env"
if self.backupset.objstore_region:
command += f" AWS_REGION={self.backupset.objstore_region}"
if self.backupset.objstore_access_key_id:
command += f" AWS_ACCESS_KEY_ID={self.backupset.objstore_access_key_id}"
if self.backupset.objstore_secret_access_key:
command += f" AWS_SECRET_ACCESS_KEY={self.backupset.objstore_secret_access_key}"
command += " aws"
if self.backupset.objstore_endpoint:
command += f" --endpoint={self.backupset.objstore_endpoint}"
command += f" s3 sync entbackup_{self.master.ip} s3://{self.backupset.objstore_bucket}/{self.backupset.directory}"
_, error = shell.execute_command(command, debug=False) # Contains senstive info so don't log
if error:
self.fail(f"Failed to sync backup to S3: {error}")
else:
shell.execute_command("cp -r entbackup_{0}/ {1}/entbackup_{0}"\
.format(self.master.ip, self.tmp_path))
status, _, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Restore data from backup files")
if all_buckets:
if "bucket_full_access" in self.cluster_new_role and \
"bucket_full_access[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("bucket_full_access",
"bucket_full_access[*]")
else:
self.cluster_new_role = self.cluster_new_role + "[*]"
if "data_backup" in self.cluster_new_role and \
"data_backup[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("data_backup",
"data_backup[*]")
if "fts_admin" in self.cluster_new_role and \
"fts_admin[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("fts_admin",
"fts_admin[*]")
admin_roles = ["cluster_admin", "eventing_admin"]
for role in admin_roles:
if role in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace(role + "[*]", role)
self.log.info("\n***** Create new user: %s with role: %s to do backup *****"
% (self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"password": "password"}]
rolelist = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"roles": "%s" % self.cluster_new_role}]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: %s with role: %s " \
% (self.cluster_new_user,
self.cluster_new_role))
users_can_restore_all = ["admin", "bucket_full_access[*]",
"data_backup[*]", "eventing_admin"]
users_can_not_restore_all = ["views_admin[*]", "ro_admin",
"replication_admin", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"replication_target[*]", "cluster_admin",
"bucket_admin[*]"]
if self.cluster_new_role in users_can_not_restore_all:
self.should_fail = True
output, error = self.backup_restore()
rest_rs = RestConnection(self.backupset.restore_cluster_host)
eventing_service_in = False
rs_cluster_services = list(rest_rs.get_nodes_services().values())
for srv in rs_cluster_services:
if "eventing" in srv:
eventing_service_in = True
eventing_err = "User needs one of the following permissions: cluster.eventing"
if eventing_service_in and self._check_output(eventing_err, output) and \
("admin" not in self.cluster_new_role or \
"eventing_admin" not in self.cluster_new_role):
self.log.info("Only admin role could backup eventing service")
return
success_msg = 'Restore completed successfully'
fail_msg = "Error restoring cluster:"
failed_persisted_bucket = []
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
"default", 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append("default")
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.sleep(3)
rest = RestConnection(self.master)
actual_keys = rest.get_active_key_count("default")
print(("\nActual keys in default bucket: %s \n" % actual_keys))
if self.cluster_new_role in users_can_restore_all:
if not self._check_output(success_msg, output):
self.fail("User with roles: %s failed to restore data.\n"
"Here is the output %s " % \
(self.cluster_new_role, output))
roles = []
if "," in self.cluster_new_role:
roles = self.cluster_new_role.split(",")
if set(roles) & set(users_can_not_restore_all) and \
set(roles) & set(users_can_restore_all):
if not self._check_output(success_msg, output):
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the output %s " % \
(self.cluster_new_user, roles, output))
if int(actual_keys) != 10000:
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the actual docs in bucket %s " % \
(self.cluster_new_user, roles, actual_keys))
elif self.cluster_new_role in users_can_not_restore_all:
if int(actual_keys) == 1000:
self.fail("User: %s with role: %s should not allow to restore data" \
% (self.cluster_new_user,
self.cluster_new_role))
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to restore")
finally:
self.log.info("Delete new create user: %s " % self.cluster_new_user)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
cmd = "%scurl%s -g -X %s -u %s:%s http://%s:8091/settings/rbac/users/local/%s" \
% (curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_backup_restore_with_nodes_reshuffle(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Enlists the default zone of the current cluster - backs up the cluster and validates
3. Creates a new zone - shuffles cluster host to new zone
4. Restores to cluster host and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.cluster_host)
zones = list(rest_conn.get_zone_names().keys())
source_zone = zones[0]
target_zone = "test_backup_restore"
self.log.info("Current nodes in group {0} : {1}".format(source_zone,
str(list(rest_conn.get_nodes_in_zone(source_zone).keys()))))
self.log.info("Taking backup with current groups setup")
self.backup_create()
self.backup_cluster_validate()
self.log.info("Creating new zone " + target_zone)
rest_conn.add_zone(target_zone)
self.log.info("Moving {0} to new zone {1}".format(self.backupset.cluster_host.ip, target_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], source_zone, target_zone)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
self.log.info("Restoring to {0} after group change".format(self.backupset.cluster_host.ip))
try:
self.log.info("Flush bucket")
rest_conn.flush_bucket()
self.backup_restore_validate()
except Exception as ex:
self.fail(str(ex))
finally:
self.log.info("Moving {0} back to old zone {1}".format(self.backupset.cluster_host.ip, source_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], target_zone, source_zone)
self.log.info("Deleting new zone " + target_zone)
rest_conn.delete_zone(target_zone)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
def test_backup_restore_with_firewall(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Enables firewall on cluster host and validates if backup cluster command throws expected error
4. Disables firewall on cluster host, takes backup and validates
5. Enables firewall on restore host and validates if backup restore command throws expected error
6. Disables firewall on restore host, restores and validates
"""
if self.os_name == "windows" or self.nonroot:
self.log.info("This firewall test does not run on windows or nonroot user")
return
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Enabling firewall on cluster host before backup")
RemoteUtilHelper.enable_firewall(self.backupset.cluster_host)
self.enable_firewall = True
try:
output, error = self.backup_cluster()
self.assertIn("failed to connect", output[0],
"Expected error not thrown by backup cluster when firewall enabled")
finally:
self.log.info("Disabling firewall on cluster host to take backup")
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying backup now")
self.backup_cluster_validate()
self.log.info("Enabling firewall on restore host before restore")
RemoteUtilHelper.enable_firewall(self.backupset.restore_cluster_host)
self.enable_firewall = True
""" reset restore cluster to same services as backup cluster """
try:
output, error = self.backup_restore()
mesg = "connect: connection refused"
if self.skip_buckets:
mesg = "Error restoring cluster:"
self.assertTrue(self._check_output(mesg, output),
"Expected error not thrown by backup restore when firewall enabled")
finally:
self.log.info("Disabling firewall on restore host to restore")
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying restore now")
self.skip_buckets = False
""" Need to reset restore node with services the same as in backup cluster """
rest = RestConnection(self.backupset.restore_cluster_host)
rest.force_eject_node()
master_services = self.get_services([self.backupset.cluster_host],
self.services_init, start_node=0)
info = rest.get_nodes_self()
if info.memoryQuota and int(info.memoryQuota) > 0:
self.quota = info.memoryQuota
rest.init_node()
self.sleep(10)
self.backup_restore_validate()
def test_backup_restore_with_audit(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Creates a backup of the cluster host - verifies if corresponding entry was created in audit log
4. Restores data on to restore host - verifies if corresponding entry was created in audit log
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
audit_obj = audit(AUDITBACKUPID, self.backupset.cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_create()
self.backup_cluster()
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='backup'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
audit_obj = audit(AUDITBACKUPID, self.backupset.restore_cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.restore_cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.restore_cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_restore()
audit_obj = audit(AUDITRESTOREID, self.backupset.restore_cluster_host)
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='restore'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
def _get_event_expected_results(self, action):
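        # A backup is audited as an opened DCP connection; a restore is audited as a
        # successful authentication to the cluster.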
if action == 'backup':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "opened DCP connection",
"id": AUDITBACKUPID,
"description": "opened DCP connection",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "{0}:11210".format(self.backupset.cluster_host.ip),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
elif action == 'restore':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "authentication succeeded",
"id": AUDITRESTOREID,
"description": "Authentication to the cluster succeeded",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "{0}:11210".format(self.backupset.restore_cluster_host.ip),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
return expected_results
def test_backup_restore_with_lesser_nodes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Adds another node to restore cluster and rebalances - note the test has to be run with nodes_init >= 3 so
           that the cluster host has more nodes than the restore host
3. Creates backupset on backup host
4. Creates backup of cluster host with 3 or more number of nodes and validates
5. Restores to restore host with lesser number of nodes (2) and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.add_node(self.input.clusters[0][1].rest_username, self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip)
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
def test_backup_with_full_disk(self):
"""
Things to be done before running this testcase:
- scripts/install.py has to be run with init_nodes=False
- scripts/cbqe3043.py has to be run against the ini file - this script will mount a 20MB partition on the
nodes required for the test
1. Creates specified bucket on the cluster and loads it with given number of items
2. Sets backup directory to the 20MB partition and creates a backupset
3. Fills up 20MB partition
4. Keeps taking backup until no space left on device error is hit
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.directory = "/cbqe3043/entbackup"
self.backup_create()
conn = RemoteMachineShellConnection(self.backupset.backup_host)
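        # Fill the small /cbqe3043 partition so that backups eventually hit 'no space left on device'.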
output, error = conn.execute_command("dd if=/dev/zero of=/cbqe3043/file bs=256M count=50")
conn.log_command_output(output, error)
output, error = self.backup_cluster()
while self._check_output("Backup completed successfully", output):
gen = BlobGenerator("ent-backup{0}{0}".format(randint(1, 10000)), "ent-backup-",
self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
output, error = self.backup_cluster()
error_msg = "no space left on device"
self.assertTrue(self._check_output(error_msg, output),
"Expected error message not thrown by backup when disk is full")
self.log.info("Expected error thrown by backup command")
conn.execute_command("rm -rf /cbqe3043/file")
conn.disconnect()
def test_backup_and_restore_with_map_buckets(self):
"""
1. Creates specified buckets on the cluster and loads it with given number
of items - memcached bucket has to be created for this test
(memcached_buckets=1)
2. Creates a backupset, takes backup of the cluster host and validates
3. Executes list command on the backup and validates that memcached bucket
has been skipped
4. Restores the backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_gsi:
self.create_indexes()
self.backup_create()
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail("Getting backup list to validate memcached buckets failed.")
for line in output:
self.assertTrue("memcached_bucket0" not in line,
"Memcached bucket found in backup list output after backup")
self.log.info("Memcached bucket not found in backup list output after backup as expected")
self.backup_restore()
def test_backup_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number
of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts
erlang process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
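        # Kill the erlang process on the cluster host mid-backup, then restart couchbase-server;
        # the backup is still expected to complete successfully.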
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
conn.start_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with erlang crash and restart within 180 seconds")
self.log.info("Backup succeeded with erlang crash and restart within 180 seconds")
conn.disconnect()
def test_backup_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts couchbase server
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
conn.start_couchbase()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with couchbase stop and start within 180 seconds")
self.log.info("Backup succeeded with couchbase stop and start within 180 seconds")
def test_backup_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts memcached process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached()
conn.unpause_memcached()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with memcached crash and restart within 180 seconds")
self.log.info("Backup succeeded with memcached crash and restart within 180 seconds")
def test_backup_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills erlang process
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
if self.os_name != "windows":
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang(self.os_name)
output = backup_result.result(timeout=200)
if self.debug_logs:
print(("Raw output from backup run: ", output))
error_mesgs = ["Error backing up cluster: Not all data was backed up due to",
"No connection could be made because the target machine actively refused it."]
error_found = False
for error in error_mesgs:
if self._check_output(error, output):
error_found = True
if not error_found:
raise("Expected error message not thrown by Backup 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_backup_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills couchbase server
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output(
"Error backing up cluster: Not all data was backed up due to connectivity issues.", output),
"Expected error message not thrown by Backup 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Backup 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_backup_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills memcached process
        4. Waits for 200s and validates the backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached(self.os_name)
self.sleep(17, "time needs for memcached process completely stopped")
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
output = backup_result.result(timeout=200)
mesg = "Error backing up cluster: Unable to find the latest vbucket sequence numbers"
self.assertTrue(self._check_output(mesg, output),
"Expected error message not thrown by Backup 180 seconds after memcached crash")
self.log.info("Expected error thrown by Backup 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
self.sleep(30)
conn.disconnect()
def test_restore_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts erlang process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
conn.start_couchbase()
conn.disconnect()
timeout_now = 600
output = restore_result.result(timeout=timeout_now)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with erlang crash and restart within 180 seconds")
self.log.info("Restore succeeded with erlang crash and restart within 180 seconds")
def test_restore_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts couchbase process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
self.sleep(10)
conn.start_couchbase()
conn.disconnect()
output = restore_result.result(timeout=500)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with couchbase stop and start within 180 seconds")
self.log.info("Restore succeeded with couchbase stop and start within 180 seconds")
def test_restore_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts memcached process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
conn.unpause_memcached(self.os_name)
conn.disconnect()
output = restore_result.result(timeout=600)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with memcached crash and restart within 400 seconds")
self.log.info("Restore succeeded with memcached crash and restart within 400 seconds")
def test_restore_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills erlang process
        4. Waits for 200s and validates the restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
output = restore_result.result(timeout=300)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase", output),
"Expected error message not thrown by Restore 180 seconds after erlang crash")
self.log.info("Expected error thrown by Restore 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills couchbase server
        4. Waits for 200s and validates the restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
output = restore_result.result(timeout=300)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase due to connectivity issues.", output),
"Expected error message not thrown by Restore 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Restore 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills memcached process
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
output, error = self.backup_restore()
self.assertTrue(self._check_output(
"Error restoring cluster: failed to connect", output),
"Expected error message not thrown by Restore 180 seconds after memcached crash")
self.log.info("Expected error thrown by Restore 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
conn.disconnect()
self.sleep(30)
def test_backup_merge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Takes specified number of backups (param number_of_backups - should be at least 2 for this test case)
3. Executes list command and validates if all backups are present
4. Randomly selects a start and end and merges the backups
        5. Executes list command again and validates if the new merged set of backups is listed
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
""" remove last 6 chars of offset time in backup name"""
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
backup_name = bk_info["backups"][i]["date"]
if self.debug_logs:
print("backup name ", backup_name)
print("backup set ", self.backups)
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in info command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Initial number of backups did not match")
self.log.info("Initial number of backups matched")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)
status, output, message = self.backup_merge(check_for_panic=True)
if not status:
self.fail(message)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
backup_name = bk_info["backups"][i]["date"]
if self.debug_logs:
print("backup name ", backup_name)
print("backup set ", self.backups)
backup_count += 1
if backup_name in self.backups:
self.log.info("{0} matched in info command output".format(backup_name))
else:
self.fail("Didn't expect backup date {0} from the info command output" \
" to be in self.backups (the list of exepected backup dates" \
" after a merge)".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Merged number of backups did not match")
self.log.info("Merged number of backups matched")
def test_backup_merge_with_restore(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - restores from the backups and validates
3. Merges both the backups - restores from merged backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed: {0}".format(error))
self.log.info("Finished restoring backup before merging")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.backupset.start = 1
self.backupset.end = 1
rest = RestConnection(self.backupset.restore_cluster_host)
rest.flush_bucket()
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed")
self.log.info("Finished restoring backup after merging")
def test_backup_merge_with_unmerged(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - merges them into one
3. Takes 2 more backups - merges the new backups with already merged ones and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
self.log.info("Merging existing incremental backups")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Taking more backups")
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 3
self.log.info("Merging new backups into already merged backup")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Successfully merged new backups with already merged backup")
def test_merge_backup_with_multi_threads(self):
"""
1. Create a cluster with default bucket
2. Load default bucket with key1
3. Create backup with default one thread
4. Load again to bucket with key2
5. Create backup with 2 threads
6. Merge backup. All backup should contain doc key1 and key2
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
gen = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_cluster(self.threads_count)
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
if int(self.backupset.number_of_backups) == 2:
self.backupset.end = 2
elif int(self.backupset.number_of_backups) > 2:
self.backupset.end = randrange(self.backupset.start,
self.backupset.number_of_backups + 1)
self.merged = True
status, output, _ = self.backup_merge()
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.backup_cluster_validate(skip_backup=True)
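    # The merge tests pick their --start/--end window with randrange. The static
    # sketch below mirrors the selection used in test_backup_merge, kept separate
    # so the invariant (1 <= start < end <= number_of_backups, which requires at
    # least two backups) is easy to see; the helper name is hypothetical.
    @staticmethod
    def _pick_merge_range(number_of_backups):
        """Return a random (start, end) pair with 1 <= start < end <= number_of_backups."""
        start = randrange(1, number_of_backups)            # never the last backup
        end = randrange(start + 1, number_of_backups + 1)  # strictly after start
        return start, end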
def test_backup_purge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with purge option
6. Validates the old backup is deleted and new backup is created successfully
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
old_backup_name = ""
new_backup_name = ""
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
output = backup_result.result(timeout=200)
self.log.info(str(output))
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
old_backup_name = bk_info["backups"][i]["date"]
self.log.info("Backup name before purge: " + old_backup_name)
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
output, error = self.backup_cluster()
if error or not self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
new_backup_name = bk_info["backups"][i]["date"]
self.log.info("Backup name after purge: " + new_backup_name)
# Once the purge (and backup) have completed we shouldn't see any orphaned multipart uploads
if self.objstore_provider:
self.assertEqual(
self.objstore_provider.num_multipart_uploads(), 0,
"Expected all multipart uploads to have been purged (all newly created ones should have also been completed)"
)
self.assertNotEqual(old_backup_name, new_backup_name,
"Old backup name and new backup name are same when purge is used")
self.log.info("Old backup name and new backup name are not same when purge is used")
def test_backup_resume(self):
"""
1. Creates specified bucket on the cluster and loads it with given
number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with resume option
6. Validates the old backup is resumes and backup is completed successfully
"""
num_vbuckets = self.input.param("num_vbuckets", None)
if num_vbuckets:
remote_client = RemoteMachineShellConnection(self.backupset.cluster_host)
command = (
f"curl -X POST -u {self.master.rest_username}:{self.master.rest_password}"
f" {self.master.ip}:8091/diag/eval -d 'ns_config:set(couchbase_num_vbuckets_default, {num_vbuckets}).'"
)
output, _ = remote_client.execute_command(command)
if 'ok' not in output[0]:
self.fail(f"failed to reduce the number of vBuckets {num_vbuckets}")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.bk_with_stop_and_resume(iterations=self.input.param("iterations", 1),
remove_staging_directory=self.input.param("remove_staging_directory", False))
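    # test_backup_resume above optionally lowers the default vBucket count through
    # the ns_server /diag/eval endpoint before loading data. The sketch below
    # factors that curl call into a helper for readability; it reuses the exact
    # command built above and the helper name is hypothetical.
    def _set_default_num_vbuckets(self, num_vbuckets):
        """Set couchbase_num_vbuckets_default on the cluster host via /diag/eval."""
        remote_client = RemoteMachineShellConnection(self.backupset.cluster_host)
        command = (
            f"curl -X POST -u {self.master.rest_username}:{self.master.rest_password}"
            f" {self.master.ip}:8091/diag/eval"
            f" -d 'ns_config:set(couchbase_num_vbuckets_default, {num_vbuckets}).'"
        )
        output, _ = remote_client.execute_command(command)
        remote_client.disconnect()
        if not output or 'ok' not in output[0]:
            self.fail(f"failed to set the number of vBuckets to {num_vbuckets}")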
def test_backup_restore_with_deletes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset - backs up data and validates
3. Perform deletes
4. Restore data and validate
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self._load_all_buckets(self.master, gen, "delete", 0)
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_failover(self):
"""
1. Test should be run with 2 nodes in cluster host (param: nodes_init = 2)
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset - backs up data and validates
4. Fails over the second node with specified type (param: graceful = True | False)
5. Sets recovery type to specified value (param: recoveryType = full | delta)
6. Adds back the failed over node and rebalances
7. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
nodes_all = rest.node_statuses()
for node in nodes_all:
if node.ip == self.servers[1].ip:
rest.fail_over(otpNode=node.id, graceful=self.graceful)
self.sleep(30)
try:
rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
except Exception as e:
if "Set RecoveryType failed" in str(e):
self.sleep(15)
rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
rest.add_back_node(otpNode=node.id)
rebalance = self.cluster.async_rebalance(self.servers, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
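    # test_backup_restore_with_failover above fails over the second node, recovers
    # it and rebalances before restoring. The sketch below captures that sequence
    # with the same RestConnection calls used above (fail_over, set_recovery_type,
    # add_back_node, async_rebalance); the helper name is hypothetical and the
    # retry on set_recovery_type from the test is intentionally omitted for brevity.
    def _failover_and_recover_node(self, target_ip, graceful, recovery_type):
        """Fail over the node with `target_ip`, add it back and rebalance."""
        rest = RestConnection(self.backupset.cluster_host)
        for node in rest.node_statuses():
            if node.ip != target_ip:
                continue
            rest.fail_over(otpNode=node.id, graceful=graceful)
            self.sleep(30, "wait for the failover to settle")
            rest.set_recovery_type(otpNode=node.id, recoveryType=recovery_type)
            rest.add_back_node(otpNode=node.id)
        self.cluster.async_rebalance(self.servers, [], []).result()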
def test_backup_restore_after_offline_upgrade(self):
"""
        1. Test has to be supplied initial_version to be installed; creates
           default bucket and loads data into it.
        2. Backs up the cluster, verifies data and deletes the default bucket
        3. Upgrades cluster to upgrade_version and re-creates the default bucket
4. Restores data and validates
Params:
backup_service_test (bool): Import repository and restore using the backup service.
"""
upgrade_version = self.input.param("upgrade_version", "5.0.0-3330")
if upgrade_version == "5.0.0-3330":
self.fail("\n *** Need param 'upgrade_version=' to run")
backup_service_test = self.input.param("backup_service_test", False)
if backup_service_test:
backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)
self.cli_command_location = "/opt/couchbase/bin"
self._install(self.servers)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],
[])
rebalance.result()
self.add_built_in_server_user()
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.buckets = RestConnection(self.master).get_buckets()
self.total_buckets = len(self.buckets)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.sleep(5)
BucketOperationHelper.delete_bucket_or_assert(self.master, "default", self)
""" Start to upgrade """
if self.force_version_upgrade:
upgrade_version = self.force_version_upgrade
upgrade_threads = self._async_update(upgrade_version=upgrade_version,
servers=self.servers[:2])
for th in upgrade_threads:
th.join()
self.log.info("Upgraded to: {ver}".format(ver=upgrade_version))
self.sleep(30)
""" Re-create default bucket on upgrade cluster """
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(5)
# Create a backup node and perform a backup service import repository and restore
if backup_service_test:
backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])
backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, "my_repo")
backup_service_hook.backup_service.take_one_off_restore("imported", "my_repo", 20, 20)
backup_service_hook.cleanup()
return
""" Only server from Spock needs build in user
to access bucket and other tasks
"""
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
self.add_built_in_server_user()
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
self.master.ip))
RbacBase().create_user_source(testuser, 'builtin', self.master)
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
backupsets = [self.backupset]
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
def test_backup_restore_after_online_upgrade(self):
"""
1. Test has to be supplied initial_version to be installed and
upgrade_version to be upgraded to
2. Installs initial_version on the servers
3. Load data and backup in pre-upgrade
4. Install upgrade version on 2 nodes. Use swap rebalance to upgrade
cluster
5. Operation after upgrade cluster
6. Restores data and validates
"""
if self.initial_version[:1] == "5" and self.upgrade_versions[0][:1] >= "7":
self.log.error("\n\n\n*** ERROR: Direct upgrade from {0} to {1} does not support.\
Test will skip\n\n"\
.format(self.initial_version[:5], self.upgrade_versions[0][:5]))
return
servers = copy.deepcopy(self.servers)
self.vbuckets = self.initial_vbuckets
if len(servers) != 4:
self.fail("\nThis test needs exactly 4 nodes to run! ")
self._install(servers)
count = 0
nodes_fail_to_install = []
for server in servers:
ready = RestHelper(RestConnection(server)).is_ns_server_running(60)
if ready:
count += 1
else:
nodes_fail_to_install.append(server.ip)
if count < len(servers):
self.fail("Some servers may not install Couchbase server: {0}"\
.format(nodes_fail_to_install))
if not self.disable_diag_eval_on_non_local_host:
self.enable_diag_eval_on_non_local_hosts()
cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
self.master.rest_username,
self.master.rest_password)
cmd += '-d "path_config:component_path(bin)."'
bin_path = subprocess.check_output(cmd, shell=True)
try:
bin_path = bin_path.decode()
except AttributeError:
pass
if "bin" not in bin_path:
self.fail("Check if cb server install on %s" % self.master.ip)
else:
self.cli_command_location = bin_path.replace('"', '') + "/"
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],
[servers[int(self.nodes_init) - 1]], [])
rebalance.result()
self.sleep(15)
self.add_built_in_server_user()
rest = RestConnection(self.master)
cb_version = rest.get_nodes_version()
initial_compression_mode = "off"
if 5.5 > float(cb_version[:3]):
self.compression_mode = initial_compression_mode
rest.create_bucket(bucket='default', ramQuotaMB=512,
compressionMode=self.compression_mode)
self.buckets = rest.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
""" create index """
if self.create_gsi:
if "5" > rest.get_nodes_version()[:1]:
if self.gsi_type == "forestdb":
self.fail("Need to set param self.gsi_type=memory_optimized")
rest.set_indexer_storage_mode(storageMode="memory_optimized")
else:
rest.set_indexer_storage_mode(storageMode="plasma")
self.create_indexes()
self.backup_create()
if self.backupset.number_of_backups > 1:
self.log.info("Start doing multiple backup")
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
else:
self.backup_cluster_validate()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.sleep(5)
self.backup_list()
""" Start to online upgrade using swap rebalance """
self.initial_version = self.upgrade_versions[0]
if self.force_version_upgrade:
self.initial_version = self.force_version_upgrade
self.sleep(self.sleep_time,
"Pre-setup of old version is done. Wait for online upgrade to: "
"{0} version".format(self.initial_version))
self.product = 'couchbase-server'
self._install(servers[2:])
self.sleep(self.sleep_time,
"Installation of new version is done. Wait for rebalance")
self.log.info(
"Rebalanced in upgraded nodes and rebalanced out nodes with old version")
add_node_services = [self.add_node_services]
if "-" in self.add_node_services:
add_node_services = self.add_node_services.split("-")
self.cluster.rebalance(servers, servers[2:], servers[:2],
services=add_node_services)
self.sleep(15)
self.backupset.cluster_host = servers[2]
""" Upgrade is done """
self.log.info("** Upgrade is done **")
healthy = False
timeout = 0
while not healthy:
healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()
if not healthy:
if timeout == 120:
self.fail("Node %s is not ready after 2 mins" % self.backupset.cluster_host)
else:
self.sleep(5, "Wait for server up ")
timeout += 5
else:
healthy = True
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
servers[2].ip))
RbacBase().create_user_source(testuser, 'builtin', servers[2])
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')
self.log.info(status)
if self.backupset.number_of_backups_after_upgrade:
self.backupset.number_of_backups += \
self.backupset.number_of_backups_after_upgrade
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
self.add_built_in_server_user(node=servers[2])
for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):
self.log.info("_backup_restore_with_ops #{0} started...".format(i))
validate_dir_struct = True
if i > 2:
validate_dir_struct = False
self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,
validate_directory_structure=validate_dir_struct)
self.backup_list()
""" merged after upgrade """
if self.after_upgrade_merged:
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
self.backup_list()
backupsets = [self.backupset]
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
if self.bucket_flush:
self.log.info("Start to flush bucket")
rest = RestConnection(servers[2])
rest.flush_bucket()
else:
self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
""" Re-create default bucket on upgrade cluster """
RestConnection(servers[2]).create_bucket(bucket='default',
ramQuotaMB=512,
compressionMode=self.compression_mode)
self.sleep(5)
self.total_buckets = len(self.buckets)
if self.after_upgrade_merged:
self.backupset.end = 1
""" restore back to cluster """
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
if self.create_gsi:
self.verify_gsi()
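    # The online-upgrade test above busy-waits for the upgraded cluster host to
    # report healthy before continuing. The sketch below expresses that wait loop
    # as a reusable helper; it relies only on RestHelper.is_cluster_healthy() as
    # used above, and the helper name is hypothetical.
    def _wait_for_cluster_healthy(self, server, timeout=120, poll_interval=5):
        """Poll `server` every `poll_interval` seconds until healthy or `timeout` elapses."""
        waited = 0
        while not RestHelper(RestConnection(server)).is_cluster_healthy():
            if waited >= timeout:
                self.fail("Node %s is not ready after %s seconds" % (server, timeout))
            self.sleep(poll_interval, "Wait for server up ")
            waited += poll_interval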
def test_backup_restore_with_python_sdk(self):
"""
1. Note that python sdk has to be installed on all nodes before running this test
2. Connects to default bucket on cluster host using Python SDK
           - loads specified number of items
        3. Creates a backupset, backs up data and validates
4. Restores data and validates
5. Connects to default bucket on restore host using Python SDK
        6. Retrieves cas and flags of each doc on both cluster and restore host
- validates if they are equal
"""
testuser = [{'id': 'default', 'name': 'default', 'password': 'password'}]
rolelist = [{'id': 'default', 'name': 'default', 'roles': 'admin'}]
self.add_built_in_server_user(testuser, rolelist)
try:
cb = Bucket('couchbase://' + self.backupset.cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on cluster host"
" using python SDK")
else:
self.fail("Failed to establish connection to bucket on cluster host"
" using python SDK")
except Exception as ex:
self.fail(str(ex))
self.log.info("Loading bucket with data using python SDK")
for i in range(1, self.num_items + 1):
cb.upsert("doc" + str(i), "value" + str(i))
cluster_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
cluster_host_data[key] = {}
cluster_host_data[key]["cas"] = str(value_obj.cas)
cluster_host_data[key]["flags"] = str(value_obj.flags)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
self.add_built_in_server_user(testuser, rolelist, self.backupset.restore_cluster_host)
try:
cb = Bucket('couchbase://' + self.backupset.restore_cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on restore host " \
"using python SDK")
else:
self.fail("Failed to establish connection to bucket on restore " \
"host using python SDK")
except Exception as ex:
self.fail(str(ex))
restore_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
restore_host_data[key] = {}
restore_host_data[key]["cas"] = str(value_obj.cas)
restore_host_data[key]["flags"] = str(value_obj.flags)
self.log.info("Comparing cluster host data cas and flags against restore host data")
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
if cluster_host_data[key]["cas"] != restore_host_data[key]["cas"]:
self.fail("CAS mismatch for key: {0}".format(key))
if cluster_host_data[key]["flags"] != restore_host_data[key]["flags"]:
self.fail("Flags mismatch for key: {0}".format(key))
self.log.info("Successfully validated cluster host data cas and flags " \
"against restore host data")
def test_backup_restore_with_flush(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset - backs up data and validates
4. Flushes the bucket
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.flush_bucket()
self.log.info("Flushed default bucket - restoring data now..")
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_recreate(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset - backs up data and validates
4. Deletes the bucket and recreates it
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.delete_bucket()
bucket_name = "default"
rest_helper = RestHelper(rest)
rest.create_bucket(bucket=bucket_name, ramQuotaMB=512)
bucket_ready = rest_helper.vbucket_map_ready(bucket_name)
if not bucket_ready:
self.fail("Bucket {0} is not created after 120 seconds.".format(bucket_name))
self.log.info("Deleted {0} bucket and recreated it - restoring it now.."\
.format(bucket_name))
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_create_negative_args(self):
"""
Validates error messages for negative inputs of create command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
cmd = "config"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
# ['cbbackupmgr config [<args>]', '', 'Required Flags:', '', ' -a,--archive The archive directory to use', ' -r,--repo The name of the backup repository to create and', ' configure', '', 'Optional Flags:', '', ' --exclude-buckets A comma separated list of buckets to exclude from', ' backups. All buckets except for the ones specified', ' will be backed up.', ' --include-buckets A comma separated list of buckets to back up. Only', ' buckets in this list are backed up.', ' --disable-bucket-config Disables backing up bucket configuration', ' information', ' --disable-views Disables backing up view definitions', ' --disable-gsi-indexes Disables backing up GSI index definitions', ' --disable-ft-indexes Disables backing up Full Text index definitions', ' --disable-data Disables backing up cluster data', ' -h,--help Prints the help message', '']
self.assertEqual(output[0], "cbbackupmgr config [<args>]", "Expected error message not thrown")
cmd = "config --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "config --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "config --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
self.backup_create()
cmd = "config --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertEqual(output[0], "Backup repository creation failed: Backup Repository `backup` exists",
"Expected error message not thrown")
def test_objstore_negative_args(self):
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = f"{self.cli_command_location}/cbbackupmgr"
# Run all the sub_commands with the (non-objstore) required arguments (so that we are actually checking the
# correct error)
for sub_command in ['backup -a archive -r repo -c localhost -u admin -p password',
'collect-logs -a archive',
'config -a archive -r repo',
'examine -a archive -r repo -k asdf --bucket asdf',
'info -a archive',
'remove -a archive -r repo',
'restore -a archive -r repo -c localhost -u admin -p password']:
# Check all the object store arguments (ones that require an argument have one provided so that we are
# validating cbbackupmgr and not cbflag).
for argument in ['--obj-access-key-id asdf',
'--obj-cacert asdf',
'--obj-endpoint asdf',
'--obj-log-level asdf',
'--obj-no-ssl-verify',
'--obj-region asdf',
'--obj-secret-access-key asdf']:
# Check all the common object store commands
output, error = remote_client.execute_command(f"{command} {sub_command} {argument}")
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = "cloud arguments provided without the cloud scheme prefix"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about providing cloud arguments without the cloud schema prefix")
# Check all the S3 specific arguments
if self.objstore_provider.schema_prefix() == 's3://':
for argument in ['--s3-force-path-style']:
output, error = remote_client.execute_command(f"{command} {sub_command} {argument}")
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg_obj = "s3 arguments provided without the archive 's3://' schema prefix"
if "bucket" in sub_command:
error_mesg_obj = "Unknown flag: --bucket"
self.assertIn(error_mesg_obj, output[0],
"Expected an error about providing S3 specific arguments without the s3:// schema prefix")
# Check all the common objstore flags that require arguments without providing arguments. This is testing
# cbflag.
for argument in ['--obj-access-key-id',
'--obj-cacert',
'--obj-endpoint',
'--obj-log-level',
'--obj-region',
'--obj-secret-access-key']:
# Check that common object store arguments that require a value throw the correct error when a value
# is omitted.
output, error = remote_client.execute_command(
f"{command} {sub_command.replace("archive", self.objstore_provider.schema_prefix() + "archive")} --obj-staging-dir staging {argument}"
)
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = f"Expected argument for option: {argument}"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about providing cloud arguments without a value")
# Test omitting the staging directory argument
output, error = remote_client.execute_command(
f"{command} {sub_command.replace("archive", self.objstore_provider.schema_prefix() + "archive")}"
)
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = "you must provide the '--obj-staging-dir' argument"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about not supplying the '--obj-staging-dir' argument")
def test_backup_cluster_restore_negative_args(self):
"""
Validates error messages for negative inputs of cluster or restore command - command parameter
decides which command to test
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd_to_test = self.input.param("command", "backup")
if cmd_to_test == "restore":
cmd = cmd_to_test + " --archive {0} --repo {1} --host http://{2}:{3} --username {4} \
--password {5}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
if "7.0.1" in self.cb_version:
self.assertIn("Error restoring cluster: Backup backup doesn't contain any backups", output[-1])
else:
self.assertIn("Error restoring cluster: Repository 'backup' doesn't contain any backups", output[-1])
self.backup_cluster()
cmd = cmd_to_test
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
cmd_test = cmd_to_test
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
self.assertEqual(output[0], "cbbackupmgr {} [<args>]".format(cmd_test))
cmd = cmd_to_test + " --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = cmd_to_test + " --archive xyz -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output("archive directory '{0}xyz' does not exist".format(self.root_path), output))
cmd = cmd_to_test + " --archive {0} -c http://localhost:8091 -u Administrator -p password".format(
self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -c http://localhost:8091 -u Administrator -p password -r".format(
self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -u Administrator -p password".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -c/--cluster",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c -u Administrator -p password -r repo".format(
self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: -c", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c http://{2}:{3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error backing up cluster: cluster credentials required, expected --username/--password or --client-cert/--client-key",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --username", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -p/--password",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo abc --cluster http://{1}:{2} --username {3} \
--password {4}".format(self.backupset.directory,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
part_message = "backing up"
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
if cmd_test == "restore":
part_message = 'restoring'
self.assertTrue("Error {0} cluster: Backup Repository `abc` not found"\
.format(part_message) in output[-1],
"Expected error message not thrown. Actual output %s " % output[-1])
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster abc --username {2} \
--password {3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output(f"Error {part_message} cluster: failed to connect to any host(s) from the connection string", output), "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username abc \
--password {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username {4} \
--password abc".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
def test_backup_list_negative_args(self):
"""
Validates error messages for negative inputs of list command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "info"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr info [<args>]", "Expected error message not thrown")
cmd = "info --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "info --archive xyz".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue(self._check_output("archive directory '{0}xyz' does not exist".format(self.root_path), output))
def test_backup_compact_negative_args(self):
"""
Validates error messages for negative inputs of compact command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "compact"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr compact [<args>]",
"Expected error message not thrown")
cmd = "compact --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive",
"Expected error message not thrown")
cmd = "compact --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1}".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: --backup",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup" \
.format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --backup",
"Expected error message not thrown")
cmd = "compact --archive abc --repo {0} --backup {1}" \
.format(self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertIn("not a directory", output[-1])
cmd = "compact --archive {0} --repo abc --backup {1}" \
.format(self.backupset.directory, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output("Backup Repository `abc` not found", output),
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup abc".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Compacting incr backup `backup` of backup `abc` failed:" in output[-1],
"Expected error message not thrown")
def test_backup_merge_negative_args(self):
"""
Validates error messages for negative inputs of merge command
"""
# This error message is thrown when an invalid date range format is supplied to cbbackupmgr.
invalid_range_format_error = "Error merging data: invalid range format, expected two indexes or two dates; the keywords [start, oldest, end, latest] are also valid"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "merge"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr merge [<args>]", "Expected error message not thrown")
cmd = "merge --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "merge --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -r".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start start --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error merging data: Repository 'backup' doesn't contain any backups",
"Expected error message not thrown")
self._take_n_backups(n=2)
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start bbb --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], invalid_range_format_error, "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --start", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2}".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1} --end aa".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], invalid_range_format_error, "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --end", "Expected error message not thrown")
cmd = "merge --archive xyz --repo {0} --start {1} --end {2}".format(self.backupset.name,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: archive directory '{0}xyz' does not exist".format(self.root_path) in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo abc --start {1} --end {2}".format(self.backupset.directory,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: Backup Repository `abc` not found" in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start abc --end {2}".format(self.backupset.directory,
self.backupset.name, self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(invalid_range_format_error in output[-1], "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end abc".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(invalid_range_format_error in output[-1], "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end {3}".format(self.backupset.directory,
self.backupset.name,
self.backups[1], self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Error merging data: invalid range start cannot be before end" in output[-1], "Expected error message not thrown")
def test_backup_remove_negative_args(self):
"""
Validates error messages for negative inputs of remove command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "remove"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr remove [<args>]", "Expected error message not thrown")
cmd = "remove --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "remove --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "remove --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "remove --archive xyz --repo {0}".format(self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Removing backup repository failed: archive directory '{0}xyz' does not exist".format(self.root_path) in output[-1],
"Expected error message not thrown")
cmd = "remove --archive {0} --repo xyz".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertIn("Backup Repository `xyz` not found", output[-1])
def test_backup_restore_with_views(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple view on source cluster
4. Backs up data and validates it
5. Restores data and validates it
6. Ensures that the same view is created in the restore cluster
"""
if "ephemeral" in self.input.param("bucket_type", 'membase'):
self.log.info("\n****** views are not supported on ephemeral buckets ******")
return
rest_src = RestConnection(self.backupset.cluster_host)
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index', 'n1ql'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['index', 'kv'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
default_map_func = "function (doc) {\n emit(doc._id, doc);\n}"
default_view_name = "test"
default_ddoc_name = "ddoc_test"
prefix = "dev_"
query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
view = View(default_view_name, default_map_func)
task = self.cluster.async_create_view(self.backupset.cluster_host,
default_ddoc_name, view, "default")
task.result()
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
try:
result = self.cluster.query_view(self.backupset.restore_cluster_host,
prefix + default_ddoc_name,
default_view_name, query, timeout=30)
self.assertEqual(len(result['rows']), self.num_items,
"Querying view on restore cluster did not return expected number of items")
self.log.info("Querying view on restore cluster returned expected number of items")
except TimeoutError:
self.fail("View could not be queried in restore cluster within timeout")
def test_backup_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
4. Backs up data and validates it
5. Restores data and validates it
6. Ensures that the same GSI index is created in the restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
self.cluster_storage_mode = \
rest_src.get_index_settings()["indexer.settings.storage_mode"]
self.log.info("index storage mode: {0}".format(self.cluster_storage_mode))
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index', 'n1ql'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
self.test_storage_mode = self.cluster_storage_mode
if "ephemeral" in self.bucket_type:
self.log.info("ephemeral bucket needs to set backup cluster to memopt for gsi.")
self.test_storage_mode = "memory_optimized"
self.quota = self._reset_storage_mode(rest_src, self.test_storage_mode)
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
rest_src.create_bucket(bucket='default', ramQuotaMB=int(self.quota) - 1,
bucketType=self.bucket_type,
evictionPolicy="noEviction")
self.add_built_in_server_user(node=self.backupset.cluster_host)
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)),
start=0, end=self.num_items)
self.buckets = rest_src.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
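# Create a GSI index named 'age' on the default bucket via cbindex, using the storage mode selected above.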
cmd = "cbindex -type create -bucket default -using %s -index age -fields=age " \
" -auth %s:%s" % (self.test_storage_mode,
self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
cmd = "cbindex -type list -auth %s:%s" % (self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
try:
if len(output) > 1:
index_name_path = "Index:{0}/{1}".format(self.buckets[0].name, "age")
version = RestConnection(
self.backupset.restore_cluster_host).get_nodes_version()
if version[:1] >= "7":
index_name_path = "Index:{0}/_{0}/_{0}/{1}".format(self.buckets[0].name, "age")
self.assertTrue(self._check_output(index_name_path, output),
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
finally:
if "ephemeral" in self.bucket_type:
self.log.info("reset storage mode back to original")
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
self._reset_storage_mode(rest_src, self.cluster_storage_mode)
self._reset_storage_mode(rest_target, self.cluster_storage_mode)
def test_backup_merge_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
4. Backs up data and validates it
5. Restores data and validates it
6. Ensures that the same GSI index is created in the restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_src.add_node(self.servers[1].rest_username,
self.servers[1].rest_password,
self.servers[1].ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [],
[])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"Num1": {0}, "Num2": {1}}}',
list(range(100)), list(range(100)),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
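# Create a GSI index 'num1' on field Num1 via cbindex, using the forestdb storage engine.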
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num1 -fields=Num1"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num2 -fields=Num2"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [],
[])
rebalance.result()
start = self.number_of_backups_taken
end = self.number_of_backups_taken
self.backupset.start = start
self.backupset.end = end
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=")
cmd = "cbindex -type list"
remote_client = RemoteMachineShellConnection(
self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if len(output) > 1:
self.assertTrue("Index:default/Num1" in output[1],
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
def test_backup_restore_with_fts(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple FTS index on source cluster
4. Backs up data and validates it
5. Restores data and validates it
6. Ensures that the same FTS index is created in the restore cluster
"""
self.test_fts = True
rest_src = RestConnection(self.backupset.cluster_host)
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index', 'n1ql', 'fts'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'fts'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)), start=0,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
rest_src_fts = RestConnection(self.servers[1])
try:
from pytests.fts.fts_callable import FTSCallable
fts_obj = FTSCallable(nodes=self.servers, es_validate=False)
index = fts_obj.create_default_index(
index_name="index_default",
bucket_name="default")
fts_obj.wait_for_indexing_complete()
alias = fts_obj.create_alias(target_indexes=[index])
except Exception as ex:
self.fail(ex)
self.backup_cluster_validate()
if self.bucket_type != "ephemeral":
self._create_restore_cluster()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
rest_target_fts = RestConnection(self.input.clusters[0][1])
status = False
try:
status, content = rest_target_fts.get_fts_index_definition(index.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS index not found in restore cluster as expected")
self.log.info("FTS index found in restore cluster as expected")
status, content = rest_target_fts.get_fts_index_definition(alias.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS alias not found in restore cluster as expected")
self.log.info("FTS alias found in restore cluster as expected")
finally:
rest_src_fts.delete_fts_index(index.name)
rest_src_fts.delete_fts_index(alias.name)
if status:
rest_target_fts.delete_fts_index(index.name)
rest_target_fts.delete_fts_index(alias.name)
def test_backup_restore_with_xdcr(self):
"""
1. Creates a XDCR replication between first two servers
2. Creates specified bucket on the cluster and loads it with given number of items
3. Backs up data and validates it while replication is in progress
4. Restores data and validates it while replication is in progress
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_dest = RestConnection(self.servers[1])
try:
rest_src.remove_all_replications()
rest_src.remove_all_remote_clusters()
rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port, self.backupset.cluster_host_username,
self.backupset.cluster_host_password, "C2")
rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(10)
repl_id = rest_src.start_replication('continuous', 'default', "C2")
if repl_id is not None:
self.log.info("Replication created successfully")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
self.sleep(10)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
for task in tasks:
task.result()
finally:
rest_dest.delete_bucket()
def test_backup_restore_with_warmup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Warms up the cluster host
3. Backs up data and validates it while warmup is in progress
4. Restores data and validates it while warmup is in progress
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
NodeHelper.do_a_warm_up(self.backupset.cluster_host)
self.sleep(30)
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
# Only membase buckets have a warmup state.
if self.bucket_type == "membase":
NodeHelper.wait_warmup_completed([self.backupset.cluster_host])
def stat(self, key):
stats = StatsCommon.get_stats([self.master], 'default', "", key)
val = list(stats.values())[0]
if val.isdigit():
val = int(val)
return val
def load_to_dgm(self, active=75, ttl=0):
"""
Loads items until the bucket reaches the given active% resident (DGM) state,
where 'active' is an integer between 0 and 100.
"""
doc_size = 1024
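# Keep loading 1 KB documents in batches of 50k until the active resident ratio drops below the target.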
curr_active = self.stat('vb_active_perc_mem_resident')
# go into heavy dgm
while curr_active > active:
curr_items = self.stat('curr_items')
gen_create = BlobGenerator('dgmkv', 'dgmkv-', doc_size, start=curr_items + 1, end=curr_items + 50000)
try:
self._load_all_buckets(self.master, gen_create, "create", ttl)
except Exception:
pass
curr_active = self.stat('vb_active_perc_mem_resident')
def test_backup_restore_with_dgm(self):
"""
1. Creates specified bucket on the cluster and loads it until dgm
2. Creates a backup set
3. Backsup data and validates
4. Restores data and validates
"""
self.load_to_dgm()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_auto_compaction(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates auto compaction settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_auto_compaction(dbFragmentThresholdPercentage=80,
dbFragmentThreshold=100,
viewFragmntThresholdPercentage=80,
viewFragmntThreshold=100,
bucket="default")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_update_notifications(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates notification settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.update_notifications("true")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_alerts(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates alerts settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_merge_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=5)
try:
merge_result = self.cluster.async_merge_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
start=1, end=5,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
merge_result.result(timeout=400)
except TimeoutError:
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
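# Backup directories are named with a timestamp; count the ones that match backups we know about.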
for line in output:
if "entbackup" in line:
continue
if re.search(r"\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search(r"\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Number of backups after merge crash did not match")
self.log.info("Number of backups after merge crash matched")
def test_compact_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
status, output_before_compact, message = self.backup_list()
if not status:
self.fail(message)
try:
compact_result = self.cluster.async_compact_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
backup_to_compact=self.backupset.backup_to_compact,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
compact_result.result(timeout=400)
except TimeoutError:
status, output_after_compact, message = self.backup_list()
if not status:
self.fail(message)
status, message = self.validation_helper.validate_compact_lists(output_before_compact,
output_after_compact,
is_approx=True)
if not status:
self.fail(message)
self.log.info(message)
def test_backup_restore_misc(self):
"""
Misc scenarios for backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.name = "!@#$%^&"
output, error = self.backup_create()
self.assertTrue("Backup `!@#$%^` created successfully" in output[0],
"Backup could not be created with special characters")
self.log.info("Backup created with special characters")
self.backupset.name = "backup"
self.backup_create()
self.backup_cluster()
conn = RemoteMachineShellConnection(self.backupset.backup_host)
command = "ls -tr {0}/{1}/{2} | tail".format(self.backupset.directory, self.backupset.name, self.backups[0])
o, e = conn.execute_command(command)
data_dir = o[0]
conn.execute_command("dd if=/dev/zero of=/tmp/entbackup/backup/" +
str(self.backups[0]) +
"/" + data_dir + "/data/shard_0.sqlite" +
" bs=1024 count=100 seek=10 conv=notrunc")
output, error = self.backup_restore()
self.assertTrue("Restore failed due to an internal issue, see logs for details" in output[-1],
"Expected error not thrown when file is corrupt")
self.log.info("Expected error thrown when file is corrupted")
conn.execute_command("mv /tmp/entbackup/backup /tmp/entbackup/backup2")
conn.disconnect()
output, error = self.backup_restore()
self.assertTrue("Backup Repository `backup` not found" in output[-1], "Expected error message not thrown")
self.log.info("Expected error message thrown")
def test_backup_logs_for_keywords(self):
"""
Inspired by CBQE-6034.
1. Perform a Backup.
2. Scan backup logs for bad keywords.
Keywords:
1. CBQE-6034/MB-41131 - Check cbbackupmgr's build version/hash set correctly at build time
by scanning for 'cbbackupmgr version Unknown' in the logs.
2. Scan for 'panic' in the logs.
"""
# Populate the default bucket on self.master with documents
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
# Create backup archive and repository.
self.backup_create()
# Perform backup.
self.backup_cluster()
# Keywords to fail on (Keyword: str, at_start: bool, lines_before: int, lines_after: int)
bad_keywords = [
("cbbackupmgr version Unknown", False, 0, 0),  # Checks cbbackupmgr build version/hash is set correctly at build time
("panic", True, 0, 12)  # Checks for the 'panic' keyword at the start of a line
]
# Scan logs for keywords in bad_keywords
for keyword, at_start, lines_before, lines_after in bad_keywords:
found, output, error = \
self._check_output_in_backup_logs(keyword, at_start=at_start, lines_before=lines_before, lines_after=lines_after)
if found:
self.fail(f"Found bad keyword(s) '{keyword}' in backup logs:\n" + "\n".join(output))
# cbbackup restore enhancements are only available from Vulcan (5.5) onwards.
def test_cbbackupmgr_collect_logs(self):
"""
cbbackupmgr collect-logs collects logs into the archive, or
outputs them to any path supplied with the -o flag.
CB_ARCHIVE_PATH
ex: cbbackupmgr collect-logs -a /tmp/backup
cbbackupmgr collect-logs -a /tmp/backup -o /tmp/logs
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
self._collect_logs()
def test_cbbackupmgr_restore_with_ttl(self):
"""
cbbackupmgr restore --replace-ttl replaces the TTL
value with the one supplied via --replace-ttl-with
ex: cbbackupmgr restore --replace-ttl all --replace-ttl-with 0
"""
if "5.5" > self.cb_version[:3]:
self.fail("This restore with ttl test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
if self.replace_ttl == "expired":
if self.bk_with_ttl:
self._load_all_buckets(self.master, gen, "create", int(self.bk_with_ttl))
else:
self._load_all_buckets(self.master, gen, "create", 0)
else:
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
if self.bk_with_ttl:
self.sleep(int(self.bk_with_ttl) + 10, "wait items to be expired in backup")
compare_function = "=="
if self.replace_ttl_with:
compare_function = "<="
if self.should_fail:
self.backup_restore()
else:
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=compare_function)
def test_cbbackupmgr_restore_with_vbuckets_filter(self):
"""
cbbackupmgr restore --vbuckets-filter 2,3,4,5,6
This test may require a minimum of 2 server nodes to run.
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
self.num_items = 1000
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
if self.should_fail:
self.backup_cluster()
else:
self.backup_cluster_validate()
if self.restore_should_fail:
self.backup_restore()
else:
self.backup_restore_validate()
def test_cbbackupmgr_with_eventing(self):
"""
Create backup cluster with saslbucket (default_bucket=False).
Backup cluster (backup_before_eventing=True for MB-34077)
Create events
Backup cluster
Create restore cluster
Restore data back to restore cluster
Check that metadata is restored (backup_before_eventing=True)
Verify that events are restored
"""
if "5.5" > self.cb_version[:3]:
self.fail("This eventing test is only for cb version 5.5 and later. ")
from pytests.eventing.eventing_constants import HANDLER_CODE
from lib.testconstants import STANDARD_BUCKET_PORT
self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
self.create_functions_buckets = self.input.param('create_functions_buckets', True)
self.docs_per_day = self.input.param("doc-per-day", 1)
self.use_memory_manager = self.input.param('use_memory_manager', True)
self.backup_before_eventing = self.input.param('backup_before_eventing', False)
bucket_params = self._create_bucket_params(server=self.master, size=256,
replicas=self.num_replicas)
self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.src_bucket = RestConnection(self.master).get_buckets()
self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.backup_create()
if self.backup_before_eventing:
self.backup_cluster()
self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
self.restServer = self.get_nodes_from_services_map(service_type="eventing")
self.rest = RestConnection(self.restServer)
self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,
batch_size=self.batch_size)
function_name = "Function_{0}_{1}".format(randint(1, 1000000000), self._testMethodName)
self.function_name = function_name[0:90]
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
bk_events_created = False
rs_events_created = False
try:
self.deploy_function(body)
bk_events_created = True
self.backup_cluster()
rest_bk = RestConnection(self.backupset.cluster_host)
bk_fxn = rest_bk.get_all_functions()
backup_index = 0
if self.backup_before_eventing:
backup_index = 1
self.backupset.start = 1
self.backupset.end = 2
if bk_fxn != "":
self._verify_backup_events_definition(json.loads(bk_fxn), body, backup_index = backup_index)
self.backup_restore()
rest_rs = RestConnection(self.backupset.restore_cluster_host)
if self.backup_before_eventing:
self.assertTrue('metadata' in [bucket.name for bucket in rest_rs.get_buckets()])
self.bkrs_resume_function(body, rest_rs)
rs_events_created = True
self._verify_restore_events_definition(bk_fxn)
except Exception as e:
self.fail(e)
finally:
master_nodes = [self.backupset.cluster_host,
self.backupset.restore_cluster_host]
for node in master_nodes:
rest = RestConnection(node)
self.bkrs_undeploy_and_delete_function(body, rest, node)
self.rest = RestConnection(self.master)
raise Exception('Test failed. Just clean up eventing function until MB-47236 fixed')
def test_bkrs_logs_when_no_mutations_received(self):
"""
Test that we log an expected message when we don't receive any
mutations for more than 60 seconds. MB-33533.
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0. "
"Version was run with {}".format(version))
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.update_autofailover_settings(enabled=False,
timeout=0)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
# We need to wait until the data transfer starts before we pause memcached.
# Read the backup file output until we find evidence of a DCP connection,
# or the backup finishes.
backup_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = "tail -n 1 {}/logs/backup-*.log | grep ' (DCP) '"\
.format(self.backupset.directory)
Future.wait_until(
lambda: (bool(backup_client.execute_command(command)[0]) or backup_result.done()),
lambda x: x is True,
200,
interval_time=0.1,
exponential_backoff=False)
# If the backup finished and we never saw a DCP connection something's not right.
if backup_result.done():
self.fail("Never found evidence of open DCP stream in backup logs.")
# Pause memcached to trigger the log message.
cluster_client = RemoteMachineShellConnection(self.backupset.cluster_host)
cluster_client.pause_memcached(self.os_name, timesleep=200)
cluster_client.unpause_memcached(self.os_name)
cluster_client.disconnect()
backup_result.result(timeout=200)
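# cbbackupmgr should log one of these messages once the DCP stream has been idle (MB-33533).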
expected_message = "(timed out after 3m0s|Stream has been inactive for 1m0s)"
command = "cat {}/logs/backup-*.log | grep -E '{}' "\
.format(self.backupset.directory, expected_message)
output, _ = backup_client.execute_command(command)
if not output:
self.fail("Mutations were blocked for over 60 seconds, "
"but this wasn't logged.")
backup_client.disconnect()
def test_log_to_stdout(self):
"""
Test that if the log-to-stdout flag is provided cbbackupmgr will log to stdout
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0. "
"Version was run with {}".format(version))
self.backupset.log_to_stdout = True
# Test config
output, err = self.backup_create()
if err:
self.fail("Could not create backup directory")
# This is a line that is normally printed in the logs but should now instead be printed to stdout
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
# Test backup
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
output, err = self.backup_cluster()
if err:
self.fail("Could not backup")
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
self.backupset.force_updates = True
# Test restore
output, err = self.backup_restore()
if err:
self.fail("Could not restore")
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
def test_auto_select_threads(self):
"""
Test that the --auto-select-threads flag actually selects the threads
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0. "
"Version was run with {}".format(version))
self.backupset.auto_select_threads = True
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
# If the threads were auto-selected then a log message should appear
shell = RemoteMachineShellConnection(self.backupset.backup_host)
output, _ = shell.execute_command("cat {}/logs/backup-*.log | grep"
" '(Cmd) Automatically set the number"
" of threads to'".format(self.backupset.directory))
if not output:
self.fail("Threads were not automatically selected")
# Remove the logs and test the same thing for restore
shell.execute_command("rm -r {}/logs".format(self.backupset.directory))
self.backupset.force_updates = True
self.backup_restore()
output, _ = shell.execute_command("cat {}/logs/backup-*.log | grep"
" '(Cmd) Automatically set the number"
" of threads to'".format(self.backupset.directory))
if not output:
self.fail("Threads were not automatically selected")
shell.disconnect()
def test_backup_remove_take_backup_range(self):
"""
Test that the remove --backups flag accepts:
- backup index ranges, e.g. (0,3)
- backup directory name ranges
- dd-mm-yyyy date ranges
To do this, the steps are as follows:
1. Load some data into the cluster
2. Create 3 backups
3. Try the different inputs and verify expected outputs
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0. "
"Version was run with {}".format(version))
# Tests based on actual directory names have to be generated dynamically from the directory names.
test_ranges_positive_cases = [
"1,3", # valid index range
"10-01-2000,10-01-3000", # valid date range
]
test_range_invalid_cases = [
"1,-10", # invalid end range negative number
"0,100", # invalid range as there are only 3 backups
"2,0", # invalid range start bigger than end
"01/01/2000,01/01/3000", # invalid date format
"01-30-2000,01-30-3000", # invalid date format
]
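# A hedged sketch of the CLI forms exercised by the ranges above (flag names are
# taken from the docstring; the exact command is assembled by self.backup_remove()):
#   cbbackupmgr remove -a <archive> -r <repo> --backups 1,3
#   cbbackupmgr remove -a <archive> -r <repo> --backups 10-01-2000,10-01-3000
#   cbbackupmgr remove -a <archive> -r <repo> --backups <backup-dir-1>,<backup-dir-2>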
# Load some data into the cluster
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
for test in test_ranges_positive_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(n=3)
# remove the backup directory
success, _, _ = self.backup_remove(test)
if not success:
self.fail("Failed to remove backups")
self._verify_backup_directory_count(0)
self._delete_repo()
for test in test_range_invalid_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(n=3)
success, _, _ = self.backup_remove(test)
if success:
self.fail("Test should have failed")
self._verify_backup_directory_count(3)
self._delete_repo()
# Test based on dynamic file names
self.backup_create()
self._take_n_backups(n=3)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
command = (
f"ls -l {self.backupset.objstore_staging_directory + '/' if self.objstore_provider else ''}"
f"{self.backupset.directory}/{self.backupset.name}"
)
list_dir, _ = shell.execute_command(command)
list_dir = " ".join(list_dir)
shell.disconnect()
dir_names = re.findall(r'(?P<dir>\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}\.\d+(?:(?:[+-]\d{2}_\d{2})|Z))', list_dir)
dir_names.sort()
if len(dir_names) != 3:
self.fail("Expected 3 backups instead have {0}".format(len(dir_names)))
# test non existent directory name
success, _, _ = self.backup_remove("3000-09-30T10_42_37.64647+01_00")
if success:
self.fail("Should not be able to remove non existent directory")
self._verify_backup_directory_count(3)
# test start > backup start
success, _, _ = self.backup_remove("3000-09-30T10_42_37.64647+01_00,3000-09-30T10_43_37.64647+01_00")
if success:
self.fail("Should not be able to remove by directory range where the start is in the future")
self._verify_backup_directory_count(3)
# test start == backup start end > backup end
success, _, _ = self.backup_remove("{0}.64647+01_00,3000-09-30T10_43_37.64647+01_00".format(dir_names[0]))
if success:
self.fail("Should not be able to remove by directory range where the end is in the future")
self._verify_backup_directory_count(3)
# test start before end
success, _, _ = self.backup_remove("{0},{1}".format(dir_names[-1], dir_names[0]))
if success:
self.fail("Should not be able to remove by directory range where start is after end")
self._verify_backup_directory_count(3)
# test valid single directory
success, _, _ = self.backup_remove("{0}".format(dir_names[0]))
if not success:
self.fail("Should not have failed to remove directories by backup directory name")
self._verify_backup_directory_count(2)
# test valid
success, _, _ = self.backup_remove("{0},{1}".format(dir_names[1], dir_names[-1]))
if not success:
self.fail("Should not have failed to remove directories by backup directory name range")
self._verify_backup_directory_count(0)
def test_backup_merge_date_range(self):
"""
Test that the merge --date-range flag accepts:
- backup index ranges, e.g. (0,3)
- backup directory name ranges
- dd-mm-yyyy date ranges
To do this, the steps are as follows:
1. Load some data into the cluster
2. Create 3 backups
3. Try the different inputs and verify expected outputs
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0. "
"Version was run with {}".format(version))
# Tests based on actual directory names have to be generated dynamically from the directory names.
test_ranges_positive_cases = [
"0,2", # valid index range
"10-01-2000,10-01-3000", # valid date range
]
test_range_invalid_cases = [
"1,-10", # invalid end range negative number
"0,100", # invalid range as there are only 3 backups
"2,0", # invalid range start bigger than end
"01/01/2000,01/01/3000", # invalid date format
"01-30-2000,01-30-3000", # invalid date format
]
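# A hedged sketch of the CLI forms exercised by the ranges above (flag name is
# taken from the docstring; the exact command is assembled by self.backup_merge()):
#   cbbackupmgr merge -a <archive> -r <repo> --date-range 0,2
#   cbbackupmgr merge -a <archive> -r <repo> --date-range 10-01-2000,10-01-3000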
# Load some data into the cluster
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
for test in test_ranges_positive_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(3)
self.backupset.date_range = test
status, output, _ = self.backup_merge()
if not status:
self.fail("Failed to merge backups: {0}".format(output))
self._verify_backup_directory_count(1)
self._delete_repo()
for test in test_range_invalid_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(3)
self.backupset.date_range = test
status, output, _ = self.backup_merge()
if status:
self.fail("Test should have failed")
self._verify_backup_directory_count(3)
# Test based on dynamic file names
shell = RemoteMachineShellConnection(self.backupset.backup_host)
command = (
f"ls -l {self.backupset.objstore_staging_directory + '/' if self.objstore_provider else ''}"
f"{self.backupset.directory}/{self.backupset.name}"
)
list_dir, _ = shell.execute_command(command)
list_dir = " ".join(list_dir)
shell.disconnect()
dir_names = re.findall(r'(?P<dir>\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}\.\d+(?:(?:[+-]\d{2}_\d{2})|Z))', list_dir)
dir_names.sort()
if len(dir_names) != 3:
self.fail("Expected 3 backups instead have {0}".format(len(dir_names)))
# test start > backup start
self.backupset.date_range = "3000-09-30T10_42_37.64647+01_00,3000-09-30T10_43_37.64647+01_00"
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the start is in the future")
self._verify_backup_directory_count(3)
# test start == backup start end > backup end
self.backupset.date_range = "{0}.64647+01_00,3000-09-30T10_43_37.64647+01_00".format(dir_names[0])
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the end is in the future")
self._verify_backup_directory_count(3)
# test start before end
self.backupset.date_range = "{0},{1}".format(dir_names[-1], dir_names[0])
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the start is after the end")
self._verify_backup_directory_count(3)
# test valid
self.backupset.date_range = "{0},{1}".format(dir_names[0], dir_names[-1])
status, _, _ = self.backup_merge()
if not status:
self.fail("Should not have failed to merge")
self._verify_backup_directory_count(1)
def test_info_while_other_task_runs(self):
"""
Test that info can run at the same time as other backup tasks
1. Load some data to the cluster
2. Create a backup repository
3. Start an async backup
4. Constantly run info while the task is running
5. It should not produce any errors
:return:
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
# Test with backup
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
for i in range(10):
_, err = self.backup_info(True)
if err:
self.fail("Should have been able to run at the same time as the backup")
self.sleep(2)
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with concurrent info")
# Test with merge
self._take_n_backups(5)
merge_result = self.cluster.async_merge_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
start=1, end=5,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
for i in range(10):
_, err = self.backup_info(True)
if err:
self.fail("Should have been able to run at the same time as the merge")
self.sleep(2)
output = merge_result.result(timeout=200)
self.assertTrue(self._check_output("Merge completed successfully", output),
"Merge failed while running info at the same time")
def test_config_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_create(del_old_backup=False)
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_backup_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_cluster()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_info_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_info()
self.assertIn('the specified bucket does not exist', output[0].lower())
def test_restore_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
self.restore_only = True
output, _ = self.backup_restore()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_remove_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
_, output, _ = self.backup_remove()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_config_create_multiple_repos_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backupset.name = "another_repo"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
self.backup_create_validate()
def test_backup_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.backup_create_validate()
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
def test_info_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
output, error = self.backup_info()
if error:
self.fail(f"Expected to be able to info backup where staging directory has been removed: {error}")
self.assertEqual(json.loads(output[0])['count'], 1,
"Expected to find a single backup even though the staging directory was removed")
def test_restore_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
self.backup_restore_validate()
def test_remove_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
success, _, _ = self.backup_remove()
self.assertTrue(success, "Expected to have removed backups even though the staging directory was removed")
def test_restore_start_after_end(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 1
output, _ = self.backup_restore()
self.assertEqual(len(output), 1)
self.assertIn("range start", output[0])
self.assertIn("cannot be before end", output[0])
def test_restore_single_full_backup(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = 1
self._all_buckets_flush()
self.backup_restore_validate()
def test_restore_single_incr_backup(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 2
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_start_full_end_incr(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = 2
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_start_incr_end_full(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self.backup_cluster_validate()
self._load_all_buckets(self.master, gen, "create")
self.backupset.full_backup = True
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 3
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_cbbackup_with_big_rev(self):
# automation ticket MB-38683
# verified test failed in build 6.6.0-7680 and passed in 6.6.0-7685
from ep_mc_bin_client import MemcachedClient, MemcachedError
bucket = 'default'
value = "value"
expiry = 0
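# Use the maximum uint64 revision sequence number (2**64 - 1) to reproduce MB-38683.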
rev_seq = 2**64-1
key = 'test_with_meta'
mc = MemcachedClient(self.master.ip, 11210)
mc.sasl_auth_plain('Administrator', 'password')
mc.bucket_select(bucket)
self.log.info("pushing a key with large rev_seq {0} to bucket".format(rev_seq))
try:
mc.setWithMeta(key, 'value', 0, 0, rev_seq, 0x1512a3186faa0000)
meta_key = mc.getMeta(key)
self.log.info("key meta: {0}".format(meta_key))
except MemcachedError as error:
msg = "unable to push key : {0} error : {1}"
self.log.error(msg.format(key, error.status))
self.fail(msg.format(key, error.status))
client = RemoteMachineShellConnection(self.backupset.backup_host)
client.execute_command("rm -rf {0}/backup".format(self.tmp_path))
client.execute_command("mkdir {0}backup".format(self.tmp_path))
cmd = "{0}cbbackup{1} -u Administrator -p password http://{2}:8091 {3}backup"\
.format(self.cli_command_location, self.cmd_ext, self.master.ip, self.tmp_path)
try:
cbbackup_run = False
output, error = client.execute_command(cmd, timeout=20)
cbbackup_run = True
if not self._check_output("done", error):
self.fail("Failed to run cbbackup with large rev_seq")
except Exception as e:
if e and not cbbackup_run:
self.fail("Failed to run cbbackup with large rev_seq")
finally:
client.execute_command("rm -rf {0}/backup".format(self.tmp_path))
client.disconnect()
def test_backup_consistent_metadata(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
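# Run two backups concurrently; one of them is expected to fail with an archive-lock error.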
backup_threads = []
backup_thread_1 = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread_1)
backup_thread_1.start()
backup_thread_2 = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread_2)
backup_thread_2.start()
for backup_thread in backup_threads:
backup_thread.join()
consistent_metadata = False
for output in self.backup_outputs:
if self._check_output("Error backing up cluster: failed to lock archive", output):
consistent_metadata = True
if not consistent_metadata:
self.fail("Backup does not lock while running backup")
def test_restore_consistent_metadata(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster()
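# Run two restores concurrently; one of them is expected to fail with an archive-lock error.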
restore_threads = []
restore_thread_1 = Thread(target=self.backup_restore)
restore_threads.append(restore_thread_1)
restore_thread_1.start()
restore_thread_2 = Thread(target=self.backup_restore)
restore_threads.append(restore_thread_2)
self.create_bucket_count = 1
restore_thread_2.start()
count = 0
for restore_thread in restore_threads:
restore_thread.join()
consistent_metadata = False
for output in self.restore_outputs:
if self._check_output("Error restoring cluster: failed to lock archive", output):
consistent_metadata = True
break
if not consistent_metadata:
self.fail("Restore does not lock while running restore")
def test_info_backup_merge_remove(self, cluster, no_of_backups):
""" Test Scenario: Create Buckets, Load Documents, Take 'no_of_backups' Backups, Merge and Remove a Backup
This function creates a scenario in which:
1. Buckets are created and loaded with documents.
2. A variable number of Backups >=6 are taken.
3. Backups 2 to 4 are merged.
4. The second-to-last backup is removed.
Args:
cluster (list): A list of 'ServerInfo' objects that form the cluster to back up.
no_of_backups (int): The number of backups to perform.
"""
# Add built-in user cbadminbucket to backup cluster
self.add_built_in_server_user(node=self.backupset.cluster_host)
# Assemble cluster if more than 1 node in cluster
if len(cluster) > 1:
self.cluster.async_rebalance(cluster, cluster[1:], []).result()
# Take 'no_of_backups' backups
self.backup_create()
self._take_n_backups(n=no_of_backups)
# Merge
self.backupset.start, self.backupset.end = 2, 4
self.backup_merge()
# Remove a backup
self.backup_remove(self.backups.pop(-2), verify_cluster_stats=False)
def test_ee_only_features(self):
""" Test that EE only features do not work on CE servers
NOTE: PITR currently does nothing, so succeeds on CE.
This should be included when PITR is added properly
This is also true for:
Backing up users,
Auto rebuild of indexes
Params:
examine (bool): Whether to test examine.
merge (bool): Whether to test merge.
s3 (bool): Whether to test s3 cloud backup.
consistency_check (bool): Whether to test consistency_check.
coll_restore (bool): Whether to test collection/scope level restore.
"""
examine = self.input.param('examine', False)
merge = self.input.param('merge', False)
s3 = self.input.param('s3', False)
consistency_check = self.input.param('consistency_check', False)
coll_restore = self.input.param('coll_restore', False)
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = f"{self.cli_command_location}/cbbackupmgr"
sub_command = ""
self.backup_create()
if examine:
sub_command = 'examine -a archive -r repo -k asdf --collection-string asdf.asdf.asdf'
elif merge:
sub_command = 'merge -a archive -r repo'
elif s3:
sub_command = f'backup -a s3://backup -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password'
elif consistency_check:
sub_command = f'backup -a {self.backupset.directory} -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password --consistency-check 1'
elif coll_restore:
sub_command = f'restore -a {self.backupset.directory} -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password --include-data asdf.asdf.asdf'
if not sub_command:
self.fail("Must provide a subcommand!")
output, error = remote_client.execute_command(f"{command} {sub_command}")
self.log.info(f"ERROR from command: {error}")
self.log.info(f"OUTPUT from command: {output}")
if s3 and "7.0.0" in self.cb_version:
# The s3 error message differs slightly in 7.0.0
self.assertIn("an enterprise only feature", output[0])
else:
self.assertIn("an Enterprise Edition feature", output[0])
def test_analytics_synonyms(self):
""" Test analytics synonyms can be restored
Params:
dataverses (int): Number of dataverses to create.
datasets (int): Number of datasets to create.
synonyms (int): Number of synonyms to create.
"""
class Query:
""" A class to execute analytics queries """
def __init__(self, server, username, password):
self.restconn = RestConnection(server)
def execute(self, query):
return self.restconn.execute_statement_on_cbas(query, None)
def get_synonyms(self):
synonyms = set()
for result in json.loads(self.execute("select * from Metadata.`Synonym`"))['results']:
synonym = result['Synonym']
synonym_name = synonym['SynonymName']
synonym_target = synonym['ObjectDataverseName'] + '.' + synonym['ObjectName']
synonym_dataverse = synonym['DataverseName']
synonyms.add((synonym_name, synonym_target, synonym_dataverse))
return synonyms
def get_synonyms_count(self):
return json.loads(self.execute("select count(*) as count from Metadata.`Synonym`;"))['results'][0]['count']
class Dataset:
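""" An analytics dataset defined on a bucket, with an optional WHERE clause. """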
def __init__(self, name, bucket, clause=None):
self.name, self.bucket, self.clause = name, bucket, clause
def get_where_clause(self):
return f" WHERE {self.clause}" if self.clause else ""
class Synonym:
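""" An analytics synonym that points at a target of the form dataverse.dataset. """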
def __init__(self, name, target):
self.name, self.target = name, target
class Dataverse:
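""" An analytics dataverse holding a set of datasets and synonyms. """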
def __init__(self, name):
self.name = name
self.datasets = set()
self.synonyms = set()
def add_dataset(self, dataset):
self.datasets.add(dataset)
def add_synonym(self, synonym):
self.synonyms.add(synonym)
def next_dataset_name(self):
return f"dat_{len(self.datasets)}"
def next_synonym_name(self):
return f"syn_{len(self.synonyms)}"
class Analytics:
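""" An in-memory model of the dataverses, datasets and synonyms that are created on, and deleted from, the cluster. """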
def __init__(self, query):
self.query, self.dataverses = query, set()
def add_dataverse(self, dataverse):
self.dataverses.add(dataverse)
def next_dataverse_name(self):
return f"dtv_{len(self.dataverses)}"
def pick_target_for_synonym(self):
choices = [f"{dataverse.name}.{dataset.name}" for dataverse in self.dataverses for dataset in dataverse.datasets]
if choices:
return choice(choices)
return None
def create(self):
# Create dataverses and datasets
for dataverse in self.dataverses:
self.query.execute(f"CREATE dataverse {dataverse.name}")
for dataset in dataverse.datasets:
self.query.execute(f"CREATE DATASET {dataverse.name}.{dataset.name} ON {dataset.bucket}{dataset.get_where_clause()}")
# Create synonyms
for dataverse in self.dataverses:
for synonym in dataverse.synonyms:
self.query.execute(f"CREATE analytics synonym {dataverse.name}.{synonym.name} FOR {synonym.target}")
def delete(self):
for dataverse in self.dataverses:
for dataset in dataverse.datasets:
self.query.execute(f"DROP DATASET {dataverse.name}.{dataset.name}")
for synonym in dataverse.synonyms:
self.query.execute(f"DROP analytics synonym {dataverse.name}.{synonym.name}")
self.query.execute(f"DROP dataverse {dataverse.name}")
class AnalyticsTest:
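""" Builds the analytics model, backs it up, deletes the analytics entities and verifies the synonyms come back after a restore. """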
def __init__(self, backup, no_of_dataverses, no_of_datasets, no_of_synonyms, analytics_server):
# The base class
self.backup = backup
# Test parameters
self.no_of_dataverses, self.no_of_datasets, self.no_of_synonyms = no_of_dataverses, no_of_datasets, no_of_synonyms
# The number of synonyms that get created
self.no_of_synonyms_created = no_of_dataverses * no_of_synonyms
# The object that's used to run queries on the server running analytics
self.query = Query(analytics_server, analytics_server.rest_username, analytics_server.rest_password)
# The object that represents our current model of analytics
self.analytics = Analytics(self.query)
def test_analytics(self):
# Define the analytics model (i.e. which dataverses, datasets and synonyms are present)
for i in range(self.no_of_dataverses):
dataverse = Dataverse(self.analytics.next_dataverse_name())
self.analytics.add_dataverse(dataverse)
for j in range(self.no_of_datasets):
dataset = Dataset(dataverse.next_dataset_name(), 'default')
dataverse.add_dataset(dataset)
for j in range(self.no_of_synonyms):
synonym = Synonym(dataverse.next_synonym_name(), self.analytics.pick_target_for_synonym())
dataverse.add_synonym(synonym)
# Create dataverses, datasets and synonyms
self.analytics.create()
self.backup.assertEqual(self.query.get_synonyms_count(), self.no_of_synonyms_created)
# Create a repository
self.backup.backup_create()
# Take a backup
self.backup.backup_cluster()
# Delete all analytics related stuff
self.analytics.delete()
self.backup.assertEqual(self.query.get_synonyms_count(), 0)
# Perform a one off restore
self.backup.backup_restore()
synonyms = self.query.get_synonyms()
# Check synonyms have been restored
for dataverse in self.analytics.dataverses:
for synonym in dataverse.synonyms:
self.backup.assertIn((synonym.name, synonym.target, dataverse.name), synonyms)
# The server that will be reprovisioned with analytics
analytics_server = self.restore_cluster_host = self.servers[2]
# Add a server and provision it with analytics
self.add_server_with_custom_services(analytics_server, services=["cbas"])
# Wait for the analytics service to warm up
self.assertTrue(RestConnection(analytics_server).wait_until_cbas_is_ready(100))
# Run the analytics test
AnalyticsTest(self, self.input.param("dataverses", 5), self.input.param("datasets", 5), self.input.param("synonyms", 5), analytics_server).test_analytics()
def test_info_after_backup_merge_remove(self):
""" CBQE-5475: Test cbbackupmgr info comprehensively after performing backup, merge and remove
Test params:
flag_depth = [0,1,2,3]
check_tabular = [True, False]
check_all_flag = [True, False]
dgm_run = [True, False]
sasl_buckets >= 1
Comprehensive test: flag_depth=3,check_tabular=True,check_all_flag=True,dgm_run=True,sasl_buckets=2
Scenario:
Perform backup, merge and remove to mutate info output.
Cases tested:
flag_depth>=0: --archive,
flag_depth>=1: --archive --repo
flag_depth>=2: --archive --repo --backup
flag_depth>=3: --archive --repo --backup --collection-string in version>7.0/--bucket in version<=6.6
Output types tested for each of the previous cases:
check_tabular=False: only the --json flag is used (checks JSON output)
check_tabular=True: tabular output (no --json flag) is also parsed and checked against the JSON output
State of the --all flag:
check_all_flag=False:
no --all flag (e.g. --archive checks the contents of the archive only)
check_all_flag=True:
also tests the --all flag (e.g. --archive --all checks all repos in the archive, backups in repos, buckets in backups)
Total number of cases: 4 (cases) * 2 (output types) * 2 (all flag state) = 16
"""
import os
import pprint
import itertools
import parse_cbbackupmgr_info as parse_info
pp = pprint.PrettyPrinter(indent=4)
# Params
flag_depth = self.input.param('flag_depth', 3)
check_tabular = self.input.param('check_tabular', True)
check_all_flag = self.input.param('check_all_flag', True)
# The minimum number of backups is 6
min_backups = 6
no_of_backups = max(self.backupset.number_of_backups, min_backups)
if self.backupset.number_of_backups < min_backups:
self.log.warn("number_of_backups increased from {} to {}".format(self.backupset.number_of_backups, min_backups))
# Select backup cluster
cluster = [self.backupset.cluster_host]
# Create Buckets, Load Documents, Take n backups, Merge and Remove a Bucket
self.test_info_backup_merge_remove(cluster, no_of_backups)
# Create lists of expected output from the info command
types = set(['FULL', 'MERGE - FULL', 'MERGE - INCR', 'INCR'])
expected_archs = [os.path.basename(self.backupset.directory)]
expected_repos = [self.backupset.name]
expected_backs = {self.backupset.name: self.backups}
expected_bucks = [bucket.name for bucket in self.buckets]
def check_arch(arch, tabular=False):
""" Checks the archive dictionary.
Args:
arch (dict): A dictionary containing archive information.
Returns:
list: A list containing the repositories in the archive.
"""
expected_keys = [u'archive_uuid', u'name', u'repos']
self.assertTrue(set(expected_keys).issubset(arch.keys()))
archive_uuid, name, repos = [arch[key] for key in expected_keys]
# Check archive name is correct
self.assertTrue(name in expected_archs)
# Check repos names are correct
self.assertEqual(set(expected_repos), set(repo['name'] for repo in repos))
# Check repo size is > 0
self.assertTrue(all(repo['size'] > 0 for repo in repos))
# Check backup sizes are correct
self.assertTrue(all(repo['count'] == len(expected_backs[repo['name']]) for repo in repos))
return repos
def check_repo(repo, tabular=False):
""" Checks the repository dictionary.
Args:
repo (dict): A dictionary containing repository information.
Returns:
list: A list containing the backups in the repository.
"""
expected_keys = [u'count', u'backups', u'name', u'size']
self.assertTrue(set(expected_keys).issubset(repo.keys()))
count, backups, name, size = [repo[key] for key in expected_keys]
# Check repo name is correct
self.assertTrue(name in expected_repos)
# Check repo size is greater than 0
self.assertTrue(size > 0)
# Check number of backups is correct
self.assertEqual(len(backups), len(expected_backs[name]))
# Check backup names
self.assertEqual(set(backup['date'] for backup in backups), set(expected_backs[name]))
# Check backup types
self.assertTrue(set(backup['type'] for backup in backups).issubset(types))
# Check complete status
self.assertTrue(all(backup['complete'] for backup in backups))
return backups
def check_back(backup, tabular=False):
""" Checks the backup dictionary.
Args:
backup (dict): A dictionary containing backup information.
Returns:
list: A list containing the buckets in the backup.
"""
expected_keys = [u'complete', u'fts_alias', u'buckets',
u'source_cluster_uuid', u'source', u'date', u'type', u'events', u'size']
self.assertTrue(set(expected_keys).issubset(backup.keys()))
complete, fts_alias, buckets, source_cluster_uuid, source, date, _type_, events, size = \
[backup[key] for key in expected_keys]
# Check backup name is correct
self.assertTrue(date in self.backups)
# Check backup size is greater than 0
self.assertTrue(size > 0)
# Check type exists
self.assertTrue(_type_ in types)
# Check bucket names
self.assertEqual(set(bucket['name'] for bucket in buckets), set(expected_bucks))
# Check bucket sizes
self.assertTrue(all(bucket['size'] >= 0 for bucket in buckets))
# Check items are either 0 or equal to self.num_items
self.assertTrue(all(bucket['items'] in [0, self.num_items] for bucket in buckets))
return buckets
def check_buck(bucket, tabular=False):
""" Checks the bucket dictionary.
Args:
bucket (dict): A dictionary containing bucket information.
Returns:
None
"""
expected_keys = [u'index_count', u'views_count', u'items', u'mutations',
u'tombstones', u'fts_count', u'analytics_count', u'size', u'name']
self.assertTrue(set(expected_keys).issubset(bucket.keys()))
index_count, views_count, items, mutations, tombstones, fts_count, \
analytics_count, size, name = [bucket[key] for key in expected_keys]
# Check bucket name
self.assertTrue(name in expected_bucks)
# Check bucket size
self.assertTrue(size >= 0)
# Check bucket items
self.assertTrue(items in [0, self.num_items])
def print_tree(tree):
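""" Pretty-print the parsed info tree when debug logging is enabled. """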
if self.debug_logs:
pp.pprint(tree)
def parse_output(use_json, output):
""" Parses the JSON/Tabular output into a Python dictionary
Args:
use_json (bool): If True expects JSON output to parse. Otherwise, expects tabular data to parse.
output (list): JSON or Tabular data to parse into a dictionary.
Returns:
dict: A dictionary containing the parsed output.
"""
return json.loads(output[0]) if use_json else parse_info.construct_tree(output)
# Configure initial flags
json_options, all_flag_options = [True], [False]
# Enable tabular output tests
if check_tabular:
json_options.append(False)
# Enable all flag tests
if check_all_flag:
all_flag_options.append(True)
def output_logs(flag_depth, use_json, all_flag):
""" Outputs flags tested in current test case."""
use_json = "--json" if use_json else ""
all_flag = "--all" if all_flag else ""
flags = " ".join(["--archive", "--repo", "--backup", "--bucket"][: flag_depth + 1])
self.log.info("---")
self.log.info(f"Testing Flags: {flags} {use_json} {all_flag}")
self.log.info("---")
# Perform tests
for use_json, all_flag in itertools.product(json_options, all_flag_options):
output_logs(0, use_json, all_flag)
# cbbackupmgr info --archive
arch = parse_output(use_json, self.get_backup_info(json=use_json, all_flag=all_flag))
print_tree(arch)
repos = check_arch(arch)
if all_flag:
[check_buck(buck) for repo in repos for back in check_repo(repo) for buck in check_back(back)]
if flag_depth < 1:
continue
output_logs(1, use_json, all_flag)
# cbbackupmgr info --archive --repo
for repo_name in expected_repos:
repo = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name, all_flag=all_flag))
print_tree(repo)
backs = check_repo(repo)
if all_flag:
[check_buck(buck) for back in backs for buck in check_back(back)]
if flag_depth < 2:
continue
output_logs(2, use_json, all_flag)
# cbbackupmgr info --archive --repo --backup
for repo_name in expected_repos:
for back_name in expected_backs[repo_name]:
back = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name, backup=back_name, all_flag=all_flag))
print_tree(back)
bucks = check_back(back)
if all_flag:
[check_buck(buck) for buck in bucks]
if flag_depth < 3:
continue
output_logs(3, use_json, all_flag)
# cbbackupmgr info --archive --repo --backup --bucket
for repo_name in expected_repos:
for back_name in expected_backs[repo_name]:
for buck_name in expected_bucks:
buck = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name,
backup=back_name, collection_string=buck_name, all_flag=all_flag))
print_tree(buck)
check_buck(buck)
import os, re, copy, json, subprocess
from random import randrange, randint, choice
from threading import Thread
from couchbase_helper.cluster import Cluster
from membase.helper.rebalance_helper import RebalanceHelper
from couchbase_helper.documentgenerator import BlobGenerator, DocumentGenerator
from ent_backup_restore.enterprise_backup_restore_base import EnterpriseBackupRestoreBase
from ent_backup_restore.backup_service_upgrade import BackupServiceHook
from membase.api.rest_client import RestConnection, RestHelper, Bucket
from membase.helper.bucket_helper import BucketOperationHelper
from pytests.query_tests_helper import QueryHelperTests
#from lib.membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteUtilHelper, RemoteMachineShellConnection
from security.auditmain import audit
from security.rbac_base import RbacBase
from upgrade.newupgradebasetest import NewUpgradeBaseTest
from couchbase.bucket import Bucket
from couchbase_helper.document import View
from eventing.eventing_base import EventingBaseTest
from tasks.future import Future, TimeoutError
from xdcr.xdcrnewbasetests import NodeHelper
from couchbase_helper.stats_tools import StatsCommon
from testconstants import COUCHBASE_DATA_PATH, WIN_COUCHBASE_DATA_PATH, \
COUCHBASE_FROM_4DOT6, ENT_BKRS, ENT_BKRS_FTS
AUDITBACKUPID = 20480
AUDITRESTOREID = 20485
SOURCE_CB_PARAMS = {
"authUser": "default",
"authPassword": "",
"authSaslUser": "",
"authSaslPassword": "",
"clusterManagerBackoffFactor": 0,
"clusterManagerSleepInitMS": 0,
"clusterManagerSleepMaxMS": 20000,
"dataManagerBackoffFactor": 0,
"dataManagerSleepInitMS": 0,
"dataManagerSleepMaxMS": 20000,
"feedBufferSizeBytes": 0,
"feedBufferAckThreshold": 0
}
INDEX_DEFINITION = {
"type": "fulltext-index",
"name": "",
"uuid": "",
"params": {},
"sourceType": "couchbase",
"sourceName": "default",
"sourceUUID": "",
"sourceParams": SOURCE_CB_PARAMS,
"planParams": {}
}
class EnterpriseBackupRestoreTest(EnterpriseBackupRestoreBase, NewUpgradeBaseTest):
def setUp(self):
super().setUp()
self.users_check_restore = \
self.input.param("users-check-restore", '').replace("ALL", "*").split(";")
if '' in self.users_check_restore:
self.users_check_restore.remove('')
for server in [self.backupset.backup_host, self.backupset.restore_cluster_host]:
conn = RemoteMachineShellConnection(server)
conn.extract_remote_info()
conn.terminate_processes(conn.info, ["cbbackupmgr"])
conn.disconnect()
self.bucket_helper = BucketOperationHelper()
def tearDown(self):
super(EnterpriseBackupRestoreTest, self).tearDown()
def test_backup_create(self):
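""" Create a backup repository and validate it. """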
self.backup_create_validate()
def test_backup_restore_sanity(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Perform updates and create backups for specified number of times (test param number_of_backups)
3. Perform restores for the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("*** start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", self.expires)
self.log.info("*** done to load items to all buckets")
self.ops_type = self.input.param("ops-type", "update")
self.expected_error = self.input.param("expected_error", None)
if self.auto_failover:
self.log.info("Enabling auto failover on " + str(self.backupset.cluster_host))
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)
self.backup_create_validate()
for i in range(1, self.backupset.number_of_backups + 1):
if self.ops_type == "update":
self.log.info("*** start to update items in all buckets")
self._load_all_buckets(self.master, gen, "update", self.expires)
self.log.info("*** done update items in all buckets")
elif self.ops_type == "delete":
self.log.info("*** start to delete items in all buckets")
self._load_all_buckets(self.master, gen, "delete", self.expires)
self.log.info("*** done to delete items in all buckets")
self.sleep(10)
self.log.info("*** start to validate backup cluster")
self.backup_cluster_validate()
self.targetMaster = True
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.log.info("*** start to restore cluster")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
if self.reset_restore_cluster:
self.log.info("*** start to reset cluster")
self.backup_reset_clusters(self.cluster_to_restore)
if self.same_cluster:
self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
else:
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
rest = RestConnection(self.backupset.restore_cluster_host)
rest.force_eject_node()
rest.init_node()
self.log.info("Done reset cluster")
self.sleep(10)
""" Add built-in user cbadminbucket to second cluster """
self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])
self.backupset.start = start
self.backupset.end = end
self.log.info("*** start restore validation")
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=",
expected_error=self.expected_error)
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def test_backup_restore_after_rebalance(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
rebalance.result()
self.backup_cluster_validate()
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
def test_backup_restore_with_rebalance(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup while rebalance is going on
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster while rebalance is going on
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
self.sleep(10)
count = 0
while rebalance.state != "FINISHED":
if count == 0:
self.backup_cluster_validate()
count += 1
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
self.sleep(10)
count = 0
while rebalance.state != "FINISHED":
if count == 0:
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
count += 1
def test_backup_restore_with_ops(self):
"""
1. Create default bucket on the cluster and loads it with given number of items
2. Perform the specified ops (test param ops-type) and create backups for specified number of times
(test param number_of_backups)
3. Perform restores for the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
initial_gen = copy.deepcopy(gen)
initial_keys = []
for x in initial_gen:
initial_keys.append(x[0])
self.log.info("Start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.ops_type = self.input.param("ops-type", "update")
self.log.info("Create backup repo ")
self.backup_create()
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
if self.compact_backup and self.ops_type == "delete":
self.log.info("Start to compact backup ")
self.backup_compact_validate()
self.log.info("Validate deleted keys")
self.backup_compact_deleted_keys_validation(initial_keys)
self.log.info("start restore cluster ")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
self.backupset.start = start
self.backupset.end = end
self._backup_restore_with_ops(backup=False, compare_function=">=")
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def _backup_restore_with_ops(self, exp=0, backup=True, compare_uuid=False,
compare_function="==", replicas=False,
mode="memory", node=None, repeats=0,
validate_directory_structure=True):
self.ops_type = self.input.param("ops-type", "update")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self.log.info("Start doing ops: %s " % self.ops_type)
if node is None:
node = self.master
self._load_all_buckets(node, gen, self.ops_type, exp)
if backup:
self.backup_cluster_validate(repeats=repeats,
validate_directory_structure=validate_directory_structure)
else:
self.backup_restore_validate(compare_uuid=compare_uuid,
seqno_compare_function=compare_function,
replicas=replicas, mode=mode)
def test_backup_list(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_list_validate()
def test_backup_list_optional_switches(self):
"""
1. Creates specified buckets on the cluster and loads it with given number of items
Note: this test should be run with 2 buckets
2. Creates two backupsets
3. Creates two backups on each of the backupset
4. Executes list command with --name and validates
5. Executes list command with --name and --incr-backup and validates
6. Executes list command with --name, --incr-backup and --bucket-backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.name = "backup2"
self.backup_create(del_old_backup=False)
self._take_n_backups(n=2)
incr_names = 0
backup_name = False
warnning_mesg = "is either empty or it got interrupted"
self.backupset.backup_list_name = "backup"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warnning_mesg in line:
continue
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[0] in line:
incr_names += 1
if self.backups[1] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
incr_names = 0
backup_name = False
self.backupset.backup_list_name = "backup2"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warnning_mesg in line:
continue
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[2] in line:
incr_names += 1
if self.backups[3] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
buckets = 0
name = False
self.backupset.backup_list_name = "backup"
self.backupset.backup_incr_backup = self.backups[0]
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warnning_mesg in line:
continue
if self.backupset.backup_incr_backup in line:
name = True
if self.buckets[0].name in line:
buckets += 1
if self.buckets[1].name in line:
buckets += 1
self.assertTrue(name, "Expected incremental backup name not found in output")
self.log.info("Expected incrmental backup name found in output")
self.assertEqual(buckets, 2, "Expected buckets were not listed for --incr-backup option")
self.log.info("Expected buckets were listed for --incr-backup option")
name = False
items = 0
self.backupset.backup_list_name = "backup2"
self.backupset.backup_incr_backup = self.backups[2]
self.backupset.bucket_backup = self.buckets[0].name
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
output = json.loads(output[0])
if self.buckets[0].name == output["name"]:
name = True
items = output["items"]
self.assertTrue(name, "Expected bucket not listed for --bucket-backup option")
self.log.info("Expected bucket listed for --bucket-backup option")
self.assertEqual(items, self.num_items, "Mismatch in items for --bucket-backup option")
self.log.info("Expected number of items for --bucket-backup option")
def test_list_with_large_number_of_backups(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a large number of backups
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=25)
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]["backups"]
else:
return False, "No output content"
self.assertEqual(len(bk_info), len(self.backups),
"Number of backups did not match. In repo: {0} != in bk: {1}"\
.format(len(bk_info), len(self.backups)))
for backup in bk_info:
if backup["date"] not in self.backups:
raise("backup date does not match")
self.log.info("Number of backups matched")
def _take_n_backups(self, n=1, validate=False):
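""" Take n backups of the cluster, optionally validating each one. """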
for i in range(1, n + 1):
if validate:
self.backup_cluster_validate()
else:
self.backup_cluster()
def test_backup_info_with_start_end_flag(self):
"""
1. Create default bucket and load items to bucket
2. Run number of backups pass by param number_of_backups=x
3. Run subcommand info with random start and end values. Value could be an index, date or backup name
4. conf file name: bkrs-info-with-start-end-flag.conf
"""
if self.bkinfo_date_start_ago:
conn = RemoteMachineShellConnection(self.backupset.backup_host)
start_date_cmd = "date --date=\"{} days ago\" '+%d-%m-%Y' "\
.format(self.bkinfo_date_start_ago)
output, error = conn.execute_command(start_date_cmd)
start_date = output[0]
end_date_cmd = "date '+%d-%m-%Y' "
output, error = conn.execute_command(end_date_cmd)
end_date = output[0]
conn.disconnect()
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
initial_gen = copy.deepcopy(gen)
self.log.info("Start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Create backup repo ")
self.backup_create()
for i in range(1, self.backupset.number_of_backups + 1):
self.backup_cluster()
self.log.info("done running backup")
if self.bkinfo_start_end_with_bkname:
bkname_start_index = int(self.bkinfo_start_end_with_bkname.split(":")[0])
bkname_start = self.backups[bkname_start_index]
bkname_end_index = int(self.bkinfo_start_end_with_bkname.split(":")[1])
bkname_end = self.backups[bkname_end_index]
if self.bkinfo_date_start_ago:
o, e = self.backup_info(start=start_date,end=end_date)
elif self.bkinfo_start_end_with_bkname:
o, e = self.backup_info(start=bkname_start,end=bkname_end)
else:
o, e = self.backup_info(start=self.bkinfo_start,end=self.bkinfo_end)
if o and o[0]:
bk_info = json.loads(o[0])
bk_info = bk_info["backups"]
if self.debug_logs:
print("\nbk info : ", bk_info)
print("\n bkinfo len: ", len(bk_info))
print("\nbk info date : ", bk_info[0]["date"])
print("\nbk info type : ", bk_info[0]["type"])
print("\nnubmer backup : ", self.backups)
if self.bkinfo_start == 1 and self.bkinfo_end == 1:
if "FULL" not in bk_info[0]["type"]:
self.fail("First backup is not full backup")
elif self.bkinfo_start > 1 and self.bkinfo_end > 1:
if "INCR" not in bk_info[0]["type"]:
self.fail("> 0th backup is not incr backup")
if self.bkinfo_date_start_ago:
if len(bk_info) != len(self.backups):
self.fail("bkrs info failed to show all backups today")
elif self.bkinfo_start_end_with_bkname:
if len(bk_info) != (bkname_end_index - bkname_start_index + 1):
self.fail("bkrs info does not show correct nubmer of backups with backup name")
elif len(bk_info) != (self.bkinfo_end - self.bkinfo_start + 1):
self.fail("bkrs info does not show correct nubmer of backups")
def test_backup_compact(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact_validate()
def test_backup_with_purge_interval_set_to_float(self):
"""
cbbackupmgr should handle case with purge interval set to float number
return: None
"""
purgeInterval = 1.5
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Set purge interval to float value '%s'" % purgeInterval)
rest = RestConnection(self.backupset.cluster_host)
status, content = rest.set_purge_interval_and_parallel_compaction(purgeInterval)
if status:
self.log.info("Done set purge interval value '%s'" % purgeInterval)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
else:
self.fail("Failed to set purgeInterval value")
def test_restore_from_compacted_backup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset
4. Restores from the compacted backup and validates it
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact()
self.backup_restore_validate()
def test_backup_with_compress_flag(self):
"""
1. Load docs into bucket
2. Backup without compress flag
3. Get backup data size
4. Delete backup repo
5. Do backup again with compress flag
6. Compare those data if it flag works
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backupset.backup_compressed = False
self.backup_cluster()
no_compression = self.get_database_file_info()
self.log.info("\nDelete old backup and do backup again with compress flag")
self.backup_create()
self.backupset.backup_compressed = self.input.param("backup-compressed", False)
self.backup_cluster()
with_compression = self.get_database_file_info()
self.validate_backup_compressed_file(no_compression, with_compression)
def test_backup_restore_with_credentials_env(self):
"""
The password is passed in as an environment variable
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
output, error = self.backup_cluster()
if output and not self._check_output("Backup completed successfully", output):
self.fail("Failed to run with password env %s " % output)
self.backup_cluster_validate(skip_backup=True)
self.backup_list()
self.backup_restore_validate()
def test_backup_with_update_on_disk_of_snapshot_markers(self):
"""
This test is for MB-25727 (using cbbackupwrapper)
Check when cbbackupwrapper will be dropped so that this test can be removed.
No default bucket, default_bucket=false
Create bucket0
Load 100K items to bucket0
Stop persistence on server via cbepctl
Load another 100K items.
Run full backup with cbbackupwrapper
Load another 100K items.
Run diff backup. Backup process will hang with error in memcached as shown above
:return: None
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if version[:5] == "6.5.0":
self.log.info("\n\n******* Due to issue in MB-36904, \
\nthis test will be skipped in 6.5.0 ********\n")
return
gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=100000)
gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=100000)
gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size, end=100000)
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.create_bucket(bucket="bucket0", ramQuotaMB=1024)
self.buckets = rest_conn.get_buckets()
authentication = "-u Administrator -p password"
self._load_all_buckets(self.master, gen1, "create", 0)
self.log.info("Stop persistent")
cluster_nodes = rest_conn.get_nodes()
clusters = copy.deepcopy(cluster_nodes)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for node in clusters:
shell.execute_command("%scbepctl%s %s:11210 -b %s stop %s" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
"bucket0",
authentication))
shell.disconnect()
self.log.info("Load 2nd batch docs")
self._load_all_buckets(self.master, gen2, "create", 0)
self.log.info("Run full backup with cbbackupwrapper")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
backup_dir = self.tmp_path + "backup" + self.master.ip
shell.execute_command("rm -rf %s" % backup_dir)
shell.execute_command("mkdir %s" % backup_dir)
shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
self.log.info("Load 3rd batch docs")
self._load_all_buckets(self.master, gen3, "create", 0)
self.log.info("Run diff backup with cbbackupwrapper")
output, _ = shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
if output and "SUCCESSFULLY COMPLETED" not in output[1]:
self.fail("Failed to backup as the fix in MB-25727")
shell.disconnect()
def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):
"""
This test is for MB-25809
Set default_bucket=False
Create bucket with 1 replica
Load 10K items to bucket
Backup data from bucket
Create other bucket with 2 replicas in other cluster
Restore data to bucket with 2 replicas
Verify data and bucket setting. It must retain 2 replicas
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=10000)
if not self.new_replicas:
self.fail("This test needs to pass param 'new-replicas' to run")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Start backup cluster")
self.backup_cluster_validate()
self.backup_restore_validate()
self.log.info("replicas from backup bucket: {0}".format(self.num_replicas))
self.log.info("replica in restore bucket should be {0} after restore"\
.format(self.new_replicas))
rest_r = RestConnection(self.backupset.restore_cluster_host)
for bucket in self.buckets:
bucket_stats = rest_r.get_bucket_json(bucket.name)
if self.new_replicas != bucket_stats["replicaNumber"]:
self.fail("replia number in bucket {0} did change after restore"\
.format(bucket.name))
self.log.info("Verified replica in bucket {0}: {1}"\
.format(bucket.name,
bucket_stats["replicaNumber"]))
def test_restore_with_invalid_bucket_config_json(self):
"""
When the bucket-config.json in the latest backup is corrupted,
merging the backups should fail.
1. Create a bucket and load docs into it.
2. Create a backup and validate it.
3. Run full backup
4. Load more docs into bucket
5. Run backup (incremental) and verify.
6. Modify bucket-config.json in the latest backup to make its content invalid JSON
7. Merge the backups; the merge should fail with an error
"""
gen = BlobGenerator("ent-backup_1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
error_msg = "Error merging data: Unable to read bucket settings because bucket-config.json is corrupt"
if not status:
self.fail(message)
backup_count = 0
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}",
line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
backup_bucket_config_path = self.backupset.directory + "/backup" + \
"/" + self.backups[self.backupset.number_of_backups - 1] + \
"/" + self.buckets[0].name + "-*" \
"/bucket-config.json"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.log.info("Remove } in bucket-config.json to make it invalid json ")
remote_client.execute_command("sed -i 's/}//' %s " % backup_bucket_config_path)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1,
self.backupset.number_of_backups + 1)
result, output, _ = self.backup_merge()
if result:
self.log.info("Here is the output from command %s " % output[0])
if not self._check_output(error_msg, output):
self.fail("read bucket config should fail since bucket-config.json is invalid")
remote_client.disconnect()
def test_restore_with_non_exist_bucket(self):
"""
1. Create a bucket A
2. Load docs to bucket A
3. Do backup bucket A
4. Delete bucket A
5. Restore to bucket A (non-existent bucket)
6. Expect errors throw out
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
output, _ = self.backup_restore()
if output and "Error restoring cluster" not in output[0]:
self.fail("Restore to non exist bucket should fail")
def test_merge_backup_from_old_and_new_bucket(self):
"""
1. Create a bucket A
2. Load docs with key 1
3. Do backup
4. Delete bucket A
5. Re-create bucket A
6. Load docs with key 2
7. Do backup
8. Do merge backup. Verify the merged backup only contains docs with key 2
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
if self.bucket_delete:
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
elif self.bucket_flush:
self.log.info("Start to flush bucket")
self._all_buckets_flush()
gen = BlobGenerator("ent-backup2_", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Start to load bucket again with different key")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_cluster()
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = self.backupset.number_of_backups
self.merged = True
result, output, _ = self.backup_merge()
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, "ent-backup1")
self.backup_cluster_validate(skip_backup=True)
def test_merge_backup_with_merge_kill_and_re_merge(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup
6. Merge backup
7. Kill merge process
8. Merge backup again
Result: 2nd merge should run ok
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 2
self.merged = True
merge_threads = []
merge_thread = Thread(target=self.backup_merge)
merge_threads.append(merge_thread)
merge_thread.start()
merge_kill_thread = Thread(target=self._kill_cbbackupmgr)
merge_threads.append(merge_kill_thread)
merge_kill_thread.start()
for merge_thread in merge_threads:
merge_thread.join()
status, output, message = self.backup_list()
if not status:
self.fail(message)
result, output, _ = self.backup_merge()
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_merge_backup_with_partial_backup(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup and kill backup process
6. Merge backup. Merge should fail
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_threads = []
backup_thread = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread)
backup_thread.start()
backup_kill_thread = Thread(target=self._kill_cbbackupmgr)
backup_threads.append(backup_kill_thread)
backup_kill_thread.start()
for backup_thread in backup_threads:
backup_thread.join()
self.backupset.number_of_backups += 1
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 3
self.merged = True
status, output, error = self.backup_merge()
if status:
self.fail("This merge should fail due to last backup killed, not complete yet")
elif "Merging backup failed" in error:
self.log.info("Test failed as expected as last backup failed to complete")
status, output, message = self.backup_list()
if not status:
self.fail(message)
def _kill_cbbackupmgr(self):
"""
kill all cbbackupmgr processes
"""
self.sleep(1, "times need for cbbackupmgr process run")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
if self.os_name != "windows":
cmd = "ps aux | grep cbbackupmgr | gawk '{print $2}' | xargs kill -9"
output, _ = shell.execute_command(cmd)
else:
cmd = "tasklist | grep cbbackupmgr | gawk '{printf$2}'"
output, _ = shell.execute_command(cmd)
if output:
kill_cmd = "taskkill /F /T /pid %d " % int(output[0])
output, _ = shell.execute_command(kill_cmd)
if output and "SUCCESS" not in output[0]:
self.fail("Failed to kill cbbackupmgr on windows")
shell.disconnect()
def test_merge_backup_with_purge_deleted_keys(self):
"""
1. Load 100K docs to a bucket A with key 1
2. Delete 50K docs from bucket A
3. Load 50K docs with key 2 to bucket A
4. Take backup
5. Run compaction on each vbucket to purge all delete keys
6. Load again 25K docs with key 3
7. Run backup again
8. Load another 25K docs with key 4
9. Run backup. It should not fail
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.log.info("Delete half docs of 1st batch")
delete_gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items // 2)
self._load_all_buckets(self.master, delete_gen, "delete", 0)
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items // 2)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
nodes = []
upto_seq = 100000
self.log.info("Start compact each vbucket in bucket")
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in RestConnection(self.master).get_buckets():
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if found:
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
shell.compact_vbuckets(len(bucket.vbuckets), cluster_nodes, upto_seq)
shell.disconnect()
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if not found:
self.log.info("Load another docs to bucket %s " % bucket.name)
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items // 4)
self._load_bucket(bucket, self.master, create_gen3, "create",
self.expire_time)
self.backup_cluster()
create_gen4 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items // 4)
self._load_bucket(bucket, self.master, create_gen4, "create",
self.expire_time)
self.backup_cluster()
self.backupset.end = 3
status, output, message = self.backup_merge()
if not status:
self.fail(message)
else:
self.fail("cbcompact failed to purge deleted key")
def test_merge_backup_with_failover_logs(self):
"""
1. Load 100K docs into bucket.
2. Wait for all docs persisted.
3. Stop persistence.
4. Load another 100K docs to bucket.
5. Killing memcached will generate about 4 failover logs.
./cbstats localhost:11210 -u username -p pass failovers | grep num_entries
6. Take backup.
7. Load another 100K docs
8. Take backup again.
Verify:
Only the 1st backup is a full backup
All backups after that would be incremental backups
In 4.5.1, all backups would be full backups
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
failed_persisted_bucket = []
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in self.buckets:
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
bucket.name, 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append(bucket.name)
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.log.info("Stop persistence at each node")
clusters = copy.deepcopy(cluster_nodes)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for bucket in self.buckets:
for node in clusters:
shell.execute_command("%scbepctl%s %s:11210 -b %s stop" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
bucket.name))
shell.disconnect()
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.sleep(5)
self.log.info("Crash cluster via kill memcached")
for node in clusters:
for server in self.servers:
if node.ip == server.ip:
num_entries = 4
reach_num_entries = False
while not reach_num_entries:
shell = RemoteMachineShellConnection(server)
shell.kill_memcached()
ready = False
while not ready:
if not RestHelper(RestConnection(server)).is_ns_server_running():
self.sleep(10)
else:
ready = True
cmd = "%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries " \
"| gawk%s '{printf $2}' | grep -m 5 '4\|5\|6\|7'" \
% (self.cli_command_location, self.cmd_ext, server.ip,
"cbadminbucket", "password", self.cmd_ext)
output, error = shell.execute_command(cmd)
shell.disconnect()
if output:
self.log.info("number failover logs entries reached. %s " % output)
reach_num_entries = True
self.backup_create()
self.log.info("Start backup data")
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Load 3rd batch docs")
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen3, "create", 0)
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_backupmgr_with_short_option(self):
"""
Test short option flags at each option
"""
cmd = "%scbbackupmgr%s " % (self.cli_command_location, self.cmd_ext)
cmd += "%s " % self.input.param("command", "backup")
options = " -%s %s " % (self.input.param("repo", "-repo"),
self.backupset.name)
options += " -%s %s" % (self.input.param("archive", "-archive"),
self.backupset.directory)
if self.input.param("command", "backup") != "list":
options += " -%s http://%s:%s" % (self.input.param("cluster", "-cluster"),
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
options += " -%s Administrator" % self.input.param("bkusername", "-username")
options += " -%s password" % self.input.param("bkpassword", "-password")
self.backup_create()
shell = RemoteMachineShellConnection(self.backupset.backup_host)
output, error = shell.execute_command("%s %s " % (cmd, options))
shell.log_command_output(output, error)
shell.disconnect()
if error:
self.fail("There is a error in %s " % error)
def test_backupmgr_help_display(self):
"""
Test display help manual in each option
We do not compare the whole content, only the
first few lines, to make sure the manual page is displayed.
"""
display_option = self.input.param("display", "-h")
if self.input.param("subcommand", None) is None:
subcommand = ""
else:
subcommand = self.input.param("subcommand", None)
if subcommand == "list":
subcommand = "info"
cmd = "{0}cbbackupmgr{1} ".format(self.cli_command_location, self.cmd_ext)
if display_option == "--help":
display_option = self.long_help_flag
elif display_option == "-h":
self.long_help_flag = self.short_help_flag
cmd += " {0} {1} ".format(subcommand, display_option)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
output, error = shell.execute_command("{0} ".format(cmd))
self.log.info("Verify print out help message")
if display_option == "-h":
if subcommand == "":
content = ['cbbackupmgr [<command>] [<args>]', '',
' backup Backup a Couchbase cluster']
elif subcommand == "help":
content = ['cbbackupmgr help [<command>] [<args>]', '',
' archivelayout View the archive directory layout structure']
else:
content = ['cbbackupmgr {0} [<args>]'.format(subcommand), '',
'Required Flags:']
self.validate_help_content(output[:3], content)
elif display_option == "--help":
content = None
if subcommand == "":
content = \
['CBBACKUPMGR(1) Couchbase Server Manual CBBACKUPMGR(1)']
self.validate_help_content(output, content)
else:
subcmd_cap = subcommand.upper()
content = \
['CBBACKUPMGR-{0}(1) Couchbase Server Manual CBBACKUPMGR-{1}(1)'\
.format(subcmd_cap, subcmd_cap)]
self.validate_help_content(output, content)
if self.bkrs_flag is not None:
self.assertTrue(self._check_output(self.bkrs_flag, output),
"Missing flag {0} in help content".format(self.bkrs_flag))
shell.disconnect()
def test_cbbackupmgr_help_contains_objstore_info(self):
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
supports_read_only = ['restore']
for sub_command in ['backup', 'collect-logs', 'config', 'examine', 'info', 'remove', 'restore']:
output, error = remote_client.execute_command(f"{self.cli_command_location}/cbbackupmgr {sub_command} -h")
if error:
self.fail(f"Expected to be able to get help for {sub_command}")
arguments = ['--obj-access-key-id', '--obj-cacert', '--obj-endpoint', '--obj-no-ssl-verify',
'--obj-region', '--obj-secret-access-key', '--obj-staging-dir', '--s3-force-path-style',
'--obj-log-level']
if sub_command in supports_read_only:
arguments.append('--obj-read-only')
for argument in arguments:
found = False
for line in output:
found = found or argument in line
self.assertTrue(found, f"Expected to find help about {argument}")
def test_backup_restore_with_optional_flags(self):
"""
1. Create a bucket
2. Load docs to bucket
3. Backup with optional flags like no-ssl-verify, secure-conn
4. Verify backup data in backup file
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.backup_create()
verify_data = True
output, error = self.backup_cluster()
if self.backupset.secure_conn:
if self.backupset.bk_no_cert:
if self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
elif self._check_output("Error", output):
verify_data = False
else:
if not self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
if verify_data:
self.validate_backup_data(self.backupset.backup_host,
self.servers[:self.nodes_init],
"ent-backup", False, False, "memory",
self.num_items, None)
if self.do_restore:
self.log.info("Restore with secure connection")
self.backup_restore()
def test_restore_with_filter_regex(self):
"""
1. Create a bucket
2. Load docs to bucket with key patterned
3. Backup docs
4. Delete bucket
5. Restore docs with regex
6. Verify only key or value in regex restored to bucket
NOTE: This test requires a specific config/ini to run correctly; if provided with an incorrect config
testrunner will restore data into the bucket that was backed up on the same cluster without performing a
flush. This will mean cbbackupmgr will restore with conflict resolution enabled and the validation will find
an unexpected amount of keys (all of them) in the target bucket.
"""
key_name = "ent-backup"
if self.backupset.random_keys:
key_name = "random_keys"
self.validate_keys = self.input.param("validate_keys", False)
if self.validate_keys:
gen = BlobGenerator(key_name, "ent-backup-", self.value_size,
end=self.num_items)
else:
gen = DocumentGenerator('random_keys', '{{"age": {0}}}', list(range(100)),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
self.backup_restore()
self.merged = False
regex_check = self.backupset.filter_keys
if not self.backupset.filter_keys:
regex_check = self.backupset.filter_values
self.validate_backup_data(self.backupset.backup_host,
[self.backupset.restore_cluster_host],
key_name, False, False, "memory",
self.num_items, None,
validate_keys=self.validate_keys,
regex_pattern=regex_check)
def test_backup_with_rbac(self):
"""
1. Create a cluster
        2. Create a bucket and load data
3. Create a user with specific role
param in conf: new_user
param in conf: new_role
Roles:
admin, ro_admin, cluster_admin, bucket_full_access[*], bucket_admin[*],
views_admin[*],
replication_admin, roadmin_no_access, cluster_admin_no_access,
bucket_admin_no_access, view_admin_no_access, replication_admin_no_access,
view_replication_admin, replication_ro_admin, bucket_view_replication_admin,
4. Run backup with new user created
5. Verify if backup command handles user role correctly
"""
all_buckets = self.input.param("all_buckets", False)
backup_failed = False
if self.create_fts_index:
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)), start=0,
end=self.num_items)
index_definition = INDEX_DEFINITION
index_name = index_definition['name'] = "age"
fts_server = self.get_nodes_from_services_map(service_type="fts")
rest_fts = RestConnection(fts_server)
try:
self.log.info("Create fts index")
rest_fts.create_fts_index(index_name, index_definition)
except Exception as ex:
self.fail(ex)
else:
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_views:
self._create_views()
self.backup_create()
if all_buckets:
if "-" in self.cluster_new_role:
self.cluster_new_role = "[*],".join(self.cluster_new_role.split("-")) + "[*]"
else:
self.cluster_new_role = self.cluster_new_role + "[*]"
admin_roles = ["cluster_admin", "eventing_admin"]
for role in admin_roles:
if role in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace(role + "[*]", role)
self.log.info("\n***** Create new user: {0} with role: {1} to do backup *****"\
.format(self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "{0}".format(self.cluster_new_user),
"name": "{0}".format(self.cluster_new_user),
"password": "password"}]
rolelist = [{"id": "{0}".format(self.cluster_new_user),
"name": "{0}".format(self.cluster_new_user),
"roles": "{0}".format(self.cluster_new_role)}]
users_can_backup_all = ["admin", "bucket_full_access[*]",
"data_backup[*]", "eventing_admin",
"cluster_admin", "backup_admin"]
users_can_not_backup_all = ["views_admin[*]", "replication_admin",
"replication_target[*]", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"ro_admin", "bucket_admin[*]", "cluster_admin"]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: {0} with role: {1} " \
.format(self.cluster_new_user,
self.cluster_new_role))
output, error = self.backup_cluster()
success_msg = 'Backup completed successfully'
fail_msg = ["Error backing up cluster:"]
for bucket in self.buckets:
fail_msg.append('Backed up bucket "{0}" failed'.format(bucket.name))
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
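                    # A backup may legitimately fail here if the cluster runs the eventing
                    # service and the role lacks eventing permissions, so check for that
                    # before declaring the test failed.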
rest_bk = RestConnection(self.backupset.cluster_host)
eventing_service_in = False
bk_cluster_services = list(rest_bk.get_nodes_services().values())
for srv in bk_cluster_services:
if "eventing" in srv:
eventing_service_in = True
eventing_err = ["Invalid permissions to backup eventing data",
"cluster.eventing.functions!manage"]
if eventing_service_in and self._check_output(eventing_err, output) and \
("admin" not in self.cluster_new_role or \
"eventing_admin" not in self.cluster_new_role):
self.log.info("Only admin or eventing_admin role could backup eventing service")
else:
self.fail("User {0} failed to backup data.\n"
.format(self.cluster_new_role) + \
"Here is the output {0} ".format(output))
elif self.cluster_new_role in users_can_not_backup_all:
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to backup")
else:
backup_failed = True
status, _, message = self.backup_list()
if not status:
self.fail(message)
if self.do_verify and not backup_failed:
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup,
self.buckets,
self.skip_consistency,
self.per_node)
self.log.info("*** Start to validate data in merge backup ")
result = self.validate_backup_data(self.backupset.backup_host,
[self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.validate_backup_views()
except Exception as e:
if e:
print(("Exception error: ", e))
if self.cluster_new_role in users_can_not_backup_all:
error_found = False
error_messages = ["Error backing up cluster: Forbidden",
"Could not find file shard_0.sqlite",
"Error backing up cluster: Invalid permissions",
"Database file is empty",
"Error backing up cluster: Unable to find the latest vbucket",
"Failed to backup bucket"]
if self.do_verify:
if str(e) in error_messages or backup_failed:
error_found = True
if not error_found:
raise Exception("cbbackupmgr does not block user role: {0} to backup" \
.format(self.cluster_new_role))
if self.cluster_new_role == "views_admin[*]" and self.create_views:
status, mesg = self.validate_backup_views(self.backupset.backup_host)
if not status:
raise Exception(mesg)
if "Expected error message not thrown" in str(e):
raise Exception("cbbackupmgr does not block user role: {0} to backup" \
.format(self.cluster_new_role))
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
self.fail(e)
finally:
if backup_failed:
self.log.info("cbbackupmgr blocked user: {0} to backup"\
.format(self.cluster_new_role))
self.log.info("Delete new create user: {0} ".format(self.cluster_new_user))
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
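            # Remove the test user via the REST API:
            # DELETE /settings/rbac/users/local/<user> on the cluster host.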
cmd = "{0}curl{1} -g -X {2} -u {3}:{4} http://{5}:8091/settings/rbac/users/local/{6}"\
.format(curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_restore_with_rbac(self):
"""
1. Create a backupdata set.
2. Setup cluster.
3. Restore data back to cluster
Important:
        This test needs to copy entbackup-mh.tgz
        to /root or /cygdrive/c/Users/Administrator on the backup host.
Files location: 172.23.121.227:/root/entba*.tgz
"""
all_buckets = self.input.param("all_buckets", False)
self.log.info("Copy backup dataset to tmp dir")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
# Since we are just wiping out the archive here, we can just run the object store teardown
if self.objstore_provider:
self.objstore_provider.teardown(shell.extract_remote_info().type.lower(), shell)
else:
shell.execute_command("rm -rf {0} ".format(self.backupset.directory))
shell.execute_command("rm -rf {0} ".format(self.backupset.directory.split("_")[0]))
backup_file = ENT_BKRS
backup_dir_found = False
backup_dir = "entbackup_{0}".format(self.master.ip)
output, error = shell.execute_command("ls | grep entbackup")
self.log.info("check if %s dir exists on this server " % backup_dir)
if output:
for x in output:
if x == backup_dir:
backup_dir_found = True
if not backup_dir_found:
self.log.info("%s dir does not exist on this server. Downloading.. "
% backup_dir)
shell.execute_command("{0} -q {1} --no-check-certificate -O {2}.tgz "
.format(self.wget, backup_file, backup_dir))
shell.execute_command("tar -zxvf {0}.tgz ".format(backup_dir))
shell.execute_command("mv {0} {1}".format(backup_dir.split("_")[0], backup_dir))
if "-" in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("-", ",")
if self.objstore_provider and self.objstore_provider.schema_prefix() == "s3://":
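            # Build an "aws s3 sync" command, passing any region/credentials/endpoint
            # inline as environment variables, to upload the prepared backup archive
            # into the configured S3 bucket and path.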
command = ""
if self.backupset.objstore_region or self.backupset.objstore_access_key_id or self.backupset.objstore_secret_access_key:
command += "env"
if self.backupset.objstore_region:
command += f" AWS_REGION={self.backupset.objstore_region}"
if self.backupset.objstore_access_key_id:
command += f" AWS_ACCESS_KEY_ID={self.backupset.objstore_access_key_id}"
if self.backupset.objstore_secret_access_key:
command += f" AWS_SECRET_ACCESS_KEY={self.backupset.objstore_secret_access_key}"
command += " aws"
if self.backupset.objstore_endpoint:
command += f" --endpoint={self.backupset.objstore_endpoint}"
command += f" s3 sync entbackup_{self.master.ip} s3://{self.backupset.objstore_bucket}/{self.backupset.directory}"
            _, error = shell.execute_command(command, debug=False) # Contains sensitive info so don't log
if error:
self.fail(f"Failed to sync backup to S3: {error}")
else:
shell.execute_command("cp -r entbackup_{0}/ {1}/entbackup_{0}"\
.format(self.master.ip, self.tmp_path))
status, _, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Restore data from backup files")
if all_buckets:
if "bucket_full_access" in self.cluster_new_role and \
"bucket_full_access[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("bucket_full_access",
"bucket_full_access[*]")
else:
self.cluster_new_role = self.cluster_new_role + "[*]"
if "data_backup" in self.cluster_new_role and \
"data_backup[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("data_backup",
"data_backup[*]")
if "fts_admin" in self.cluster_new_role and \
"fts_admin[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("fts_admin",
"fts_admin[*]")
admin_roles = ["cluster_admin", "eventing_admin"]
for role in admin_roles:
if role in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace(role + "[*]", role)
self.log.info("\n***** Create new user: %s with role: %s to do backup *****"
% (self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"password": "password"}]
rolelist = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"roles": "%s" % self.cluster_new_role}]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: %s with role: %s " \
% (self.cluster_new_user,
self.cluster_new_role))
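            # Roles expected to be able to restore vs. roles that cbbackupmgr should reject.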
users_can_restore_all = ["admin", "bucket_full_access[*]",
"data_backup[*]", "eventing_admin"]
users_can_not_restore_all = ["views_admin[*]", "ro_admin",
"replication_admin", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"replication_target[*]", "cluster_admin",
"bucket_admin[*]"]
if self.cluster_new_role in users_can_not_restore_all:
self.should_fail = True
output, error = self.backup_restore()
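            # If the restore cluster runs the eventing service, a non-admin role may be
            # rejected with an eventing permission error; treat that as expected and stop.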
rest_rs = RestConnection(self.backupset.restore_cluster_host)
eventing_service_in = False
rs_cluster_services = list(rest_rs.get_nodes_services().values())
for srv in rs_cluster_services:
if "eventing" in srv:
eventing_service_in = True
eventing_err = "User needs one of the following permissions: cluster.eventing"
if eventing_service_in and self._check_output(eventing_err, output) and \
("admin" not in self.cluster_new_role or \
"eventing_admin" not in self.cluster_new_role):
self.log.info("Only admin role could backup eventing service")
return
success_msg = 'Restore completed successfully'
fail_msg = "Error restoring cluster:"
failed_persisted_bucket = []
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
"default", 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append("default")
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.sleep(3)
rest = RestConnection(self.master)
actual_keys = rest.get_active_key_count("default")
print(("\nActual keys in default bucket: %s \n" % actual_keys))
if self.cluster_new_role in users_can_restore_all:
if not self._check_output(success_msg, output):
self.fail("User with roles: %s failed to restore data.\n"
"Here is the output %s " % \
(self.cluster_new_role, output))
roles = []
if "," in self.cluster_new_role:
roles = self.cluster_new_role.split(",")
if set(roles) & set(users_can_not_restore_all) and \
set(roles) & set(users_can_restore_all):
if not self._check_output(success_msg, output):
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the output %s " % \
(self.cluster_new_user, roles, output))
if int(actual_keys) != 10000:
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the actual docs in bucket %s " % \
(self.cluster_new_user, roles, actual_keys))
elif self.cluster_new_role in users_can_not_restore_all:
if int(actual_keys) == 1000:
self.fail("User: %s with role: %s should not allow to restore data" \
% (self.cluster_new_user,
self.cluster_new_role))
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to restore")
finally:
self.log.info("Delete new create user: %s " % self.cluster_new_user)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
cmd = "%scurl%s -g -X %s -u %s:%s http://%s:8091/settings/rbac/users/local/%s" \
% (curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_backup_restore_with_nodes_reshuffle(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Lists the default zone of the current cluster - backs up the cluster and validates
3. Creates a new zone - shuffles cluster host to new zone
4. Restores to cluster host and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.cluster_host)
zones = list(rest_conn.get_zone_names().keys())
source_zone = zones[0]
target_zone = "test_backup_restore"
self.log.info("Current nodes in group {0} : {1}".format(source_zone,
str(list(rest_conn.get_nodes_in_zone(source_zone).keys()))))
self.log.info("Taking backup with current groups setup")
self.backup_create()
self.backup_cluster_validate()
self.log.info("Creating new zone " + target_zone)
rest_conn.add_zone(target_zone)
self.log.info("Moving {0} to new zone {1}".format(self.backupset.cluster_host.ip, target_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], source_zone, target_zone)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
self.log.info("Restoring to {0} after group change".format(self.backupset.cluster_host.ip))
try:
self.log.info("Flush bucket")
rest_conn.flush_bucket()
self.backup_restore_validate()
except Exception as ex:
self.fail(str(ex))
finally:
self.log.info("Moving {0} back to old zone {1}".format(self.backupset.cluster_host.ip, source_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], target_zone, source_zone)
self.log.info("Deleting new zone " + target_zone)
rest_conn.delete_zone(target_zone)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
def test_backup_restore_with_firewall(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Enables firewall on cluster host and validates if backup cluster command throws expected error
4. Disables firewall on cluster host, takes backup and validates
5. Enables firewall on restore host and validates if backup restore command throws expected error
6. Disables firewall on restore host, restores and validates
"""
if self.os_name == "windows" or self.nonroot:
self.log.info("This firewall test does not run on windows or nonroot user")
return
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Enabling firewall on cluster host before backup")
RemoteUtilHelper.enable_firewall(self.backupset.cluster_host)
self.enable_firewall = True
try:
output, error = self.backup_cluster()
self.assertIn("failed to connect", output[0],
"Expected error not thrown by backup cluster when firewall enabled")
finally:
self.log.info("Disabling firewall on cluster host to take backup")
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying backup now")
self.backup_cluster_validate()
self.log.info("Enabling firewall on restore host before restore")
RemoteUtilHelper.enable_firewall(self.backupset.restore_cluster_host)
self.enable_firewall = True
""" reset restore cluster to same services as backup cluster """
try:
output, error = self.backup_restore()
mesg = "connect: connection refused"
if self.skip_buckets:
mesg = "Error restoring cluster:"
self.assertTrue(self._check_output(mesg, output),
"Expected error not thrown by backup restore when firewall enabled")
finally:
self.log.info("Disabling firewall on restore host to restore")
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying restore now")
self.skip_buckets = False
""" Need to reset restore node with services the same as in backup cluster """
rest = RestConnection(self.backupset.restore_cluster_host)
rest.force_eject_node()
master_services = self.get_services([self.backupset.cluster_host],
self.services_init, start_node=0)
info = rest.get_nodes_self()
if info.memoryQuota and int(info.memoryQuota) > 0:
self.quota = info.memoryQuota
rest.init_node()
self.sleep(10)
self.backup_restore_validate()
def test_backup_restore_with_audit(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Creates a backup of the cluster host - verifies if corresponding entry was created in audit log
4. Restores data on to restore host - verifies if corresponding entry was created in audit log
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
audit_obj = audit(AUDITBACKUPID, self.backupset.cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_create()
self.backup_cluster()
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='backup'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
audit_obj = audit(AUDITBACKUPID, self.backupset.restore_cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.restore_cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.restore_cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_restore()
audit_obj = audit(AUDITRESTOREID, self.backupset.restore_cluster_host)
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='restore'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
def _get_event_expected_results(self, action):
if action == 'backup':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "opened DCP connection",
"id": AUDITBACKUPID,
"description": "opened DCP connection",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "{0}:11210".format(self.backupset.cluster_host.ip),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
elif action == 'restore':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "authentication succeeded",
"id": AUDITRESTOREID,
"description": "Authentication to the cluster succeeded",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "{0}:11210".format(self.backupset.restore_cluster_host.ip),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
return expected_results
def test_backup_restore_with_lesser_nodes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Adds another node to the restore cluster and rebalances - note the test has to be run with nodes_init >= 3 so
           that the cluster host has more nodes than the restore host
3. Creates backupset on backup host
4. Creates backup of cluster host with 3 or more number of nodes and validates
5. Restores to restore host with lesser number of nodes (2) and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.add_node(self.input.clusters[0][1].rest_username, self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip)
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
def test_backup_with_full_disk(self):
"""
Things to be done before running this testcase:
- scripts/install.py has to be run with init_nodes=False
- scripts/cbqe3043.py has to be run against the ini file - this script will mount a 20MB partition on the
nodes required for the test
1. Creates specified bucket on the cluster and loads it with given number of items
2. Sets backup directory to the 20MB partition and creates a backupset
3. Fills up 20MB partition
4. Keeps taking backup until no space left on device error is hit
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.directory = "/cbqe3043/entbackup"
self.backup_create()
conn = RemoteMachineShellConnection(self.backupset.backup_host)
output, error = conn.execute_command("dd if=/dev/zero of=/cbqe3043/file bs=256M count=50")
conn.log_command_output(output, error)
output, error = self.backup_cluster()
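        # Keep loading new docs and backing up until the small partition fills up and
        # cbbackupmgr reports "no space left on device".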
while self._check_output("Backup completed successfully", output):
gen = BlobGenerator("ent-backup{0}{0}".format(randint(1, 10000)), "ent-backup-",
self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
output, error = self.backup_cluster()
error_msg = "no space left on device"
self.assertTrue(self._check_output(error_msg, output),
"Expected error message not thrown by backup when disk is full")
self.log.info("Expected error thrown by backup command")
conn.execute_command("rm -rf /cbqe3043/file")
conn.disconnect()
def test_backup_and_restore_with_map_buckets(self):
"""
1. Creates specified buckets on the cluster and loads it with given number
of items - memcached bucket has to be created for this test
(memcached_buckets=1)
2. Creates a backupset, takes backup of the cluster host and validates
3. Executes list command on the backup and validates that memcached bucket
has been skipped
4. Restores the backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_gsi:
self.create_indexes()
self.backup_create()
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail("Getting backup list to validate memcached buckets failed.")
for line in output:
self.assertTrue("memcached_bucket0" not in line,
"Memcached bucket found in backup list output after backup")
self.log.info("Memcached bucket not found in backup list output after backup as expected")
self.backup_restore()
def test_backup_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number
of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts
erlang process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
conn.start_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with erlang crash and restart within 180 seconds")
self.log.info("Backup succeeded with erlang crash and restart within 180 seconds")
conn.disconnect()
def test_backup_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts couchbase server
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
conn.start_couchbase()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with couchbase stop and start within 180 seconds")
self.log.info("Backup succeeded with couchbase stop and start within 180 seconds")
def test_backup_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts memcached process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached()
conn.unpause_memcached()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with memcached crash and restart within 180 seconds")
self.log.info("Backup succeeded with memcached crash and restart within 180 seconds")
def test_backup_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills erlang process
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
if self.os_name != "windows":
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang(self.os_name)
output = backup_result.result(timeout=200)
if self.debug_logs:
print(("Raw output from backup run: ", output))
error_mesgs = ["Error backing up cluster: Not all data was backed up due to",
"No connection could be made because the target machine actively refused it."]
error_found = False
for error in error_mesgs:
if self._check_output(error, output):
error_found = True
if not error_found:
raise("Expected error message not thrown by Backup 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_backup_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills couchbase server
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output(
"Error backing up cluster: Not all data was backed up due to connectivity issues.", output),
"Expected error message not thrown by Backup 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Backup 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_backup_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills memcached process
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached(self.os_name)
self.sleep(17, "time needs for memcached process completely stopped")
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
output = backup_result.result(timeout=200)
mesg = "Error backing up cluster: Unable to find the latest vbucket sequence numbers"
self.assertTrue(self._check_output(mesg, output),
"Expected error message not thrown by Backup 180 seconds after memcached crash")
self.log.info("Expected error thrown by Backup 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
self.sleep(30)
conn.disconnect()
def test_restore_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts erlang process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
conn.start_couchbase()
conn.disconnect()
timeout_now = 600
output = restore_result.result(timeout=timeout_now)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with erlang crash and restart within 180 seconds")
self.log.info("Restore succeeded with erlang crash and restart within 180 seconds")
def test_restore_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts couchbase process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
self.sleep(10)
conn.start_couchbase()
conn.disconnect()
output = restore_result.result(timeout=500)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with couchbase stop and start within 180 seconds")
self.log.info("Restore succeeded with couchbase stop and start within 180 seconds")
def test_restore_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts memcached process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
conn.unpause_memcached(self.os_name)
conn.disconnect()
output = restore_result.result(timeout=600)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with memcached crash and restart within 400 seconds")
self.log.info("Restore succeeded with memcached crash and restart within 400 seconds")
def test_restore_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills erlang process
4. Waits for 200s and Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
output = restore_result.result(timeout=300)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase", output),
"Expected error message not thrown by Restore 180 seconds after erlang crash")
self.log.info("Expected error thrown by Restore 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills couchbase server
4. Waits for 200s and Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
output = restore_result.result(timeout=300)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase due to connectivity issues.", output),
"Expected error message not thrown by Restore 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Restore 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills memcached process
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
output, error = self.backup_restore()
self.assertTrue(self._check_output(
"Error restoring cluster: failed to connect", output),
"Expected error message not thrown by Restore 180 seconds after memcached crash")
self.log.info("Expected error thrown by Restore 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
conn.disconnect()
self.sleep(30)
def test_backup_merge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Takes specified number of backups (param number_of_backups - should be at least 2 for this test case)
3. Executes list command and validates if all backups are present
4. Randomly selects a start and end and merges the backups
        5. Executes list command again and validates that the newly merged set of backups is listed
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
""" remove last 6 chars of offset time in backup name"""
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
backup_name = bk_info["backups"][i]["date"]
if self.debug_logs:
print("backup name ", backup_name)
print("backup set ", self.backups)
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in info command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Initial number of backups did not match")
self.log.info("Initial number of backups matched")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)
status, output, message = self.backup_merge(check_for_panic=True)
if not status:
self.fail(message)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
backup_name = bk_info["backups"][i]["date"]
if self.debug_logs:
print("backup name ", backup_name)
print("backup set ", self.backups)
backup_count += 1
if backup_name in self.backups:
self.log.info("{0} matched in info command output".format(backup_name))
else:
self.fail("Didn't expect backup date {0} from the info command output" \
" to be in self.backups (the list of exepected backup dates" \
" after a merge)".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Merged number of backups did not match")
self.log.info("Merged number of backups matched")
def test_backup_merge_with_restore(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - restores from the backups and validates
3. Merges both the backups - restores from merged backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed: {0}".format(error))
self.log.info("Finished restoring backup before merging")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.backupset.start = 1
self.backupset.end = 1
rest = RestConnection(self.backupset.restore_cluster_host)
rest.flush_bucket()
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed")
self.log.info("Finished restoring backup after merging")
def test_backup_merge_with_unmerged(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - merges them into one
3. Takes 2 more backups - merges the new backups with already merged ones and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
self.log.info("Merging existing incremental backups")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Taking more backups")
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 3
self.log.info("Merging new backups into already merged backup")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Successfully merged new backups with already merged backup")
def test_merge_backup_with_multi_threads(self):
"""
1. Create a cluster with default bucket
2. Load default bucket with key1
3. Create backup with default one thread
4. Load again to bucket with key2
5. Create backup with 2 threads
        6. Merge backups. The merged backup should contain docs with key1 and key2
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
gen = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_cluster(self.threads_count)
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
if int(self.backupset.number_of_backups) == 2:
self.backupset.end = 2
elif int(self.backupset.number_of_backups) > 2:
self.backupset.end = randrange(self.backupset.start,
self.backupset.number_of_backups + 1)
self.merged = True
status, output, _ = self.backup_merge()
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.backup_cluster_validate(skip_backup=True)
def test_backup_purge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with purge option
6. Validates the old backup is deleted and new backup is created successfully
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
old_backup_name = ""
new_backup_name = ""
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
output = backup_result.result(timeout=200)
self.log.info(str(output))
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
old_backup_name = bk_info["backups"][i]["date"]
self.log.info("Backup name before purge: " + old_backup_name)
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
output, error = self.backup_cluster()
if error or not self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
new_backup_name = bk_info["backups"][i]["date"]
self.log.info("Backup name after purge: " + new_backup_name)
# Once the purge (and backup) have completed we shouldn't see any orphaned multipart uploads
if self.objstore_provider:
self.assertEqual(
self.objstore_provider.num_multipart_uploads(), 0,
"Expected all multipart uploads to have been purged (all newly created ones should have also been completed)"
)
self.assertNotEqual(old_backup_name, new_backup_name,
"Old backup name and new backup name are same when purge is used")
self.log.info("Old backup name and new backup name are not same when purge is used")
def test_backup_resume(self):
"""
1. Creates specified bucket on the cluster and loads it with given
number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with resume option
        6. Validates that the old backup resumes and the backup completes successfully
"""
num_vbuckets = self.input.param("num_vbuckets", None)
if num_vbuckets:
remote_client = RemoteMachineShellConnection(self.backupset.cluster_host)
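            # Reduce the number of vBuckets before bucket creation by setting
            # couchbase_num_vbuckets_default through the /diag/eval endpoint.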
command = (
f"curl -X POST -u {self.master.rest_username}:{self.master.rest_password}"
f" {self.master.ip}:8091/diag/eval -d 'ns_config:set(couchbase_num_vbuckets_default, {num_vbuckets}).'"
)
output, _ = remote_client.execute_command(command)
if 'ok' not in output[0]:
self.fail(f"failed to reduce the number of vBuckets {num_vbuckets}")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.bk_with_stop_and_resume(iterations=self.input.param("iterations", 1),
remove_staging_directory=self.input.param("remove_staging_directory", False))
def test_backup_restore_with_deletes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset - backsup data and validates
3. Perform deletes
4. Restore data and validate
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self._load_all_buckets(self.master, gen, "delete", 0)
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_failover(self):
"""
1. Test should be run with 2 nodes in cluster host (param: nodes_init = 2)
2. Creates specified bucket on the cluster and loads it with given number of items
3. Creates a backupset - backsup data and validates
4. Fails over the second node with specified type (param: graceful = True | False)
5. Sets recovery type to specified value (param: recoveryType = full | delta)
6. Adds back the failed over node and rebalances
7. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
nodes_all = rest.node_statuses()
for node in nodes_all:
if node.ip == self.servers[1].ip:
rest.fail_over(otpNode=node.id, graceful=self.graceful)
self.sleep(30)
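        # Setting the recovery type can fail if requested too soon after the failover,
        # so retry once after a short wait.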
try:
rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
except Exception as e:
if "Set RecoveryType failed" in str(e):
self.sleep(15)
rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
rest.add_back_node(otpNode=node.id)
rebalance = self.cluster.async_rebalance(self.servers, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_after_offline_upgrade(self):
"""
1. Test has to be supplied initial_version to be installed, create
default bucket and load data to this bucket.
2. Backup cluster and verify data and delete default bucket
        3. Upgrades cluster to upgrade_version and re-creates default bucket
4. Restores data and validates
Params:
backup_service_test (bool): Import repository and restore using the backup service.
"""
upgrade_version = self.input.param("upgrade_version", "5.0.0-3330")
if upgrade_version == "5.0.0-3330":
self.fail("\n *** Need param 'upgrade_version=' to run")
backup_service_test = self.input.param("backup_service_test", False)
if backup_service_test:
backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)
self.cli_command_location = "/opt/couchbase/bin"
self._install(self.servers)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],
[])
rebalance.result()
self.add_built_in_server_user()
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.buckets = RestConnection(self.master).get_buckets()
self.total_buckets = len(self.buckets)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.sleep(5)
BucketOperationHelper.delete_bucket_or_assert(self.master, "default", self)
""" Start to upgrade """
if self.force_version_upgrade:
upgrade_version = self.force_version_upgrade
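        # Upgrade both cluster nodes offline to the target version and wait for the
        # upgrade threads to finish before re-creating the bucket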
upgrade_threads = self._async_update(upgrade_version=upgrade_version,
servers=self.servers[:2])
for th in upgrade_threads:
th.join()
self.log.info("Upgraded to: {ver}".format(ver=upgrade_version))
self.sleep(30)
""" Re-create default bucket on upgrade cluster """
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(5)
# Create a backup node and perform a backup service import repository and restore
if backup_service_test:
backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])
backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, "my_repo")
backup_service_hook.backup_service.take_one_off_restore("imported", "my_repo", 20, 20)
backup_service_hook.cleanup()
            return
        """ Only servers from Spock (5.x) and later need a built-in user
            to access buckets and perform other tasks
        """
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
self.add_built_in_server_user()
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
self.master.ip))
RbacBase().create_user_source(testuser, 'builtin', self.master)
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
backupsets = [self.backupset]
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
def test_backup_restore_after_online_upgrade(self):
"""
1. Test has to be supplied initial_version to be installed and
upgrade_version to be upgraded to
2. Installs initial_version on the servers
        3. Loads data and backs up before the upgrade
        4. Installs the upgrade version on 2 nodes and uses swap rebalance to
           upgrade the cluster
        5. Performs operations after upgrading the cluster
6. Restores data and validates
"""
if self.initial_version[:1] == "5" and self.upgrade_versions[0][:1] >= "7":
            self.log.error("\n\n\n*** ERROR: Direct upgrade from {0} to {1} is not supported. "
                           "Test will skip\n\n"
                           .format(self.initial_version[:5], self.upgrade_versions[0][:5]))
return
servers = copy.deepcopy(self.servers)
self.vbuckets = self.initial_vbuckets
if len(servers) != 4:
self.fail("\nThis test needs exactly 4 nodes to run! ")
self._install(servers)
count = 0
nodes_fail_to_install = []
for server in servers:
ready = RestHelper(RestConnection(server)).is_ns_server_running(60)
if ready:
count += 1
else:
nodes_fail_to_install.append(server.ip)
if count < len(servers):
            self.fail("Couchbase Server failed to install on these servers: {0}"
                      .format(nodes_fail_to_install))
if not self.disable_diag_eval_on_non_local_host:
self.enable_diag_eval_on_non_local_hosts()
cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
self.master.rest_username,
self.master.rest_password)
cmd += '-d "path_config:component_path(bin)."'
bin_path = subprocess.check_output(cmd, shell=True)
try:
bin_path = bin_path.decode()
except AttributeError:
pass
if "bin" not in bin_path:
            self.fail("Check if Couchbase Server is installed on %s" % self.master.ip)
else:
self.cli_command_location = bin_path.replace('"', '') + "/"
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],
[servers[int(self.nodes_init) - 1]], [])
rebalance.result()
self.sleep(15)
self.add_built_in_server_user()
rest = RestConnection(self.master)
cb_version = rest.get_nodes_version()
initial_compression_mode = "off"
if 5.5 > float(cb_version[:3]):
self.compression_mode = initial_compression_mode
rest.create_bucket(bucket='default', ramQuotaMB=512,
compressionMode=self.compression_mode)
self.buckets = rest.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
""" create index """
if self.create_gsi:
if "5" > rest.get_nodes_version()[:1]:
if self.gsi_type == "forestdb":
self.fail("Need to set param self.gsi_type=memory_optimized")
rest.set_indexer_storage_mode(storageMode="memory_optimized")
else:
rest.set_indexer_storage_mode(storageMode="plasma")
self.create_indexes()
self.backup_create()
if self.backupset.number_of_backups > 1:
self.log.info("Start doing multiple backup")
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
else:
self.backup_cluster_validate()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.sleep(5)
self.backup_list()
""" Start to online upgrade using swap rebalance """
self.initial_version = self.upgrade_versions[0]
if self.force_version_upgrade:
self.initial_version = self.force_version_upgrade
self.sleep(self.sleep_time,
"Pre-setup of old version is done. Wait for online upgrade to: "
"{0} version".format(self.initial_version))
self.product = 'couchbase-server'
self._install(servers[2:])
self.sleep(self.sleep_time,
"Installation of new version is done. Wait for rebalance")
        self.log.info(
            "Rebalancing in upgraded nodes and rebalancing out nodes with old version")
add_node_services = [self.add_node_services]
if "-" in self.add_node_services:
add_node_services = self.add_node_services.split("-")
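        # Swap rebalance: add the freshly upgraded nodes (servers[2:]) and remove the
        # old-version nodes (servers[:2]) in a single rebalance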
self.cluster.rebalance(servers, servers[2:], servers[:2],
services=add_node_services)
self.sleep(15)
self.backupset.cluster_host = servers[2]
""" Upgrade is done """
self.log.info("** Upgrade is done **")
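        # Poll the upgraded cluster for up to 2 minutes until ns_server reports it healthy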
healthy = False
timeout = 0
while not healthy:
healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()
if not healthy:
if timeout == 120:
self.fail("Node %s is not ready after 2 mins" % self.backupset.cluster_host)
else:
self.sleep(5, "Wait for server up ")
timeout += 5
else:
healthy = True
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
servers[2].ip))
RbacBase().create_user_source(testuser, 'builtin', servers[2])
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')
self.log.info(status)
if self.backupset.number_of_backups_after_upgrade:
self.backupset.number_of_backups += \
self.backupset.number_of_backups_after_upgrade
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
self.add_built_in_server_user(node=servers[2])
for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):
self.log.info("_backup_restore_with_ops #{0} started...".format(i))
validate_dir_struct = True
if i > 2:
validate_dir_struct = False
self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,
validate_directory_structure=validate_dir_struct)
self.backup_list()
""" merged after upgrade """
if self.after_upgrade_merged:
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
self.backup_list()
backupsets = [self.backupset]
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
if self.bucket_flush:
self.log.info("Start to flush bucket")
rest = RestConnection(servers[2])
rest.flush_bucket()
else:
self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
""" Re-create default bucket on upgrade cluster """
RestConnection(servers[2]).create_bucket(bucket='default',
ramQuotaMB=512,
compressionMode=self.compression_mode)
self.sleep(5)
self.total_buckets = len(self.buckets)
if self.after_upgrade_merged:
self.backupset.end = 1
""" restore back to cluster """
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
if self.create_gsi:
self.verify_gsi()
def test_backup_restore_with_python_sdk(self):
"""
1. Note that python sdk has to be installed on all nodes before running this test
2. Connects to default bucket on cluster host using Python SDK
           - loads specified number of items
        3. Creates a backupset, backs up data and validates
4. Restores data and validates
5. Connects to default bucket on restore host using Python SDK
        6. Retrieves cas and flags of each doc on both cluster and restore host
- validates if they are equal
"""
testuser = [{'id': 'default', 'name': 'default', 'password': 'password'}]
rolelist = [{'id': 'default', 'name': 'default', 'roles': 'admin'}]
self.add_built_in_server_user(testuser, rolelist)
try:
cb = Bucket('couchbase://' + self.backupset.cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on cluster host"
" using python SDK")
else:
self.fail("Failed to establish connection to bucket on cluster host"
" using python SDK")
except Exception as ex:
self.fail(str(ex))
self.log.info("Loading bucket with data using python SDK")
for i in range(1, self.num_items + 1):
cb.upsert("doc" + str(i), "value" + str(i))
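        # Record the CAS and flags of every document on the source cluster so they can be
        # compared against the restored cluster later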
cluster_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
cluster_host_data[key] = {}
cluster_host_data[key]["cas"] = str(value_obj.cas)
cluster_host_data[key]["flags"] = str(value_obj.flags)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
self.add_built_in_server_user(testuser, rolelist, self.backupset.restore_cluster_host)
try:
cb = Bucket('couchbase://' + self.backupset.restore_cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on restore host " \
"using python SDK")
else:
self.fail("Failed to establish connection to bucket on restore " \
"host using python SDK")
except Exception as ex:
self.fail(str(ex))
restore_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
restore_host_data[key] = {}
restore_host_data[key]["cas"] = str(value_obj.cas)
restore_host_data[key]["flags"] = str(value_obj.flags)
self.log.info("Comparing cluster host data cas and flags against restore host data")
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
if cluster_host_data[key]["cas"] != restore_host_data[key]["cas"]:
self.fail("CAS mismatch for key: {0}".format(key))
if cluster_host_data[key]["flags"] != restore_host_data[key]["flags"]:
self.fail("Flags mismatch for key: {0}".format(key))
self.log.info("Successfully validated cluster host data cas and flags " \
"against restore host data")
def test_backup_restore_with_flush(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset, backs up data and validates
4. Flushes the bucket
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.flush_bucket()
self.log.info("Flushed default bucket - restoring data now..")
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_recreate(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset, backs up data and validates
4. Deletes the bucket and recreates it
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.delete_bucket()
bucket_name = "default"
rest_helper = RestHelper(rest)
rest.create_bucket(bucket=bucket_name, ramQuotaMB=512)
bucket_ready = rest_helper.vbucket_map_ready(bucket_name)
if not bucket_ready:
self.fail("Bucket {0} is not created after 120 seconds.".format(bucket_name))
self.log.info("Deleted {0} bucket and recreated it - restoring it now.."\
.format(bucket_name))
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_create_negative_args(self):
"""
Validates error messages for negative inputs of create command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
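        # Each invocation below drops or truncates a required flag and asserts on the first
        # line of cbbackupmgr's error output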
cmd = "config"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
# ['cbbackupmgr config [<args>]', '', 'Required Flags:', '', ' -a,--archive The archive directory to use', ' -r,--repo The name of the backup repository to create and', ' configure', '', 'Optional Flags:', '', ' --exclude-buckets A comma separated list of buckets to exclude from', ' backups. All buckets except for the ones specified', ' will be backed up.', ' --include-buckets A comma separated list of buckets to back up. Only', ' buckets in this list are backed up.', ' --disable-bucket-config Disables backing up bucket configuration', ' information', ' --disable-views Disables backing up view definitions', ' --disable-gsi-indexes Disables backing up GSI index definitions', ' --disable-ft-indexes Disables backing up Full Text index definitions', ' --disable-data Disables backing up cluster data', ' -h,--help Prints the help message', '']
self.assertEqual(output[0], "cbbackupmgr config [<args>]", "Expected error message not thrown")
cmd = "config --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "config --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "config --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
self.backup_create()
cmd = "config --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertEqual(output[0], "Backup repository creation failed: Backup Repository `backup` exists",
"Expected error message not thrown")
def test_objstore_negative_args(self):
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = f"{self.cli_command_location}/cbbackupmgr"
# Run all the sub_commands with the (non-objstore) required arguments (so that we are actually checking the
# correct error)
for sub_command in ['backup -a archive -r repo -c localhost -u admin -p password',
'collect-logs -a archive',
'config -a archive -r repo',
'examine -a archive -r repo -k asdf --bucket asdf',
'info -a archive',
'remove -a archive -r repo',
'restore -a archive -r repo -c localhost -u admin -p password']:
# Check all the object store arguments (ones that require an argument have one provided so that we are
# validating cbbackupmgr and not cbflag).
for argument in ['--obj-access-key-id asdf',
'--obj-cacert asdf',
'--obj-endpoint asdf',
'--obj-log-level asdf',
'--obj-no-ssl-verify',
'--obj-region asdf',
'--obj-secret-access-key asdf']:
# Check all the common object store commands
output, error = remote_client.execute_command(f"{command} {sub_command} {argument}")
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = "cloud arguments provided without the cloud scheme prefix"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about providing cloud arguments without the cloud schema prefix")
# Check all the S3 specific arguments
if self.objstore_provider.schema_prefix() == 's3://':
for argument in ['--s3-force-path-style']:
output, error = remote_client.execute_command(f"{command} {sub_command} {argument}")
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg_obj = "s3 arguments provided without the archive 's3://' schema prefix"
if "bucket" in sub_command:
error_mesg_obj = "Unknown flag: --bucket"
self.assertIn(error_mesg_obj, output[0],
"Expected an error about providing S3 specific arguments without the s3:// schema prefix")
# Check all the common objstore flags that require arguments without providing arguments. This is testing
# cbflag.
for argument in ['--obj-access-key-id',
'--obj-cacert',
'--obj-endpoint',
'--obj-log-level',
'--obj-region',
'--obj-secret-access-key']:
# Check that common object store arguments that require a value throw the correct error when a value
# is omitted.
output, error = remote_client.execute_command(
f"{command} {sub_command.replace('archive', self.objstore_provider.schema_prefix() + 'archive')} --obj-staging-dir staging {argument}"
)
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = f"Expected argument for option: {argument}"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about providing cloud arguments without a value")
# Test omitting the staging directory argument
output, error = remote_client.execute_command(
f"{command} {sub_command.replace('archive', self.objstore_provider.schema_prefix() + 'archive')}"
)
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = "you must provide the '--obj-staging-dir' argument"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about not supplying the '--obj-staging-dir' argument")
def test_backup_cluster_restore_negative_args(self):
"""
Validates error messages for negative inputs of cluster or restore command - command parameter
decides which command to test
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd_to_test = self.input.param("command", "backup")
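        # The same negative-argument checks apply to both 'backup' and 'restore'; the
        # 'command' test parameter selects which sub-command is exercised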
if cmd_to_test == "restore":
cmd = cmd_to_test + " --archive {0} --repo {1} --host http://{2}:{3} --username {4} \
--password {5}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
if "7.0.1" in self.cb_version:
self.assertIn("Error restoring cluster: Backup backup doesn't contain any backups", output[-1])
else:
self.assertIn("Error restoring cluster: Repository 'backup' doesn't contain any backups", output[-1])
self.backup_cluster()
cmd = cmd_to_test
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
cmd_test = cmd_to_test
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
self.assertEqual(output[0], "cbbackupmgr {} [<args>]".format(cmd_test))
cmd = cmd_to_test + " --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = cmd_to_test + " --archive xyz -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output("archive directory '{0}xyz' does not exist".format(self.root_path), output))
cmd = cmd_to_test + " --archive {0} -c http://localhost:8091 -u Administrator -p password".format(
self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -c http://localhost:8091 -u Administrator -p password -r".format(
self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -u Administrator -p password".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -c/--cluster",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c -u Administrator -p password -r repo".format(
self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: -c", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c http://{2}:{3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error backing up cluster: cluster credentials required, expected --username/--password or --client-cert/--client-key",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --username", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -p/--password",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo abc --cluster http://{1}:{2} --username {3} \
--password {4}".format(self.backupset.directory,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
part_message = "backing up"
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
if cmd_test == "restore":
part_message = 'restoring'
self.assertTrue("Error {0} cluster: Backup Repository `abc` not found"\
.format(part_message) in output[-1],
"Expected error message not thrown. Actual output %s " % output[-1])
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster abc --username {2} \
--password {3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output(f"Error {part_message} cluster: failed to connect to any host(s) from the connection string", output), "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username abc \
--password {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username {4} \
--password abc".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
def test_backup_list_negative_args(self):
"""
Validates error messages for negative inputs of list command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "info"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr info [<args>]", "Expected error message not thrown")
cmd = "info --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
        cmd = "info --archive xyz"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue(self._check_output("archive directory '{0}xyz' does not exist".format(self.root_path), output))
def test_backup_compact_negative_args(self):
"""
Validates error messages for negative inputs of compact command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "compact"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr compact [<args>]",
"Expected error message not thrown")
cmd = "compact --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive",
"Expected error message not thrown")
cmd = "compact --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1}".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: --backup",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup" \
.format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --backup",
"Expected error message not thrown")
cmd = "compact --archive abc --repo {0} --backup {1}" \
.format(self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertIn("not a directory", output[-1])
cmd = "compact --archive {0} --repo abc --backup {1}" \
.format(self.backupset.directory, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output("Backup Repository `abc` not found", output),
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup abc".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Compacting incr backup `backup` of backup `abc` failed:" in output[-1],
"Expected error message not thrown")
def test_backup_merge_negative_args(self):
"""
Validates error messages for negative inputs of merge command
"""
# This error message is thrown when an invalid date range format is supplied to cbbackupmgr.
invalid_range_format_error = "Error merging data: invalid range format, expected two indexes or two dates; the keywords [start, oldest, end, latest] are also valid"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "merge"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr merge [<args>]", "Expected error message not thrown")
cmd = "merge --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "merge --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -r".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start start --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error merging data: Repository 'backup' doesn't contain any backups",
"Expected error message not thrown")
self._take_n_backups(n=2)
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start bbb --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], invalid_range_format_error, "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --start", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2}".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1} --end aa".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], invalid_range_format_error, "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --end", "Expected error message not thrown")
cmd = "merge --archive xyz --repo {0} --start {1} --end {2}".format(self.backupset.name,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: archive directory '{0}xyz' does not exist".format(self.root_path) in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo abc --start {1} --end {2}".format(self.backupset.directory,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: Backup Repository `abc` not found" in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start abc --end {2}".format(self.backupset.directory,
self.backupset.name, self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(invalid_range_format_error in output[-1], "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end abc".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(invalid_range_format_error in output[-1], "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end {3}".format(self.backupset.directory,
self.backupset.name,
self.backups[1], self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Error merging data: invalid range start cannot be before end" in output[-1], "Expected error message not thrown")
def test_backup_remove_negative_args(self):
"""
Validates error messages for negative inputs of remove command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "remove"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr remove [<args>]", "Expected error message not thrown")
cmd = "remove --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "remove --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "remove --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "remove --archive xyz --repo {0}".format(self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Removing backup repository failed: archive directory '{0}xyz' does not exist".format(self.root_path) in output[-1],
"Expected error message not thrown")
cmd = "remove --archive {0} --repo xyz".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertIn("Backup Repository `xyz` not found", output[-1])
def test_backup_restore_with_views(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple view on source cluster
        4. Backs up data and validates
        5. Restores data and validates
6. Ensures that same view is created in restore cluster
"""
if "ephemeral" in self.input.param("bucket_type", 'membase'):
            self.log.info("\n****** views are not supported on ephemeral buckets ******")
return
rest_src = RestConnection(self.backupset.cluster_host)
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index', 'n1ql'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['index', 'kv'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
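        # Define a development design document with a single view on the source cluster so
        # that the view definition is captured by the backup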
default_map_func = "function (doc) {\n emit(doc._id, doc);\n}"
default_view_name = "test"
default_ddoc_name = "ddoc_test"
prefix = "dev_"
query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
view = View(default_view_name, default_map_func)
task = self.cluster.async_create_view(self.backupset.cluster_host,
default_ddoc_name, view, "default")
task.result()
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
try:
result = self.cluster.query_view(self.backupset.restore_cluster_host,
prefix + default_ddoc_name,
default_view_name, query, timeout=30)
self.assertEqual(len(result['rows']), self.num_items,
"Querying view on restore cluster did not return expected number of items")
self.log.info("Querying view on restore cluster returned expected number of items")
except TimeoutError:
self.fail("View could not be queried in restore cluster within timeout")
def test_backup_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
        4. Backs up data and validates
        5. Restores data and validates
6. Ensures that same gsi index is created in restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
self.cluster_storage_mode = \
rest_src.get_index_settings()["indexer.settings.storage_mode"]
self.log.info("index storage mode: {0}".format(self.cluster_storage_mode))
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index', 'n1ql'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
self.test_storage_mode = self.cluster_storage_mode
if "ephemeral" in self.bucket_type:
            self.log.info("ephemeral bucket requires memory_optimized index storage mode on the backup cluster for GSI.")
self.test_storage_mode = "memory_optimized"
self.quota = self._reset_storage_mode(rest_src, self.test_storage_mode)
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
rest_src.create_bucket(bucket='default', ramQuotaMB=int(self.quota) - 1,
bucketType=self.bucket_type,
evictionPolicy="noEviction")
self.add_built_in_server_user(node=self.backupset.cluster_host)
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)),
start=0, end=self.num_items)
self.buckets = rest_src.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
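        # Create a secondary index on the source cluster with cbindex, using the active
        # storage mode, so the index definition is included in the backup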
cmd = "cbindex -type create -bucket default -using %s -index age -fields=age " \
" -auth %s:%s" % (self.test_storage_mode,
self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
cmd = "cbindex -type list -auth %s:%s" % (self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
try:
if len(output) > 1:
index_name_path = "Index:{0}/{1}".format(self.buckets[0].name, "age")
version = RestConnection(
self.backupset.restore_cluster_host).get_nodes_version()
if version[:1] >= "7":
index_name_path = "Index:{0}/_{0}/_{0}/{1}".format(self.buckets[0].name, "age")
self.assertTrue(self._check_output(index_name_path, output),
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
finally:
if "ephemeral" in self.bucket_type:
self.log.info("reset storage mode back to original")
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
self._reset_storage_mode(rest_src, self.cluster_storage_mode)
self._reset_storage_mode(rest_target, self.cluster_storage_mode)
def test_backup_merge_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
        4. Backs up data and validates
        5. Restores data and validates
6. Ensures that same gsi index is created in restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_src.add_node(self.servers[1].rest_username,
self.servers[1].rest_password,
self.servers[1].ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [],
[])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"Num1": {0}, "Num2": {1}}}',
list(range(100)), list(range(100)),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num1 -fields=Num1"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num2 -fields=Num2"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [],
[])
rebalance.result()
start = self.number_of_backups_taken
end = self.number_of_backups_taken
self.backupset.start = start
self.backupset.end = end
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=")
cmd = "cbindex -type list"
remote_client = RemoteMachineShellConnection(
self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if len(output) > 1:
self.assertTrue("Index:default/Num1" in output[1],
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
def test_backup_restore_with_fts(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple FTS index on source cluster
        4. Backs up data and validates
        5. Restores data and validates
6. Ensures that same FTS index is created in restore cluster
"""
self.test_fts = True
rest_src = RestConnection(self.backupset.cluster_host)
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'index', 'n1ql', 'fts'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].ip, services=['kv', 'fts'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)), start=0,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
rest_src_fts = RestConnection(self.servers[1])
try:
from pytests.fts.fts_callable import FTSCallable
fts_obj = FTSCallable(nodes=self.servers, es_validate=False)
index = fts_obj.create_default_index(
index_name="index_default",
bucket_name="default")
fts_obj.wait_for_indexing_complete()
alias = fts_obj.create_alias(target_indexes=[index])
except Exception as ex:
self.fail(ex)
self.backup_cluster_validate()
if self.bucket_type != "ephemeral":
self._create_restore_cluster()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
rest_target_fts = RestConnection(self.input.clusters[0][1])
status = False
try:
status, content = rest_target_fts.get_fts_index_definition(index.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS index not found in restore cluster as expected")
self.log.info("FTS index found in restore cluster as expected")
status, content = rest_target_fts.get_fts_index_definition(alias.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS alias not found in restore cluster as expected")
self.log.info("FTS alias found in restore cluster as expected")
finally:
rest_src_fts.delete_fts_index(index.name)
rest_src_fts.delete_fts_index(alias.name)
if status:
rest_target_fts.delete_fts_index(index.name)
rest_target_fts.delete_fts_index(alias.name)
def test_backup_restore_with_xdcr(self):
"""
1. Creates a XDCR replication between first two servers
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Backs up data and validates while replication is going on
4. Restores data and validates while replication is going on
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_dest = RestConnection(self.servers[1])
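        # Set up a one-way XDCR replication from the backup cluster to the second server,
        # then back up and restore while the replication and data load are in flight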
try:
rest_src.remove_all_replications()
rest_src.remove_all_remote_clusters()
rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port, self.backupset.cluster_host_username,
self.backupset.cluster_host_password, "C2")
rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(10)
repl_id = rest_src.start_replication('continuous', 'default', "C2")
if repl_id is not None:
self.log.info("Replication created successfully")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
self.sleep(10)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
for task in tasks:
task.result()
finally:
rest_dest.delete_bucket()
def test_backup_restore_with_warmup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Warms up the cluster host
        3. Backs up data and validates while warmup is on
        4. Restores data and validates while warmup is on
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
NodeHelper.do_a_warm_up(self.backupset.cluster_host)
self.sleep(30)
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
""" only membase bucket has warmup state """
if self.bucket_type == "membase":
NodeHelper.wait_warmup_completed([self.backupset.cluster_host])
def stat(self, key):
stats = StatsCommon.get_stats([self.master], 'default', "", key)
val = list(stats.values())[0]
if val.isdigit():
val = int(val)
return val
def load_to_dgm(self, active=75, ttl=0):
"""
        Loads items until the active resident ratio drops to the given 'active' percentage (DGM state),
        where active is an integer value between 0 and 100
"""
doc_size = 1024
curr_active = self.stat('vb_active_perc_mem_resident')
# go into heavy dgm
while curr_active > active:
curr_items = self.stat('curr_items')
gen_create = BlobGenerator('dgmkv', 'dgmkv-', doc_size, start=curr_items + 1, end=curr_items + 50000)
            try:
                self._load_all_buckets(self.master, gen_create, "create", ttl)
            except Exception:
                # ignore transient load failures while pushing the bucket into DGM
                pass
curr_active = self.stat('vb_active_perc_mem_resident')
def test_backup_restore_with_dgm(self):
"""
1. Creates specified bucket on the cluster and loads it until dgm
2. Creates a backup set
3. Backsup data and validates
4. Restores data and validates
"""
self.load_to_dgm()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_auto_compaction(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates auto compaction settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_auto_compaction(dbFragmentThresholdPercentage=80,
dbFragmentThreshold=100,
viewFragmntThresholdPercentage=80,
viewFragmntThreshold=100,
bucket="default")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_update_notifications(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates notification settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.update_notifications("true")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_alerts(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates alerts settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_merge_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=5)
try:
merge_result = self.cluster.async_merge_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
start=1, end=5,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
merge_result.result(timeout=400)
except TimeoutError:
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
for line in output:
if "entbackup" in line:
continue
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Number of backups after merge crash did not match")
self.log.info("Number of backups after merge crash matched")
def test_compact_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
status, output_before_compact, message = self.backup_list()
if not status:
self.fail(message)
try:
compact_result = self.cluster.async_compact_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
backup_to_compact=self.backupset.backup_to_compact,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
compact_result.result(timeout=400)
except TimeoutError:
status, output_after_compact, message = self.backup_list()
if not status:
self.fail(message)
status, message = self.validation_helper.validate_compact_lists(output_before_compact,
output_after_compact,
is_approx=True)
if not status:
self.fail(message)
self.log.info(message)
def test_backup_restore_misc(self):
"""
Misc scenarios for backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.name = "!@#$%^&"
output, error = self.backup_create()
self.assertTrue("Backup `!@#$%^` created successfully" in output[0],
"Backup could not be created with special characters")
self.log.info("Backup created with special characters")
self.backupset.name = "backup"
self.backup_create()
self.backup_cluster()
conn = RemoteMachineShellConnection(self.backupset.backup_host)
command = "ls -tr {0}/{1}/{2} | tail".format(self.backupset.directory, self.backupset.name, self.backups[0])
o, e = conn.execute_command(command)
data_dir = o[0]
conn.execute_command("dd if=/dev/zero of=/tmp/entbackup/backup/" +
str(self.backups[0]) +
"/" + data_dir + "/data/shard_0.sqlite" +
" bs=1024 count=100 seek=10 conv=notrunc")
output, error = self.backup_restore()
self.assertTrue("Restore failed due to an internal issue, see logs for details" in output[-1],
"Expected error not thrown when file is corrupt")
self.log.info("Expected error thrown when file is corrupted")
conn.execute_command("mv /tmp/entbackup/backup /tmp/entbackup/backup2")
conn.disconnect()
output, error = self.backup_restore()
self.assertTrue("Backup Repository `backup` not found" in output[-1], "Expected error message not thrown")
self.log.info("Expected error message thrown")
def test_backup_logs_for_keywords(self):
"""
Inspired by CBQE-6034.
1. Perform a Backup.
2. Scan backup logs for bad keywords.
Keywords:
1. CBQE-6034/MB-41131 - Check cbbackupmgr's build version/hash set correctly at build time
by scanning for 'cbbackupmgr version Unknown' in the logs.
2. Scan for 'panic' in the logs.
"""
# Populate the default bucket on self.master with documents
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
# Create backup archive and repository.
self.backup_create()
# Perform backup.
self.backup_cluster()
# Keywords to fail on (Keyword: str, at_start: bool, lines_before: int, lines_after: int)
bad_keywords = [
("cbbackupmgr version Unknown", False, 0, 0), # Checks cbbackupmgr build version/hash set correctly at build time
( "panic", True, 0, 12) # Checks for the panic keyword at start of sentence
]
# Scan logs for keywords in bad_keywords
for keyword, at_start, lines_before, lines_after in bad_keywords:
found, output, error = \
self._check_output_in_backup_logs(keyword, at_start=at_start, lines_before=lines_before, lines_after=lines_after)
if found:
self.fail(f"Found bad keyword(s) '{keyword}' in backup logs:\n" + "\n".join(output))
""" cbbackup restore enhancement only from vulcan """
def test_cbbackupmgr_collect_logs(self):
"""
cbbackupmgr collect-logs will collect logs into the archive, or
output them to any path supplied with the -o flag
(CB_ARCHIVE_PATH can be used in place of -a)
ex: cbbackupmgr collect-logs -a /tmp/backup
cbbackupmgr collect-logs -a /tmp/backup -o /tmp/logs
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
self._collect_logs()
def test_cbbackupmgr_restore_with_ttl(self):
"""
cbbackupmgr restore --replace-ttl will replace ttl
value with flag --replace-ttl-with
ex: cbbackupmgr restore --replace-ttl all --replace-ttl-with 0
"""
if "5.5" > self.cb_version[:3]:
self.fail("This restore with ttl test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
if self.replace_ttl == "expired":
if self.bk_with_ttl:
self._load_all_buckets(self.master, gen, "create", int(self.bk_with_ttl))
else:
self._load_all_buckets(self.master, gen, "create", 0)
else:
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
if self.bk_with_ttl:
self.sleep(int(self.bk_with_ttl) + 10, "wait items to be expired in backup")
compare_function = "=="
if self.replace_ttl_with:
compare_function = "<="
if self.should_fail:
self.backup_restore()
else:
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=compare_function)
def test_cbbackupmgr_restore_with_vbuckets_filter(self):
"""
cbbackupmgr restore --vbuckets-filter 2,3,4,5,6
this test may require a minimum of 2 server nodes to run
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
self.num_items = 1000
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
if self.should_fail:
self.backup_cluster()
else:
self.backup_cluster_validate()
if self.restore_should_fail:
self.backup_restore()
else:
self.backup_restore_validate()
def test_cbbackupmgr_with_eventing(self):
"""
Create backup cluster with saslbucket (default_bucket=False).
Backup cluster (backup_before_eventing=True for MB-34077)
Create events
Backup cluster
Create restore cluster
Restore data back to restore cluster
Check if metadata restored (backup_before_eventing=True)
Verify events restored back
"""
if "5.5" > self.cb_version[:3]:
self.fail("This eventing test is only for cb version 5.5 and later. ")
from pytests.eventing.eventing_constants import HANDLER_CODE
from lib.testconstants import STANDARD_BUCKET_PORT
self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
self.create_functions_buckets = self.input.param('create_functions_buckets', True)
self.docs_per_day = self.input.param("doc-per-day", 1)
self.use_memory_manager = self.input.param('use_memory_manager', True)
self.backup_before_eventing = self.input.param('backup_before_eventing', False)
bucket_params = self._create_bucket_params(server=self.master, size=256,
replicas=self.num_replicas)
self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.src_bucket = RestConnection(self.master).get_buckets()
self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.backup_create()
if (self.backup_before_eventing):
self.backup_cluster()
self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
self.restServer = self.get_nodes_from_services_map(service_type="eventing")
self.rest = RestConnection(self.restServer)
self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,
batch_size=self.batch_size)
function_name = "Function_{0}_{1}".format(randint(1, 1000000000), self._testMethodName)
self.function_name = function_name[0:90]
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
bk_events_created = False
rs_events_created = False
try:
self.deploy_function(body)
bk_events_created = True
self.backup_cluster()
rest_bk = RestConnection(self.backupset.cluster_host)
bk_fxn = rest_bk.get_all_functions()
backup_index = 0
if self.backup_before_eventing:
backup_index = 1
self.backupset.start = 1
self.backupset.end = 2
if bk_fxn != "":
self._verify_backup_events_definition(json.loads(bk_fxn), body, backup_index = backup_index)
self.backup_restore()
rest_rs = RestConnection(self.backupset.restore_cluster_host)
if self.backup_before_eventing:
self.assertTrue('metadata' in [bucket.name for bucket in rest_rs.get_buckets()])
self.bkrs_resume_function(body, rest_rs)
rs_events_created = True
self._verify_restore_events_definition(bk_fxn)
except Exception as e:
self.fail(e)
finally:
master_nodes = [self.backupset.cluster_host,
self.backupset.restore_cluster_host]
for node in master_nodes:
rest = RestConnection(node)
self.bkrs_undeploy_and_delete_function(body, rest, node)
self.rest = RestConnection(self.master)
raise Exception('Test failed. Just clean up the eventing function until MB-47236 is fixed')
def test_bkrs_logs_when_no_mutations_received(self):
"""
Test that we log an expected message when we don't receive any
mutations for more than 60 seconds. MB-33533.
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0. "
"Version was run with {}".format(version))
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.update_autofailover_settings(enabled=False,
timeout=0)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
# We need to wait until the data transfer starts before we pause memcached.
# Read the backup file output until we find evidence of a DCP connection,
# or the backup finishes.
backup_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = "tail -n 1 {}/logs/backup-*.log | grep ' (DCP) '"\
.format(self.backupset.directory)
Future.wait_until(
lambda: (bool(backup_client.execute_command(command)[0]) or backup_result.done()),
lambda x: x is True,
200,
interval_time=0.1,
exponential_backoff=False)
# If the backup finished and we never saw a DCP connection something's not right.
if backup_result.done():
self.fail("Never found evidence of open DCP stream in backup logs.")
# Pause memcached to trigger the log message.
cluster_client = RemoteMachineShellConnection(self.backupset.cluster_host)
cluster_client.pause_memcached(self.os_name, timesleep=200)
cluster_client.unpause_memcached(self.os_name)
cluster_client.disconnect()
backup_result.result(timeout=200)
expected_message = "(timed out after 3m0s|Stream has been inactive for 1m0s)"
command = "cat {}/logs/backup-*.log | grep -E '{}' "\
.format(self.backupset.directory, expected_message)
output, _ = backup_client.execute_command(command)
if not output:
self.fail("Mutations were blocked for over 60 seconds, "
"but this wasn't logged.")
backup_client.disconnect()
def test_log_to_stdout(self):
"""
Test that if the log-to-stdout flag is provided cbbackupmgr will log to stdout
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
self.backupset.log_to_stdout = True
# Test config
output, err = self.backup_create()
if err:
self.fail("Could not create backup directory")
# This is a line that is normally printed in the logs but should now instead be printed to stdout
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
# Test backup
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
output, err = self.backup_cluster()
if err:
self.fail("Could not backup")
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
self.backupset.force_updates = True
# Test restore
output, err = self.backup_restore()
if err:
self.fail("Could not restore")
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
def test_auto_select_threads(self):
"""
Test that the --auto-select-threads flag actually selects the threads
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
self.backupset.auto_select_threads = True
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
# If the threads were auto-selected then a log message should appear
shell = RemoteMachineShellConnection(self.backupset.backup_host)
output, _ = shell.execute_command("cat {}/logs/backup-*.log | grep"
" '(Cmd) Automatically set the number"
" of threads to'".format(self.backupset.directory))
if not output:
self.fail("Threads were not automatically selected")
# Remove the logs and test the same thing for restore
shell.execute_command("rm -r {}/logs".format(self.backupset.directory))
self.backupset.force_updates = True
self.backup_restore()
output, _ = shell.execute_command("cat {}/logs/backup-*.log | grep"
" '(Cmd) Automatically set the number"
" of threads to'".format(self.backupset.directory))
if not output:
self.fail("Threads were not automatically selected")
shell.disconnect()
def test_backup_remove_take_backup_range(self):
"""
Test the remove --backups flag; it should accept:
- backup indexes e.g. (0,3)
- backup directory name ranges
- dd-mm-yyyy date ranges
To do this the steps are as follow:
1. Load some data to cluster
2. Create 3 backups
3. Try the different inputs and verify expected outputs
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
# Tests based on actual directory names have to be generated dynamically from the directory listing.
test_ranges_positive_cases = [
"1,3", # valid index range
"10-01-2000,10-01-3000", # valid date range
]
test_range_invalid_cases = [
"1,-10", # invalid end range negative number
"0,100", # invalid range as there are only 3 backups
"2,0", # invalid range start bigger than end
"01/01/2000,01/01/3000", # invalid date format
"01-30-2000,01-30-3000", # invalid date format
]
# Load some data into the cluster
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
for test in test_ranges_positive_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(n=3)
# remove the backup directory
success, _, _ = self.backup_remove(test)
if not success:
self.fail("Failed to remove backups")
self._verify_backup_directory_count(0)
self._delete_repo()
for test in test_range_invalid_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(n=3)
success, _, _ = self.backup_remove(test)
if success:
self.fail("Test should have failed")
self._verify_backup_directory_count(3)
self._delete_repo()
# Test based on dynamic file names
self.backup_create()
self._take_n_backups(n=3)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
command = (
f"ls -l {self.backupset.objstore_staging_directory + '/' if self.objstore_provider else ''}"
f"{self.backupset.directory}/{self.backupset.name}"
)
list_dir, _ = shell.execute_command(command)
list_dir = " ".join(list_dir)
shell.disconnect()
dir_names = re.findall(r'(?P<dir>\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}\.\d+(?:(?:[+-]\d{2}_\d{2})|Z))', list_dir)
dir_names.sort()
if len(dir_names) != 3:
self.fail("Expected 3 backups instead have {0}".format(len(dir_names)))
# test non existent directory name
success, _, _ = self.backup_remove("3000-09-30T10_42_37.64647+01_00")
if success:
self.fail("Should not be able to remove non existent directory")
self._verify_backup_directory_count(3)
# test start > backup start
success, _, _ = self.backup_remove("3000-09-30T10_42_37.64647+01_00,3000-09-30T10_43_37.64647+01_00")
if success:
self.fail("Should not be able to remove by directory range where the start is in the future")
self._verify_backup_directory_count(3)
# test start == backup start end > backup end
success, _, _ = self.backup_remove("{0}.64647+01_00,3000-09-30T10_43_37.64647+01_00".format(dir_names[0]))
if success:
self.fail("Should not be able to remove by directory range where the end is in the future")
self._verify_backup_directory_count(3)
# test start before end
success, _, _ = self.backup_remove("{0},{1}".format(dir_names[-1], dir_names[0]))
if success:
self.fail("Should not be able to remove by directory range where start is after end")
self._verify_backup_directory_count(3)
# test valid single directory
success, _, _ = self.backup_remove("{0}".format(dir_names[0]))
if not success:
self.fail("Should not have failed to remove directories by backup directory name")
self._verify_backup_directory_count(2)
# test valid
success, _, _ = self.backup_remove("{0},{1}".format(dir_names[1], dir_names[-1]))
if not success:
self.fail("Should not have failed to remove directories by backup directory name range")
self._verify_backup_directory_count(0)
def test_backup_merge_date_range(self):
"""
Test the merge --date-range flag; it should accept:
- backup indexes e.g. (0,3)
- backup directory name ranges
- dd-mm-yyyy date ranges
To do this the steps are as follow:
1. Load some data to cluster
2. Create 3 backups
3. Try the different inputs and verify expected outputs
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
# Tests based on actual directory names have to be generated dynamically from the directory listing.
test_ranges_positive_cases = [
"0,2", # valid index range
"10-01-2000,10-01-3000", # valid date range
]
test_range_invalid_cases = [
"1,-10", # invalid end range negative number
"0,100", # invalid range as there are only 3 backups
"2,0", # invalid range start bigger than end
"01/01/2000,01/01/3000", # invalid date format
"01-30-2000,01-30-3000", # invalid date format
]
# Load some data into the cluster
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
for test in test_ranges_positive_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(3)
self.backupset.date_range = test
status, output, _ = self.backup_merge()
if not status:
self.fail("Failed to merge backups: {0}".format(output))
self._verify_backup_directory_count(1)
self._delete_repo()
for test in test_range_invalid_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(3)
self.backupset.date_range = test
status, output, _ = self.backup_merge()
if status:
self.fail("Test should have failed")
self._verify_backup_directory_count(3)
# Test based on dynamic file names
shell = RemoteMachineShellConnection(self.backupset.backup_host)
command = (
f"ls -l {self.backupset.objstore_staging_directory + '/' if self.objstore_provider else ''}"
f"{self.backupset.directory}/{self.backupset.name}"
)
list_dir, _ = shell.execute_command(command)
list_dir = " ".join(list_dir)
shell.disconnect()
dir_names = re.findall(r'(?P<dir>\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}\.\d+(?:(?:[+-]\d{2}_\d{2})|Z))', list_dir)
dir_names.sort()
if len(dir_names) != 3:
self.fail("Expected 3 backups instead have {0}".format(len(dir_names)))
# test start > backup start
self.backupset.date_range = "3000-09-30T10_42_37.64647+01_00,3000-09-30T10_43_37.64647+01_00"
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the start is in the future")
self._verify_backup_directory_count(3)
# test start == backup start end > backup end
self.backupset.date_range = "{0}.64647+01_00,3000-09-30T10_43_37.64647+01_00".format(dir_names[0])
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the end is in the future")
self._verify_backup_directory_count(3)
# test start before end
self.backupset.date_range = "{0},{1}".format(dir_names[-1], dir_names[0])
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the start is after the end")
self._verify_backup_directory_count(3)
# test valid
self.backupset.date_range = "{0},{1}".format(dir_names[0], dir_names[-1])
status, _, _ = self.backup_merge()
if not status:
self.fail("Should not have failed to merge")
self._verify_backup_directory_count(1)
def test_info_while_other_task_runs(self):
"""
Test that info can run at the same time as other backup tasks
1. Load some data to the cluster
2. Create a backup repository
3. Start an async backup
4. Constantly run info
5. No errors should be raised
:return:
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
# Test with backup
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
for i in range(10):
_, err = self.backup_info(True)
if err:
self.fail("Should have been able to run at the same time as the backup")
self.sleep(2)
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with concurrent info")
# Test with merge
self._take_n_backups(5)
merge_result = self.cluster.async_merge_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
start=1, end=5,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
for i in range(10):
_, err = self.backup_info(True)
if err:
self.fail("Should have been able to run at the same time as the merge")
self.sleep(2)
output = merge_result.result(timeout=200)
self.assertTrue(self._check_output("Merge completed successfully", output),
"Merge failed while running info at the same time")
def test_config_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_create(del_old_backup=False)
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_backup_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_cluster()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_info_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_info()
self.assertIn('the specified bucket does not exist', output[0].lower())
def test_restore_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
self.restore_only = True
output, _ = self.backup_restore()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_remove_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
_, output, _ = self.backup_remove()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_config_create_multiple_repos_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backupset.name = "another_repo"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
self.backup_create_validate()
def test_backup_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.backup_create_validate()
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
def test_info_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
output, error = self.backup_info()
if error:
self.fail(f"Expected to be able to info backup where staging directory has been removed: {error}")
self.assertEqual(json.loads(output[0])['count'], 1,
"Expected to find a single backup even though the staging directory was removed")
def test_restore_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
self.backup_restore_validate()
def test_remove_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
success, _, _ = self.backup_remove()
self.assertTrue(success, "Expected to have removed backups even though the staging directory was removed")
def test_restore_start_after_end(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 1
output, _ = self.backup_restore()
self.assertEqual(len(output), 1)
self.assertIn("range start", output[0])
self.assertIn("cannot be before end", output[0])
def test_restore_single_full_backup(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = 1
self._all_buckets_flush()
self.backup_restore_validate()
def test_restore_single_incr_backup(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 2
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_start_full_end_incr(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = 2
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_start_incr_end_full(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self.backup_cluster_validate()
self._load_all_buckets(self.master, gen, "create")
self.backupset.full_backup = True
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 3
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_cbbackup_with_big_rev(self):
# automation ticket MB-38683
# verified test failed in build 6.6.0-7680 and passed in 6.6.0-7685
from ep_mc_bin_client import MemcachedClient, MemcachedError
bucket = 'default'
value = "value"
expiry = 0
rev_seq = 2**64-1
key = 'test_with_meta'
mc = MemcachedClient(self.master.ip, 11210)
mc.sasl_auth_plain('Administrator', 'password')
mc.bucket_select(bucket)
self.log.info("pushing a key with large rev_seq {0} to bucket".format(rev_seq))
try:
mc.setWithMeta(key, 'value', 0, 0, rev_seq, 0x1512a3186faa0000)
meta_key = mc.getMeta(key)
self.log.info("key meta: {0}".format(meta_key))
except MemcachedError as error:
msg = "unable to push key : {0} error : {1}"
self.log.error(msg.format(key, error.status))
self.fail(msg.format(key, error.status))
client = RemoteMachineShellConnection(self.backupset.backup_host)
client.execute_command("rm -rf {0}/backup".format(self.tmp_path))
client.execute_command("mkdir {0}backup".format(self.tmp_path))
cmd = "{0}cbbackup{1} -u Administrator -p password http://{2}:8091 {3}backup"\
.format(self.cli_command_location, self.cmd_ext, self.master.ip, self.tmp_path)
try:
cbbackup_run = False
output, error = client.execute_command(cmd, timeout=20)
cbbackup_run = True
if not self._check_output("done", error):
self.fail("Failed to run cbbackup with large rev_seq")
except Exception as e:
if e and not cbbackup_run:
self.fail("Failed to run cbbackup with large rev_seq")
finally:
client.execute_command("rm -rf {0}/backup".format(self.tmp_path))
client.disconnect()
def test_backup_consistent_metadata(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
backup_threads = []
backup_thread_1 = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread_1)
backup_thread_1.start()
backup_thread_2 = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread_2)
backup_thread_2.start()
for backup_thread in backup_threads:
backup_thread.join()
consistent_metadata = False
for output in self.backup_outputs:
if self._check_output("Error backing up cluster: failed to lock archive", output):
consistent_metadata = True
if not consistent_metadata:
self.fail("Backup does not lock while running backup")
def test_restore_consistent_metadata(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster()
restore_threads = []
restore_thread_1 = Thread(target=self.backup_restore)
restore_threads.append(restore_thread_1)
restore_thread_1.start()
restore_thread_2 = Thread(target=self.backup_restore)
restore_threads.append(restore_thread_2)
self.create_bucket_count = 1
restore_thread_2.start()
count = 0
for restore_thread in restore_threads:
restore_thread.join()
consistent_metadata = False
for output in self.restore_outputs:
if self._check_output("Error restoring cluster: failed to lock archive", output):
consistent_metadata = True
break
if not consistent_metadata:
self.fail("Restore does not lock while running restore")
def test_info_backup_merge_remove(self, cluster, no_of_backups):
""" Test Scenario: Create Buckets, Load Documents, Take 'no_of_backups' backups, Merge and Remove a Bucket
This function creates a scenario in which:
1. Buckets are created and loaded with documents.
2. A variable number of Backups >=6 are taken.
3. Backups 2 to 4 are merged.
4. The 2nd last bucket from the end is removed.
Args:
cluster list: A list of 'ServerInfo' that form a cluster to backup.
no_of_backups (int): The number of backups to perform.
"""
# Add built-in user cbadminbucket to backup cluster
self.add_built_in_server_user(node=self.backupset.cluster_host)
# Assemble cluster if more than 1 node in cluster
if len(cluster) > 1:
self.cluster.async_rebalance(cluster, cluster[1:], []).result()
# Take 'no_of_backups' backups
self.backup_create()
self._take_n_backups(n=no_of_backups)
# Merge
self.backupset.start, self.backupset.end = 2, 4
self.backup_merge()
# Remove the second-to-last backup
self.backup_remove(self.backups.pop(-2), verify_cluster_stats=False)
def test_ee_only_features(self):
""" Test that EE only features do not work on CE servers
NOTE: PITR currently does nothing, so it succeeds on CE.
It should be included once PITR is added properly.
The same applies to:
Backing up users,
Auto rebuild of indexes.
Params:
examine (bool): Whether to test examine.
merge (bool): Whether to test merge.
s3 (bool): Whether to test s3 cloud backup.
consistency_check (bool): Whether to test consistency_check.
coll_restore (bool): Whether to test collection/scope level restore.
"""
examine = self.input.param('examine', False)
merge = self.input.param('merge', False)
s3 = self.input.param('s3', False)
consistency_check = self.input.param('consistency_check', False)
coll_restore = self.input.param('coll_restore', False)
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = f"{self.cli_command_location}/cbbackupmgr"
sub_command = ""
self.backup_create()
if examine:
sub_command = 'examine -a archive -r repo -k asdf --collection-string asdf.asdf.asdf'
elif merge:
sub_command = 'merge -a archive -r repo'
elif s3:
sub_command = f'backup -a s3://backup -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password'
elif consistency_check:
sub_command = f'backup -a {self.backupset.directory} -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password --consistency-check 1'
elif coll_restore:
sub_command = f'restore -a {self.backupset.directory} -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password --include-data asdf.asdf.asdf'
if not sub_command:
self.fail("Must provide a subcommand!")
output, error = remote_client.execute_command(f"{command} {sub_command}")
self.log.info(f"ERROR from command: {error}")
self.log.info(f"OUTPUT from command: {output}")
if s3 and "7.0.0" in self.cb_version:
# The s3 error message differs slightly in 7.0.0
self.assertIn("an enterprise only feature", output[0])
else:
self.assertIn("an Enterprise Edition feature", output[0])
def test_analytics_synonyms(self):
""" Test analytics synonyms can be restored
Params:
dataverses (int): Number of dataverses to create.
datasets (int): Number of datasets to create.
synonyms (int): Number of synonyms to create.
"""
class Query:
""" A class to execute analytics queries """
def __init__(self, server, username, password):
self.restconn = RestConnection(server)
def execute(self, query):
return self.restconn.execute_statement_on_cbas(query, None)
def get_synonyms(self):
synonyms = set()
for result in json.loads(self.execute("select * from Metadata.`Synonym`"))['results']:
synonym = result['Synonym']
synonym_name = synonym['SynonymName']
synonym_target = synonym['ObjectDataverseName'] + '.' + synonym['ObjectName']
synonym_dataverse = synonym['DataverseName']
synonyms.add((synonym_name, synonym_target, synonym_dataverse))
return synonyms
def get_synonyms_count(self):
return json.loads(self.execute("select count(*) as count from Metadata.`Synonym`;"))['results'][0]['count']
class Dataset:
def __init__(self, name, bucket, clause=None):
self.name, self.bucket, self.clause = name, bucket, clause
def get_where_clause(self):
return f" WHERE {self.clause}" if self.clause else ""
class Synonym:
def __init__(self, name, target):
self.name, self.target = name, target
class Dataverse:
def __init__(self, name):
self.name = name
self.datasets = set()
self.synonyms = set()
def add_dataset(self, dataset):
self.datasets.add(dataset)
def add_synonym(self, synonym):
self.synonyms.add(synonym)
def next_dataset_name(self):
return f"dat_{len(self.datasets)}"
def next_synonym_name(self):
return f"syn_{len(self.synonyms)}"
class Analytics:
def __init__(self, query):
self.query, self.dataverses = query, set()
def add_dataverse(self, dataverse):
self.dataverses.add(dataverse)
def next_dataverse_name(self):
return f"dtv_{len(self.dataverses)}"
def pick_target_for_synonym(self):
choices = [f"{dataverse.name}.{dataset.name}" for dataverse in self.dataverses for dataset in dataverse.datasets]
if choices:
return choice(choices)
return None
def create(self):
# Create dataverses and datasets
for dataverse in self.dataverses:
self.query.execute(f"CREATE dataverse {dataverse.name}")
for dataset in dataverse.datasets:
self.query.execute(f"CREATE DATASET {dataverse.name}.{dataset.name} ON {dataset.bucket}{dataset.get_where_clause()}")
# Create synonyms
for dataverse in self.dataverses:
for synonym in dataverse.synonyms:
self.query.execute(f"CREATE analytics synonym {dataverse.name}.{synonym.name} FOR {synonym.target}")
def delete(self):
for dataverse in self.dataverses:
for dataset in dataverse.datasets:
self.query.execute(f"DROP DATASET {dataverse.name}.{dataset.name}")
for synonym in dataverse.synonyms:
self.query.execute(f"DROP analytics synonym {dataverse.name}.{synonym.name}")
self.query.execute(f"DROP dataverse {dataverse.name}")
class AnalyticsTest:
def __init__(self, backup, no_of_dataverses, no_of_datasets, no_of_synonyms, analytics_server):
# The base class
self.backup = backup
# Test parameters
self.no_of_dataverses, self.no_of_datasets, self.no_of_synonyms = no_of_dataverses, no_of_datasets, no_of_synonyms
# The number of synonyms that get created
self.no_of_synonyms_created = no_of_dataverses * no_of_synonyms
# The object that's used to run queries on the server running analytics
self.query = Query(analytics_server, analytics_server.rest_username, analytics_server.rest_password)
# The object that represents our current model of analytics
self.analytics = Analytics(self.query)
def test_analytics(self):
# Define the analytics model (i.e. which dataverses, datasets and synonyms are present)
for i in range(self.no_of_dataverses):
dataverse = Dataverse(self.analytics.next_dataverse_name())
self.analytics.add_dataverse(dataverse)
for j in range(self.no_of_datasets):
dataset = Dataset(dataverse.next_dataset_name(), 'default')
dataverse.add_dataset(dataset)
for j in range(self.no_of_synonyms):
synonym = Synonym(dataverse.next_synonym_name(), self.analytics.pick_target_for_synonym())
dataverse.add_synonym(synonym)
# Create dataverses, datasets and synonyms
self.analytics.create()
self.backup.assertEqual(self.query.get_synonyms_count(), self.no_of_synonyms_created)
# Create a repository
self.backup.backup_create()
# Take a backup
self.backup.backup_cluster()
# Delete all analytics related stuff
self.analytics.delete()
self.backup.assertEqual(self.query.get_synonyms_count(), 0)
# Perform a one off restore
self.backup.backup_restore()
synonyms = self.query.get_synonyms()
# Check synonyms have been restored
for dataverse in self.analytics.dataverses:
for synonym in dataverse.synonyms:
self.backup.assertIn((synonym.name, synonym.target, dataverse.name), synonyms)
# The server that will be reprovisioned with analytics
analytics_server = self.restore_cluster_host = self.servers[2]
# Add a server and provision it with analytics
self.add_server_with_custom_services(analytics_server, services=["cbas"])
# Wait for the analytics service to finish warming up
self.assertTrue(RestConnection(analytics_server).wait_until_cbas_is_ready(100))
# Run the analytics test
AnalyticsTest(self, self.input.param("dataverses", 5), self.input.param("datasets", 5), self.input.param("synonyms", 5), analytics_server).test_analytics()
def test_info_after_backup_merge_remove(self):
""" CBQE-5475: Test cbbackupmgr info comprehensively after performing backup, merge and remove
Test params:
flag_depth = [0,1,2,3]
check_tabular = [True, False]
check_all_flag = [True, False]
dgm_run = [True, False]
sasl_buckets >= 1
Comprehensive test: flag_depth=3,check_tabular=True,check_all_flag=True,dgm_run=True,sasl_buckets=2
Scenario:
Perform backup, merge and remove to mutate info output.
Cases tested:
flag_depth>=0: --archive,
flag_depth>=1: --archive --repo
flag_depth>=2: --archive --repo --backup
flag_depth>=3: --archive --repo --backup --collection-string in version>7.0/--bucket in version<=6.6
Output types tested for each of the previous cases:
check_tabular = False: only the --json flag (checks the JSON output)
check_tabular = True: additionally without --json (parses the tabular output and checks it matches the JSON output)
State of the --all flag:
check_all_flag = False:
without --all (e.g. --archive checks the contents of the archive only)
check_all_flag = True:
additionally with --all (e.g. --archive --all checks all repos in the archive, backups in repos, buckets in backups)
Total number of cases: 4 (cases) * 2 (output types) * 2 (all flag state) = 16
"""
import os
import pprint
import itertools
import parse_cbbackupmgr_info as parse_info
pp = pprint.PrettyPrinter(indent=4)
# Params
flag_depth = self.input.param('flag_depth', 3)
check_tabular = self.input.param('check_tabular', True)
check_all_flag = self.input.param('check_all_flag', True)
# The minimum number of backups is 6
min_backups = 6
no_of_backups = max(self.backupset.number_of_backups, min_backups)
if self.backupset.number_of_backups < min_backups:
self.log.warn("number_of_backups increased from {} to {}".format(self.backupset.number_of_backups, min_backups))
# Select backup cluster
cluster = [self.backupset.cluster_host]
# Create Buckets, Load Documents, Take n backups, Merge and Remove a Bucket
self.test_info_backup_merge_remove(cluster, no_of_backups)
# Create lists of expected output from the info command
types = set(['FULL', 'MERGE - FULL', 'MERGE - INCR', 'INCR'])
expected_archs = [os.path.basename(self.backupset.directory)]
expected_repos = [self.backupset.name]
expected_backs = {self.backupset.name: self.backups}
expected_bucks = [bucket.name for bucket in self.buckets]
def check_arch(arch, tabular=False):
""" Checks the archive dictionary.
Args:
arch (dict): A dictionary containing archive information.
Returns:
list: A list containing the repositories in the archive.
"""
expected_keys = [u'archive_uuid', u'name', u'repos']
self.assertTrue(set(expected_keys).issubset(arch.keys()))
archive_uuid, name, repos = [arch[key] for key in expected_keys]
# Check archive name is correct
self.assertTrue(name in expected_archs)
# Check repos names are correct
self.assertEqual(set(expected_repos), set(repo['name'] for repo in repos))
# Check repo size is > 0
self.assertTrue(all(repo['size'] > 0 for repo in repos))
# Check backup sizes are correct
self.assertTrue(all(repo['count'] == len(expected_backs[repo['name']]) for repo in repos))
return repos
def check_repo(repo, tabular=False):
""" Checks the repository dictionary.
Args:
repo (dict): A dictionary containing repository information.
Returns:
list: A list containing the backups in the repository.
"""
expected_keys = [u'count', u'backups', u'name', u'size']
self.assertTrue(set(expected_keys).issubset(repo.keys()))
count, backups, name, size = [repo[key] for key in expected_keys]
# Check repo name is correct
self.assertTrue(name in expected_repos)
# Check repo size is greater than 0
self.assertTrue(size > 0)
# Check number of backups is correct
self.assertEqual(len(backups), len(expected_backs[name]))
# Check backup names
self.assertEqual(set(backup['date'] for backup in backups), set(expected_backs[name]))
# Check backup types
self.assertTrue(set(backup['type'] for backup in backups).issubset(types))
# Check complete status
self.assertTrue(all(backup['complete'] for backup in backups))
return backups
def check_back(backup, tabular=False):
""" Checks the backup dictionary.
Args:
backup (dict): A dictionary containing backup information.
Returns:
list: A list containing the buckets in the backup.
"""
expected_keys = [u'complete', u'fts_alias', u'buckets',
u'source_cluster_uuid', u'source', u'date', u'type', u'events', u'size']
self.assertTrue(set(expected_keys).issubset(backup.keys()))
complete, fts_alias, buckets, source_cluster_uuid, source, date, _type_, events, size = \
[backup[key] for key in expected_keys]
# Check backup name is correct
self.assertTrue(date in self.backups)
# Check backup size is greater than 0
self.assertTrue(size > 0)
# Check type exists
self.assertTrue(_type_ in types)
# Check bucket names
self.assertEqual(set(bucket['name'] for bucket in buckets), set(expected_bucks))
# Check bucket sizes
self.assertTrue(all(bucket['size'] >= 0 for bucket in buckets))
# Check items are equal to self.num_items
self.assertTrue(all(bucket['items'] in [0, self.num_items] for bucket in buckets))
return buckets
def check_buck(bucket, tabular=False):
""" Checks the bucket dictionary.
Args:
bucket (dict): A dictionary containing bucket information.
Returns:
None
"""
expected_keys = [u'index_count', u'views_count', u'items', u'mutations',
u'tombstones', u'fts_count', u'analytics_count', u'size', u'name']
self.assertTrue(set(expected_keys).issubset(bucket.keys()))
index_count, views_count, items, mutations, tombstones, fts_count, \
analytics_count, size, name = [bucket[key] for key in expected_keys]
# Check bucket name
self.assertTrue(name in expected_bucks)
# Check bucket size
self.assertTrue(size >= 0)
# Check bucket items
self.assertTrue(items in [0, self.num_items])
def print_tree(tree):
if self.debug_logs:
pp.pprint(tree)
def parse_output(use_json, output):
""" Parses the JSON/Tabular output into a Python dictionary
Args:
use_json (bool): If True expects JSON output to parse. Otherwise, expects tabular data to parse.
output (list): JSON or Tabular data to parse into a dictionary.
Returns:
dict: A dictionary containing the parsed output.
"""
return json.loads(output[0]) if use_json else parse_info.construct_tree(output)
# Configure initial flags
json_options, all_flag_options = [True], [False]
# Enable tabular output tests
if check_tabular:
json_options.append(False)
# Enable all flag tests
if check_all_flag:
all_flag_options.append(True)
def output_logs(flag_depth, use_json, all_flag):
""" Outputs flags tested in current test case."""
use_json = "--json" if use_json else ""
all_flag = "--all" if all_flag else ""
flags = " ".join(["--archive", "--repo", "--backup", "--bucket"][: flag_depth + 1])
self.log.info("---")
self.log.info(f"Testing Flags: {flags} {use_json} {all_flag}")
self.log.info("---")
# Perform tests
for use_json, all_flag in itertools.product(json_options, all_flag_options):
output_logs(0, use_json, all_flag)
# cbbackupmgr info --archive
arch = parse_output(use_json, self.get_backup_info(json=use_json, all_flag=all_flag))
print_tree(arch)
repos = check_arch(arch)
if all_flag:
[check_buck(buck) for repo in repos for back in check_repo(repo) for buck in check_back(back)]
if flag_depth < 1:
continue
output_logs(1, use_json, all_flag)
# cbbackupmgr info --archive --repo
for repo_name in expected_repos:
repo = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name, all_flag=all_flag))
print_tree(repo)
backs = check_repo(repo)
if all_flag:
[check_buck(buck) for back in backs for buck in check_back(back)]
if flag_depth < 2:
continue
output_logs(2, use_json, all_flag)
# cbbackupmgr info --archive --repo --backup
for repo_name in expected_repos:
for back_name in expected_backs[repo_name]:
back = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name, backup=back_name, all_flag=all_flag))
print_tree(back)
bucks = check_back(back)
if all_flag:
[check_buck(buck) for buck in bucks]
if flag_depth < 3:
continue
output_logs(3, use_json, all_flag)
# cbbackupmgr info --archive --repo --backup --bucket
for repo_name in expected_repos:
for back_name in expected_backs[repo_name]:
for buck_name in expected_bucks:
buck = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name,
backup=back_name, collection_string=buck_name, all_flag=all_flag))
print_tree(buck)
check_buck(buck)
|
"""
Utilities for working with the local dataset cache.
"""
import weakref
from contextlib import contextmanager
import glob
import io
import os
import logging
import tempfile
import json
from abc import ABC
from collections import defaultdict
from dataclasses import dataclass, asdict
from datetime import timedelta
from fnmatch import fnmatch
from os import PathLike
from urllib.parse import urlparse
from pathlib import Path
from typing import (
Optional,
Tuple,
Union,
IO,
Callable,
Set,
List,
Iterator,
Iterable,
Dict,
NamedTuple,
MutableMapping,
)
from hashlib import sha256
from functools import wraps
from weakref import WeakValueDictionary
from zipfile import ZipFile, is_zipfile
import tarfile
import shutil
import pickle
import time
import warnings
import boto3
import botocore
import torch
from filelock import FileLock as _FileLock
from google.cloud import storage
from google.api_core.exceptions import NotFound
import numpy as np
from overrides import overrides
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import lmdb
from torch import Tensor
import huggingface_hub as hf_hub
from allennlp.version import VERSION
from allennlp.common.tqdm import Tqdm
logger = logging.getLogger(__name__)
CACHE_ROOT = Path(os.getenv("ALLENNLP_CACHE_ROOT", Path.home() / ".allennlp"))
CACHE_DIRECTORY = str(CACHE_ROOT / "cache")
DEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / "datasets")
# This variable was deprecated in 0.7.2 since we use a single folder for caching
# all types of files (datasets, models, etc.)
DATASET_CACHE = CACHE_DIRECTORY
# Warn if the user is still using the deprecated cache directory.
if os.path.exists(DEPRECATED_CACHE_DIRECTORY):
logger.warning(
f"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). "
f"Please remove this directory from your system to free up space."
)
class FileLock(_FileLock):
"""
This is just a subclass of the `FileLock` class from the `filelock` library, except that
it adds an additional argument to the `__init__` method: `read_only_ok`.
By default this flag is `False`, which means an exception will be thrown when a lock
can't be acquired due to lack of write permissions.
But if this flag is set to `True`, a warning will be emitted instead of an error when
the lock already exists but can't be acquired because write access is blocked.
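A minimal usage sketch (the lock path below is illustrative, not one used by this module):
```python
with FileLock("/path/to/resource.lock", read_only_ok=True):
    ...  # safe to read the guarded resource even on a read-only filesystem
```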
"""
def __init__(
self, lock_file: Union[str, PathLike], timeout=-1, read_only_ok: bool = False
) -> None:
super().__init__(str(lock_file), timeout=timeout)
self._read_only_ok = read_only_ok
@overrides
def acquire(self, timeout=None, poll_interval=0.05):
try:
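# NOTE: 'poll_intervall' (double 'l') below is intentional; it matches the keyword
# argument name exposed by the filelock version this code targets.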
super().acquire(timeout=timeout, poll_intervall=poll_interval)
except OSError as err:
# OSError could be a lot of different things, but what we're looking
# for in particular are permission errors, such as:
# - errno 1 - EPERM - "Operation not permitted"
# - errno 13 - EACCES - "Permission denied"
# - errno 30 - EROFS - "Read-only file system"
if err.errno not in (1, 13, 30):
raise
if os.path.isfile(self._lock_file) and self._read_only_ok:
warnings.warn(
f"Lacking permissions required to obtain lock '{self._lock_file}'. "
"Race conditions are possible if other processes are writing to the same resource.",
UserWarning,
)
else:
raise
def _resource_to_filename(resource: str, etag: str = None) -> str:
"""
Convert a `resource` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the resource's, delimited
by a period.
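For example (illustrative values; the actual output is a pair of SHA-256 hex digests):
```python
_resource_to_filename("https://example.com/model.tar.gz", etag="abc123")
# -> "<sha256 of the url hex digest>.<sha256 of the etag hex digest>"
```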
"""
resource_bytes = resource.encode("utf-8")
resource_hash = sha256(resource_bytes)
filename = resource_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be `None`) stored for `filename`.
Raise `FileNotFoundError` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def check_tarfile(tar_file: tarfile.TarFile):
"""Tar files can contain files outside of the extraction directory, or symlinks that point
outside the extraction directory. We also don't want any block devices fifos, or other
weird file types extracted. This checks for those issues and throws an exception if there
is a problem."""
base_path = os.path.join("tmp", "pathtest")
base_path = os.path.normpath(base_path)
def normalize_path(path: str) -> str:
path = path.rstrip("/")
path = path.replace("/", os.sep)
path = os.path.join(base_path, path)
path = os.path.normpath(path)
return path
for tarinfo in tar_file:
if not (
tarinfo.isreg()
or tarinfo.isdir()
or tarinfo.isfile()
or tarinfo.islnk()
or tarinfo.issym()
):
raise ValueError(
f"Tar file {str(tar_file.name)} contains invalid member {tarinfo.name}."
)
target_path = normalize_path(tarinfo.name)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to create a file outside of its extraction directory."
)
if tarinfo.islnk() or tarinfo.issym():
target_path = normalize_path(tarinfo.linkname)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to link to a file "
"outside of its extraction directory."
)
def cached_path(
url_or_filename: Union[str, PathLike],
cache_dir: Union[str, Path] = None,
extract_archive: bool = False,
force_extract: bool = False,
) -> str:
"""
Given something that might be a URL or local path, determine which.
If it's a remote resource, download the file and cache it, and
then return the path to the cached file. If it's already a local path,
make sure the file exists and return the path.
For URLs, "http://", "https://", "s3://", "gs://", and "hf://" are all supported.
The latter corresponds to the HuggingFace Hub.
For example, to download the PyTorch weights for the model `epwalsh/bert-xsmall-dummy`
on HuggingFace, you could do:
```python
cached_path("hf://epwalsh/bert-xsmall-dummy/pytorch_model.bin")
```
For paths or URLs that point to a tarfile or zipfile, you can also add a path
    to a specific file to the `url_or_filename` preceded by a "!", and the archive will
be automatically extracted (provided you set `extract_archive` to `True`),
returning the local path to the specific file. For example:
```python
cached_path("model.tar.gz!weights.th", extract_archive=True)
```
# Parameters
url_or_filename : `Union[str, Path]`
A URL or path to parse and possibly download.
cache_dir : `Union[str, Path]`, optional (default = `None`)
The directory to cache downloads.
extract_archive : `bool`, optional (default = `False`)
        If `True`, then zip or tar.gz archives will be automatically extracted,
        in which case the path to the extracted directory is returned.
force_extract : `bool`, optional (default = `False`)
If `True` and the file is an archive file, it will be extracted regardless
of whether or not the extracted directory already exists.
!!! Warning
Use this flag with caution! This can lead to race conditions if used
from multiple processes on the same file.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
cache_dir = os.path.expanduser(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
if not isinstance(url_or_filename, str):
url_or_filename = str(url_or_filename)
file_path: str
extraction_path: Optional[str] = None
# If we're using the /a/b/foo.zip!c/d/file.txt syntax, handle it here.
exclamation_index = url_or_filename.find("!")
if extract_archive and exclamation_index >= 0:
archive_path = url_or_filename[:exclamation_index]
file_name = url_or_filename[exclamation_index + 1 :]
# Call 'cached_path' recursively now to get the local path to the archive itself.
cached_archive_path = cached_path(archive_path, cache_dir, True, force_extract)
if not os.path.isdir(cached_archive_path):
raise ValueError(
f"{url_or_filename} uses the ! syntax, but does not specify an archive file."
)
# Now return the full path to the desired file within the extracted archive,
# provided it exists.
file_path = os.path.join(cached_archive_path, file_name)
if not os.path.exists(file_path):
raise FileNotFoundError(f"file {file_name} not found within {archive_path}")
return file_path
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3", "hf", "gs"):
# URL, so get it from the cache (downloading if necessary)
file_path = get_from_cache(url_or_filename, cache_dir)
if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):
# This is the path the file should be extracted to.
# For example ~/.allennlp/cache/234234.21341 -> ~/.allennlp/cache/234234.21341-extracted
extraction_path = file_path + "-extracted"
else:
url_or_filename = os.path.expanduser(url_or_filename)
if os.path.exists(url_or_filename):
# File, and it exists.
file_path = url_or_filename
# Normalize the path.
url_or_filename = os.path.abspath(url_or_filename)
if (
extract_archive
and os.path.isfile(file_path)
and (is_zipfile(file_path) or tarfile.is_tarfile(file_path))
):
            # We'll use a unique directory within the cache root to extract the archive to.
            # The name of the directory is a hash of the resource file path and its modification
# time. That way, if the file changes, we'll know when to extract it again.
extraction_name = (
_resource_to_filename(url_or_filename, str(os.path.getmtime(file_path)))
+ "-extracted"
)
extraction_path = os.path.join(cache_dir, extraction_name)
elif parsed.scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError(f"file {url_or_filename} not found")
else:
# Something unknown
raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
if extraction_path is not None:
# If the extracted directory already exists (and is non-empty), then no
# need to create a lock file and extract again unless `force_extract=True`.
if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract:
return extraction_path
# Extract it.
with FileLock(extraction_path + ".lock"):
# Check again if the directory exists now that we've acquired the lock.
if os.path.isdir(extraction_path) and os.listdir(extraction_path):
if force_extract:
logger.warning(
"Extraction directory for %s (%s) already exists, "
"overwriting it since 'force_extract' is 'True'",
url_or_filename,
extraction_path,
)
else:
return extraction_path
logger.info("Extracting %s to %s", url_or_filename, extraction_path)
shutil.rmtree(extraction_path, ignore_errors=True)
# We extract first to a temporary directory in case something goes wrong
# during the extraction process so we don't end up with a corrupted cache.
tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0])
try:
if is_zipfile(file_path):
with ZipFile(file_path, "r") as zip_file:
zip_file.extractall(tmp_extraction_dir)
zip_file.close()
else:
tar_file = tarfile.open(file_path)
check_tarfile(tar_file)
tar_file.extractall(tmp_extraction_dir)
tar_file.close()
# Extraction was successful, rename temp directory to final
# cache directory and dump the meta data.
os.replace(tmp_extraction_dir, extraction_path)
meta = _Meta(
resource=url_or_filename,
cached_path=extraction_path,
creation_time=time.time(),
extraction_dir=True,
size=_get_resource_size(extraction_path),
)
meta.to_file()
finally:
shutil.rmtree(tmp_extraction_dir, ignore_errors=True)
return extraction_path
return file_path
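# Illustrative sketch (hypothetical URL and cache directory): downloading an archive,
# extracting it, and resolving a single file inside it with the "!" syntax described above.
def _example_cached_path_usage() -> str:
    return cached_path(
        "https://example.com/archives/model.tar.gz!weights.th",
        cache_dir="/tmp/allennlp-cache",
        extract_archive=True,
    )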
def is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool:
"""
Given something that might be a URL (or might be a local path),
    determine whether it's a URL or an existing file path.
"""
if url_or_filename is None:
return False
url_or_filename = os.path.expanduser(str(url_or_filename))
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3", "gs") or os.path.exists(url_or_filename)
def _split_s3_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "s3")
def _split_gcs_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "gs")
def _split_cloud_path(url: str, provider: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad {} path {}".format(provider, url))
bucket_name = parsed.netloc
provider_path = parsed.path
# Remove '/' at beginning of path.
if provider_path.startswith("/"):
provider_path = provider_path[1:]
return bucket_name, provider_path
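# Illustrative sketch: "s3://my-bucket/path/to/file" splits into ("my-bucket", "path/to/file");
# "gs://" paths behave the same way. The bucket and key here are hypothetical.
def _example_split_cloud_path() -> Tuple[str, str]:
    return _split_cloud_path("s3://my-bucket/path/to/file", "s3")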
def _s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except botocore.exceptions.ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
def _get_s3_resource():
session = boto3.session.Session()
if session.get_credentials() is None:
# Use unsigned requests.
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
return s3_resource
@_s3_request
def _s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@_s3_request
def _s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def _gcs_request(func: Callable):
"""
Wrapper function for gcs requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except NotFound:
raise FileNotFoundError("file {} not found".format(url))
return wrapper
def _get_gcs_client():
storage_client = storage.Client()
return storage_client
def _get_gcs_blob(url: str) -> storage.blob.Blob:
gcs_resource = _get_gcs_client()
bucket_name, gcs_path = _split_gcs_path(url)
bucket = gcs_resource.bucket(bucket_name)
blob = bucket.blob(gcs_path)
return blob
@_gcs_request
def _gcs_md5(url: str) -> Optional[str]:
"""Get GCS object's md5."""
blob = _get_gcs_blob(url)
return blob.md5_hash
@_gcs_request
def _gcs_get(url: str, temp_filename: str) -> None:
"""Pull a file directly from GCS."""
blob = _get_gcs_blob(url)
blob.download_to_filename(temp_filename)
def _session_with_backoff() -> requests.Session:
"""
We ran into an issue where http requests to s3 were timing out,
possibly because we were making too many requests too quickly.
This helper function returns a requests session that has retry-with-backoff
built in. See
<https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>.
"""
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
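# Illustrative sketch (hypothetical URL): using the retry-enabled session for a one-off
# request, so transient 502/503/504 responses are retried with exponential backoff.
def _example_get_with_backoff(url: str = "https://example.com/resource") -> bytes:
    with _session_with_backoff() as session:
        response = session.get(url)
        response.raise_for_status()
        return response.content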
def _http_etag(url: str) -> Optional[str]:
with _session_with_backoff() as session:
response = session.head(url, allow_redirects=True)
if response.status_code != 200:
raise OSError(
"HEAD request failed for url {} with status code {}".format(url, response.status_code)
)
return response.headers.get("ETag")
def _http_get(url: str, temp_file: IO) -> None:
with _session_with_backoff() as session:
req = session.get(url, stream=True)
req.raise_for_status()
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = Tqdm.tqdm(unit="B", total=total, desc="downloading")
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def _find_latest_cached(url: str, cache_dir: Union[str, Path]) -> Optional[str]:
filename = _resource_to_filename(url)
cache_path = os.path.join(cache_dir, filename)
candidates: List[Tuple[str, float]] = []
for path in glob.glob(cache_path + "*"):
if path.endswith(".json") or path.endswith("-extracted") or path.endswith(".lock"):
continue
mtime = os.path.getmtime(path)
candidates.append((path, mtime))
# Sort candidates by modification time, newest first.
candidates.sort(key=lambda x: x[1], reverse=True)
if candidates:
return candidates[0][0]
return None
def _serialize(data):
buffer = pickle.dumps(data, protocol=-1)
return np.frombuffer(buffer, dtype=np.uint8)
_active_tensor_caches: MutableMapping[int, "TensorCache"] = weakref.WeakValueDictionary()
def _unique_file_id(path: Union[str, PathLike]) -> int:
result = os.stat(path).st_ino
assert result != 0
return result
class TensorCache(MutableMapping[str, Tensor], ABC):
"""
This is a key-value store, mapping strings to tensors. The data is kept on disk,
making this class useful as a cache for storing tensors.
`TensorCache` is also safe to access from multiple processes at the same time, so
you can use it in distributed training situations, or from multiple training
runs at the same time.
"""
def __new__(cls, filename: Union[str, PathLike], *, read_only: bool = False, **kwargs):
# This mechanism makes sure we re-use open lmdb file handles. Lmdb has a problem when the same file is
# opened by the same process multiple times. This is our workaround.
filename = str(filename)
try:
result = _active_tensor_caches.get(_unique_file_id(filename))
except FileNotFoundError:
result = None
if result is None:
result = super(TensorCache, cls).__new__(
cls, filename, read_only=read_only, **kwargs
) # type: ignore
return result
def __init__(
self,
filename: Union[str, PathLike],
*,
map_size: int = 1024 * 1024 * 1024 * 1024,
read_only: bool = False,
) -> None:
"""
Creates a `TensorCache` by either opening an existing one on disk, or creating
a new one. Its interface is almost exactly like a Python dictionary, where the
keys are strings and the values are `torch.Tensor`.
Parameters
----------
filename: `str`
Path to the location of the cache
map_size: `int`, optional, defaults to 1TB
This is the maximum size the cache will ever grow to. On reasonable operating
systems, there is no penalty to making this a large value.
`TensorCache` uses a memory-mapped file to store the data. When the file is
first opened, we have to give the maximum size it can ever grow to. This is
that number. Reasonable operating systems don't actually allocate that space
until it is really needed.
"""
self.lmdb_env: lmdb.Environment
if hasattr(self, "lmdb_env"):
# We're being initialized again after a cache hit in _active_tensor_caches, thanks
# to __new__. In this case, we may have to upgrade to read/write, but other than
# that we are good to go.
if read_only:
return
if not self.read_only:
return
# Upgrade a read-only lmdb env to a read/write lmdb env.
filename = self.lmdb_env.path()
old_info = self.lmdb_env.info()
self.lmdb_env.close()
self.lmdb_env = lmdb.open(
filename,
map_size=old_info["map_size"],
subdir=False,
metasync=False,
sync=True,
readahead=False,
meminit=False,
readonly=False,
lock=True,
)
else:
filename = str(filename)
cpu_count = os.cpu_count() or 1
if os.path.exists(filename):
if os.path.isfile(filename):
# If the file is not writable, set read_only to True, but issue a warning.
if not os.access(filename, os.W_OK):
if not read_only:
warnings.warn(
f"File '{filename}' is read-only, so cache will be read-only",
UserWarning,
)
read_only = True
else:
# If it's not a file, raise an error.
raise ValueError("Expect a file, found a directory instead")
use_lock = True
if read_only:
# Check if the lock file is writable. If it's not, then we won't be able to use the lock.
# This is always how lmdb names the lock file.
lock_filename = filename + "-lock"
if os.path.isfile(lock_filename):
use_lock = os.access(lock_filename, os.W_OK)
else:
# If the lock file doesn't exist yet, then the directory needs to be writable in
# order to create and use the lock file.
use_lock = os.access(os.path.dirname(lock_filename), os.W_OK)
if not use_lock:
warnings.warn(
f"Lacking permissions to use lock file on cache '{filename}'.\nUse at your own risk!",
UserWarning,
)
self.lmdb_env = lmdb.open(
filename,
subdir=False,
map_size=map_size,
max_readers=cpu_count * 4,
max_spare_txns=cpu_count * 4,
metasync=False,
sync=True,
readahead=False,
meminit=False,
readonly=read_only,
lock=use_lock,
)
_active_tensor_caches[_unique_file_id(filename)] = self
# We have another cache here that makes sure we return the same object for the same key. Without it,
# you would get a different tensor, using different memory, every time you call __getitem__(), even
# if you call it with the same key.
# The downside is that we can't keep self.cache_cache up to date when multiple processes modify the
# cache at the same time. We can guarantee though that it is up to date as long as processes either
# write new values, or read existing ones.
self.cache_cache: MutableMapping[str, Tensor] = WeakValueDictionary()
@property
def read_only(self) -> bool:
return self.lmdb_env.flags()["readonly"]
def __contains__(self, key: object):
if not isinstance(key, str):
return False
if key in self.cache_cache:
return True
encoded_key = key.encode()
with self.lmdb_env.begin(write=False) as txn:
result = txn.get(encoded_key)
return result is not None
def __getitem__(self, key: str):
try:
return self.cache_cache[key]
except KeyError:
encoded_key = key.encode()
with self.lmdb_env.begin(write=False) as txn:
buffer = txn.get(encoded_key)
if buffer is None:
raise KeyError()
tensor = torch.load(io.BytesIO(buffer), map_location="cpu")
self.cache_cache[key] = tensor
return tensor
def __setitem__(self, key: str, tensor: torch.Tensor):
if self.read_only:
raise ValueError("cannot write to a read-only cache")
tensor = tensor.cpu()
encoded_key = key.encode()
buffer = io.BytesIO()
if tensor.storage().size() != np.prod(tensor.size()):
tensor = tensor.clone()
assert tensor.storage().size() == np.prod(tensor.size())
torch.save(tensor.detach(), buffer, pickle_protocol=pickle.HIGHEST_PROTOCOL)
with self.lmdb_env.begin(write=True) as txn:
txn.put(encoded_key, buffer.getbuffer())
self.cache_cache[key] = tensor
def __delitem__(self, key: str):
if self.read_only:
raise ValueError("cannot write to a read-only cache")
encoded_key = key.encode()
with self.lmdb_env.begin(write=True) as txn:
txn.delete(encoded_key)
try:
del self.cache_cache[key]
except KeyError:
pass
def __del__(self):
if self.lmdb_env is not None:
self.lmdb_env.close()
self.lmdb_env = None
def __len__(self):
return self.lmdb_env.stat()["entries"]
def __iter__(self):
# It is not hard to implement this, but we have not needed it so far.
raise NotImplementedError()
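# Illustrative sketch (hypothetical path): a basic round trip through a `TensorCache`.
# Keys are strings and values are tensors persisted on disk via lmdb.
def _example_tensor_cache_round_trip(cache_file: str = "/tmp/tensors.lmdb") -> Tensor:
    cache = TensorCache(cache_file)
    if "embedding" not in cache:
        cache["embedding"] = torch.zeros(4, 8)
    return cache["embedding"]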
class CacheFile:
"""
This is a context manager that makes robust caching easier.
    On `__enter__`, an IO handle to a temporary file is returned, which can
    be treated as if it's the actual cache file.
    On `__exit__`, the temporary file is renamed to the cache file. If anything
goes wrong while writing to the temporary file, it will be removed.
"""
def __init__(
self, cache_filename: Union[PathLike, str], mode: str = "w+b", suffix: str = ".tmp"
) -> None:
self.cache_filename = (
cache_filename if isinstance(cache_filename, Path) else Path(cache_filename)
)
self.cache_directory = os.path.dirname(self.cache_filename)
self.mode = mode
self.temp_file = tempfile.NamedTemporaryFile(
self.mode, dir=self.cache_directory, delete=False, suffix=suffix
)
def __enter__(self):
return self.temp_file
def __exit__(self, exc_type, exc_value, traceback):
self.temp_file.close()
if exc_value is None:
# Success.
logger.debug(
"Renaming temp file %s to cache at %s", self.temp_file.name, self.cache_filename
)
# Rename the temp file to the actual cache filename.
os.replace(self.temp_file.name, self.cache_filename)
return True
# Something went wrong, remove the temp file.
logger.debug("removing temp file %s", self.temp_file.name)
os.remove(self.temp_file.name)
return False
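# Illustrative sketch (hypothetical path): writing a cache entry atomically. The temporary
# file only replaces `target` if the `with` block exits without an exception.
def _example_cache_file_write(target: str = "/tmp/cache/entry.bin") -> None:
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with CacheFile(target) as cache_file:
        cache_file.write(b"payload")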
class LocalCacheResource:
"""
This is a context manager that can be used to fetch and cache arbitrary resources locally
using the same mechanisms that `cached_path` uses for remote resources.
It can be used, for example, when you want to cache the result of an expensive computation.
# Examples
```python
with LocalCacheResource("long-computation", "v1") as cache:
if cache.cached():
with cache.reader() as f:
# read from cache
else:
with cache.writer() as f:
# do the computation
# ...
# write to cache
```
"""
def __init__(self, resource_name: str, version: str, cache_dir: str = CACHE_DIRECTORY) -> None:
self.resource_name = resource_name
self.version = version
self.cache_dir = cache_dir
self.path = os.path.join(self.cache_dir, _resource_to_filename(resource_name, version))
self.file_lock = FileLock(self.path + ".lock")
def cached(self) -> bool:
return os.path.exists(self.path)
@contextmanager
def writer(self, mode="w"):
if self.cached():
raise ValueError(
f"local cache of {self.resource_name} (version '{self.version}') already exists!"
)
with CacheFile(self.path, mode=mode) as f:
yield f
meta = _Meta(
resource=self.resource_name,
cached_path=self.path,
creation_time=time.time(),
etag=self.version,
size=_get_resource_size(self.path),
)
meta.to_file()
@contextmanager
def reader(self, mode="r"):
if not self.cached():
raise ValueError(
f"local cache of {self.resource_name} (version '{self.version}') does not exist yet!"
)
with open(self.path, mode) as f:
yield f
def __enter__(self):
self.file_lock.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.file_lock.release()
if exc_value is None:
return True
return False
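# Illustrative sketch: caching the result of an (assumed expensive) computation as JSON.
# The resource name and version below are arbitrary labels chosen by the caller.
def _example_local_cache_resource() -> dict:
    os.makedirs(CACHE_DIRECTORY, exist_ok=True)
    with LocalCacheResource("example-computation", "v1") as cache:
        if not cache.cached():
            with cache.writer() as f:
                json.dump({"answer": 42}, f)
        with cache.reader() as f:
            return json.load(f)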
@dataclass
class _Meta:
"""
Any resource that is downloaded to - or extracted in - the cache directory will
have a meta JSON file written next to it, which corresponds to an instance
of this class.
In older versions of AllenNLP, this meta document just had two fields: 'url' and
'etag'. The 'url' field is now the more general 'resource' field, but these old
meta files are still compatible when a `_Meta` is instantiated with the `.from_path()`
class method.
"""
resource: str
"""
URL or normalized path to the resource.
"""
cached_path: str
"""
Path to the corresponding cached version of the resource.
"""
creation_time: float
"""
The unix timestamp of when the corresponding resource was cached or extracted.
"""
size: int = 0
"""
The size of the corresponding resource, in bytes.
"""
etag: Optional[str] = None
"""
Optional ETag associated with the current cached version of the resource.
"""
extraction_dir: bool = False
"""
    Does this meta correspond to an extraction directory?
"""
def to_file(self) -> None:
with open(self.cached_path + ".json", "w") as meta_file:
json.dump(asdict(self), meta_file)
@classmethod
def from_path(cls, path: Union[str, Path]) -> "_Meta":
path = str(path)
with open(path) as meta_file:
data = json.load(meta_file)
# For backwards compat:
if "resource" not in data:
data["resource"] = data.pop("url")
if "creation_time" not in data:
data["creation_time"] = os.path.getmtime(path[:-5])
if "extraction_dir" not in data and path.endswith("-extracted.json"):
data["extraction_dir"] = True
if "cached_path" not in data:
data["cached_path"] = path[:-5]
if "size" not in data:
data["size"] = _get_resource_size(data["cached_path"])
return cls(**data)
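# Illustrative sketch (hypothetical cached file): every cached resource gets a sidecar
# '<cached_path>.json' file, written by `to_file()` and read back by `from_path()`.
def _example_meta_round_trip(cached_file: str = "/tmp/cache/entry.bin") -> "_Meta":
    os.makedirs(os.path.dirname(cached_file), exist_ok=True)
    meta = _Meta(
        resource="https://example.com/entry.bin",
        cached_path=cached_file,
        creation_time=time.time(),
        size=0,
    )
    meta.to_file()
    return _Meta.from_path(cached_file + ".json")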
def _hf_hub_download(
url, model_identifier: str, filename: Optional[str], cache_dir: Union[str, Path]
) -> str:
revision: Optional[str]
if "@" in model_identifier:
repo_id = model_identifier.split("@")[0]
revision = model_identifier.split("@")[1]
else:
repo_id = model_identifier
revision = None
if filename is not None:
hub_url = hf_hub.hf_hub_url(repo_id=repo_id, filename=filename, revision=revision)
cache_path = str(
hf_hub.cached_download(
url=hub_url,
library_name="allennlp",
library_version=VERSION,
cache_dir=cache_dir,
)
)
        # HF writes its own meta '.json' file, which uses the same format we used to use and still
        # support, but it is missing some fields that we like to have.
        # So we overwrite it when we can.
with FileLock(cache_path + ".lock", read_only_ok=True):
meta = _Meta.from_path(cache_path + ".json")
# The file HF writes will have 'resource' set to the 'http' URL corresponding to the 'hf://' URL,
# but we want 'resource' to be the original 'hf://' URL.
if meta.resource != url:
meta.resource = url
meta.to_file()
else:
cache_path = str(hf_hub.snapshot_download(repo_id, revision=revision, cache_dir=cache_dir))
# Need to write the meta file for snapshot downloads if it doesn't exist.
with FileLock(cache_path + ".lock", read_only_ok=True):
if not os.path.exists(cache_path + ".json"):
meta = _Meta(
resource=url,
cached_path=cache_path,
creation_time=time.time(),
extraction_dir=True,
size=_get_resource_size(cache_path),
)
meta.to_file()
return cache_path
# TODO(joelgrus): do we want to do checksums or anything like that?
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
if url.startswith("hf://"):
# Remove the 'hf://' prefix
identifier = url[5:]
if identifier.count("/") > 1:
filename = "/".join(identifier.split("/")[2:])
model_identifier = "/".join(identifier.split("/")[:2])
return _hf_hub_download(url, model_identifier, filename, cache_dir)
elif identifier.count("/") == 1:
# 'hf://' URLs like 'hf://xxxx/yyyy' are potentially ambiguous,
# because this could refer to either:
# 1. the file 'yyyy' in the 'xxxx' repository, or
# 2. the repo 'yyyy' under the user/org name 'xxxx'.
# We default to (1), but if we get a 404 error then we try (2).
try:
model_identifier, filename = identifier.split("/")
return _hf_hub_download(url, model_identifier, filename, cache_dir)
except requests.exceptions.HTTPError as exc:
if exc.response.status_code == 404:
return _hf_hub_download(url, identifier, None, cache_dir)
raise
else:
return _hf_hub_download(url, identifier, None, cache_dir)
# Get eTag to add to filename, if it exists.
try:
if url.startswith("s3://"):
etag = _s3_etag(url)
elif url.startswith("gs://"):
etag = _gcs_md5(url)
else:
etag = _http_etag(url)
except (requests.exceptions.ConnectionError, botocore.exceptions.EndpointConnectionError):
# We might be offline, in which case we don't want to throw an error
# just yet. Instead, we'll try to use the latest cached version of the
# target resource, if it exists. We'll only throw an exception if we
# haven't cached the resource at all yet.
logger.warning(
"Connection error occurred while trying to fetch ETag for %s. "
"Will attempt to use latest cached version of resource",
url,
)
latest_cached = _find_latest_cached(url, cache_dir)
if latest_cached:
logger.info(
"ETag request failed with connection error, using latest cached "
"version of %s: %s",
url,
latest_cached,
)
return latest_cached
else:
logger.error(
"Connection failed while trying to fetch ETag, "
"and no cached version of %s could be found",
url,
)
raise
except OSError:
# OSError may be triggered if we were unable to fetch the eTag.
# If this is the case, try to proceed without eTag check.
etag = None
filename = _resource_to_filename(url, etag)
# Get cache path to put the file.
cache_path = os.path.join(cache_dir, filename)
# Multiple processes may be trying to cache the same file at once, so we need
# to be a little careful to avoid race conditions. We do this using a lock file.
# Only one process can own this lock file at a time, and a process will block
# on the call to `lock.acquire()` until the process currently holding the lock
# releases it.
logger.debug("waiting to acquire lock on %s", cache_path)
with FileLock(cache_path + ".lock", read_only_ok=True):
if os.path.exists(cache_path):
logger.info("cache of %s is up-to-date", url)
else:
with CacheFile(cache_path) as cache_file:
logger.info("%s not found in cache, downloading to %s", url, cache_path)
# GET file object
if url.startswith("s3://"):
_s3_get(url, cache_file)
elif url.startswith("gs://"):
_gcs_get(url, cache_file.name)
else:
_http_get(url, cache_file)
logger.debug("creating metadata file for %s", cache_path)
meta = _Meta(
resource=url,
cached_path=cache_path,
creation_time=time.time(),
etag=etag,
size=_get_resource_size(cache_path),
)
meta.to_file()
return cache_path
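# Illustrative sketch (hypothetical https URL): the forms of URL that `get_from_cache`
# understands, including the 'hf://' variants for the HuggingFace Hub.
def _example_get_from_cache_urls() -> List[str]:
    return [
        get_from_cache("hf://epwalsh/bert-xsmall-dummy/pytorch_model.bin"),  # repo + file
        get_from_cache("hf://epwalsh/bert-xsmall-dummy"),  # whole-repo snapshot
        get_from_cache("https://example.com/vocab.txt"),  # plain http(s) download
    ]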
def read_set_from_file(filename: str) -> Set[str]:
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
def open_compressed(
filename: Union[str, PathLike], mode: str = "rt", encoding: Optional[str] = "UTF-8", **kwargs
):
if not isinstance(filename, str):
filename = str(filename)
open_fn: Callable = open
if filename.endswith(".gz"):
import gzip
open_fn = gzip.open
elif filename.endswith(".bz2"):
import bz2
open_fn = bz2.open
return open_fn(cached_path(filename), mode=mode, encoding=encoding, **kwargs)
def text_lines_from_file(filename: Union[str, PathLike], strip_lines: bool = True) -> Iterator[str]:
with open_compressed(filename, "rt", encoding="UTF-8", errors="replace") as p:
if strip_lines:
for line in p:
yield line.strip()
else:
yield from p
def json_lines_from_file(filename: Union[str, PathLike]) -> Iterable[Union[list, dict]]:
return (json.loads(line) for line in text_lines_from_file(filename))
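# Illustrative sketch (hypothetical path): streaming records out of a (possibly
# gzip-compressed) JSON-lines file without loading the whole file into memory.
def _example_read_jsonl(path: str = "/tmp/data/records.jsonl.gz") -> List[dict]:
    return [record for record in json_lines_from_file(path) if isinstance(record, dict)]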
def _get_resource_size(path: str) -> int:
"""
Get the size of a file or directory.
"""
if os.path.isfile(path):
return os.path.getsize(path)
inodes: Set[int] = set()
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is symbolic link or the same as a file we've already accounted
# for (this could happen with hard links).
inode = os.stat(fp).st_ino
if not os.path.islink(fp) and inode not in inodes:
inodes.add(inode)
total_size += os.path.getsize(fp)
return total_size
class _CacheEntry(NamedTuple):
regular_files: List[_Meta]
extraction_dirs: List[_Meta]
def _find_entries(
patterns: List[str] = None,
cache_dir: Union[str, Path] = None,
) -> Tuple[int, Dict[str, _CacheEntry]]:
"""
Find all cache entries, filtering ones that don't match any of the glob patterns given.
    Returns the total size of the matching entries and a mapping of resource name to meta data.
    The values in the returned mapping are tuples because we separate meta entries that
correspond to extraction directories vs regular cache entries.
"""
cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY)
total_size: int = 0
cache_entries: Dict[str, _CacheEntry] = defaultdict(lambda: _CacheEntry([], []))
for meta_path in glob.glob(str(cache_dir) + "/*.json"):
meta = _Meta.from_path(meta_path)
if patterns and not any(fnmatch(meta.resource, p) for p in patterns):
continue
if meta.extraction_dir:
cache_entries[meta.resource].extraction_dirs.append(meta)
else:
cache_entries[meta.resource].regular_files.append(meta)
total_size += meta.size
# Sort entries for each resource by creation time, newest first.
for entry in cache_entries.values():
entry.regular_files.sort(key=lambda meta: meta.creation_time, reverse=True)
entry.extraction_dirs.sort(key=lambda meta: meta.creation_time, reverse=True)
return total_size, cache_entries
def remove_cache_entries(patterns: List[str], cache_dir: Union[str, Path] = None) -> int:
"""
Remove cache entries matching the given patterns.
Returns the total reclaimed space in bytes.
"""
total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)
for resource, entry in cache_entries.items():
for meta in entry.regular_files:
logger.info("Removing cached version of %s at %s", resource, meta.cached_path)
os.remove(meta.cached_path)
if os.path.exists(meta.cached_path + ".lock"):
os.remove(meta.cached_path + ".lock")
os.remove(meta.cached_path + ".json")
for meta in entry.extraction_dirs:
logger.info("Removing extracted version of %s at %s", resource, meta.cached_path)
shutil.rmtree(meta.cached_path)
if os.path.exists(meta.cached_path + ".lock"):
os.remove(meta.cached_path + ".lock")
os.remove(meta.cached_path + ".json")
return total_size
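# Illustrative sketch: removing cached copies of everything downloaded from a (hypothetical)
# host, using the same glob patterns accepted by `inspect_cache`.
def _example_prune_cache() -> int:
    return remove_cache_entries(["https://example.com/*"])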
def inspect_cache(patterns: List[str] = None, cache_dir: Union[str, Path] = None):
"""
Print out useful information about the cache directory.
"""
from allennlp.common.util import format_timedelta, format_size
cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY)
# Gather cache entries by resource.
total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)
if patterns:
print(f"Cached resources matching {patterns}:")
else:
print("Cached resources:")
for resource, entry in sorted(
cache_entries.items(),
# Sort by creation time, latest first.
key=lambda x: max(
0 if not x[1][0] else x[1][0][0].creation_time,
0 if not x[1][1] else x[1][1][0].creation_time,
),
reverse=True,
):
print("\n-", resource)
if entry.regular_files:
td = timedelta(seconds=time.time() - entry.regular_files[0].creation_time)
n_versions = len(entry.regular_files)
size = entry.regular_files[0].size
print(
f" {n_versions} {"versions" if n_versions > 1 else "version"} cached, "
f"latest {format_size(size)} from {format_timedelta(td)} ago"
)
if entry.extraction_dirs:
td = timedelta(seconds=time.time() - entry.extraction_dirs[0].creation_time)
n_versions = len(entry.extraction_dirs)
size = entry.extraction_dirs[0].size
print(
f" {n_versions} {"versions" if n_versions > 1 else "version"} extracted, "
f"latest {format_size(size)} from {format_timedelta(td)} ago"
)
print(f"\nTotal size: {format_size(total_size)}")
"""
Utilities for working with the local dataset cache.
"""
import weakref
from contextlib import contextmanager
import glob
import io
import os
import logging
import tempfile
import json
from abc import ABC
from collections import defaultdict
from dataclasses import dataclass, asdict
from datetime import timedelta
from fnmatch import fnmatch
from os import PathLike
from urllib.parse import urlparse
from pathlib import Path
from typing import (
Optional,
Tuple,
Union,
IO,
Callable,
Set,
List,
Iterator,
Iterable,
Dict,
NamedTuple,
MutableMapping,
)
from hashlib import sha256
from functools import wraps
from weakref import WeakValueDictionary
from zipfile import ZipFile, is_zipfile
import tarfile
import shutil
import pickle
import time
import warnings
import boto3
import botocore
import torch
from filelock import FileLock as _FileLock
from google.cloud import storage
from google.api_core.exceptions import NotFound
import numpy as np
from overrides import overrides
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import lmdb
from torch import Tensor
import huggingface_hub as hf_hub
from allennlp.version import VERSION
from allennlp.common.tqdm import Tqdm
logger = logging.getLogger(__name__)
CACHE_ROOT = Path(os.getenv("ALLENNLP_CACHE_ROOT", Path.home() / ".allennlp"))
CACHE_DIRECTORY = str(CACHE_ROOT / "cache")
DEPRECATED_CACHE_DIRECTORY = str(CACHE_ROOT / "datasets")
# This variable was deprecated in 0.7.2 since we use a single folder for caching
# all types of files (datasets, models, etc.)
DATASET_CACHE = CACHE_DIRECTORY
# Warn if the user is still using the deprecated cache directory.
if os.path.exists(DEPRECATED_CACHE_DIRECTORY):
logger.warning(
f"Deprecated cache directory found ({DEPRECATED_CACHE_DIRECTORY}). "
f"Please remove this directory from your system to free up space."
)
class FileLock(_FileLock):
"""
This is just a subclass of the `FileLock` class from the `filelock` library, except that
it adds an additional argument to the `__init__` method: `read_only_ok`.
    By default this flag is `False`, meaning an exception will be thrown when a lock
can't be acquired due to lack of write permissions.
But if this flag is set to `True`, a warning will be emitted instead of an error when
the lock already exists but the lock can't be acquired because write access is blocked.
"""
def __init__(
self, lock_file: Union[str, PathLike], timeout=-1, read_only_ok: bool = False
) -> None:
super().__init__(str(lock_file), timeout=timeout)
self._read_only_ok = read_only_ok
@overrides
def acquire(self, timeout=None, poll_interval=0.05):
try:
super().acquire(timeout=timeout, poll_intervall=poll_interval)
except OSError as err:
# OSError could be a lot of different things, but what we're looking
# for in particular are permission errors, such as:
# - errno 1 - EPERM - "Operation not permitted"
# - errno 13 - EACCES - "Permission denied"
# - errno 30 - EROFS - "Read-only file system"
if err.errno not in (1, 13, 30):
raise
if os.path.isfile(self._lock_file) and self._read_only_ok:
warnings.warn(
f"Lacking permissions required to obtain lock '{self._lock_file}'. "
"Race conditions are possible if other processes are writing to the same resource.",
UserWarning,
)
else:
raise
def _resource_to_filename(resource: str, etag: str = None) -> str:
"""
Convert a `resource` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the resource's, delimited
by a period.
"""
resource_bytes = resource.encode("utf-8")
resource_hash = sha256(resource_bytes)
filename = resource_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
return filename
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be `None`) stored for `filename`.
Raise `FileNotFoundError` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def check_tarfile(tar_file: tarfile.TarFile):
"""Tar files can contain files outside of the extraction directory, or symlinks that point
    outside the extraction directory. We also don't want any block devices, fifos, or other
weird file types extracted. This checks for those issues and throws an exception if there
is a problem."""
base_path = os.path.join("tmp", "pathtest")
base_path = os.path.normpath(base_path)
def normalize_path(path: str) -> str:
path = path.rstrip("/")
path = path.replace("/", os.sep)
path = os.path.join(base_path, path)
path = os.path.normpath(path)
return path
for tarinfo in tar_file:
if not (
tarinfo.isreg()
or tarinfo.isdir()
or tarinfo.isfile()
or tarinfo.islnk()
or tarinfo.issym()
):
raise ValueError(
f"Tar file {str(tar_file.name)} contains invalid member {tarinfo.name}."
)
target_path = normalize_path(tarinfo.name)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to create a file outside of its extraction directory."
)
if tarinfo.islnk() or tarinfo.issym():
target_path = normalize_path(tarinfo.linkname)
if os.path.commonprefix([base_path, target_path]) != base_path:
raise ValueError(
f"Tar file {str(tar_file.name)} is trying to link to a file "
"outside of its extraction directory."
)
def cached_path(
url_or_filename: Union[str, PathLike],
cache_dir: Union[str, Path] = None,
extract_archive: bool = False,
force_extract: bool = False,
) -> str:
"""
Given something that might be a URL or local path, determine which.
If it's a remote resource, download the file and cache it, and
then return the path to the cached file. If it's already a local path,
make sure the file exists and return the path.
For URLs, "http://", "https://", "s3://", "gs://", and "hf://" are all supported.
The latter corresponds to the HuggingFace Hub.
For example, to download the PyTorch weights for the model `epwalsh/bert-xsmall-dummy`
on HuggingFace, you could do:
```python
cached_path("hf://epwalsh/bert-xsmall-dummy/pytorch_model.bin")
```
For paths or URLs that point to a tarfile or zipfile, you can also add a path
    to a specific file to the `url_or_filename` preceded by a "!", and the archive will
be automatically extracted (provided you set `extract_archive` to `True`),
returning the local path to the specific file. For example:
```python
cached_path("model.tar.gz!weights.th", extract_archive=True)
```
# Parameters
url_or_filename : `Union[str, Path]`
A URL or path to parse and possibly download.
cache_dir : `Union[str, Path]`, optional (default = `None`)
The directory to cache downloads.
extract_archive : `bool`, optional (default = `False`)
        If `True`, then zip or tar.gz archives will be automatically extracted,
        in which case the path to the extracted directory is returned.
force_extract : `bool`, optional (default = `False`)
If `True` and the file is an archive file, it will be extracted regardless
of whether or not the extracted directory already exists.
!!! Warning
Use this flag with caution! This can lead to race conditions if used
from multiple processes on the same file.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
cache_dir = os.path.expanduser(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
if not isinstance(url_or_filename, str):
url_or_filename = str(url_or_filename)
file_path: str
extraction_path: Optional[str] = None
# If we're using the /a/b/foo.zip!c/d/file.txt syntax, handle it here.
exclamation_index = url_or_filename.find("!")
if extract_archive and exclamation_index >= 0:
archive_path = url_or_filename[:exclamation_index]
file_name = url_or_filename[exclamation_index + 1 :]
# Call 'cached_path' recursively now to get the local path to the archive itself.
cached_archive_path = cached_path(archive_path, cache_dir, True, force_extract)
if not os.path.isdir(cached_archive_path):
raise ValueError(
f"{url_or_filename} uses the ! syntax, but does not specify an archive file."
)
# Now return the full path to the desired file within the extracted archive,
# provided it exists.
file_path = os.path.join(cached_archive_path, file_name)
if not os.path.exists(file_path):
raise FileNotFoundError(f"file {file_name} not found within {archive_path}")
return file_path
parsed = urlparse(url_or_filename)
if parsed.scheme in ("http", "https", "s3", "hf", "gs"):
# URL, so get it from the cache (downloading if necessary)
file_path = get_from_cache(url_or_filename, cache_dir)
if extract_archive and (is_zipfile(file_path) or tarfile.is_tarfile(file_path)):
# This is the path the file should be extracted to.
# For example ~/.allennlp/cache/234234.21341 -> ~/.allennlp/cache/234234.21341-extracted
extraction_path = file_path + "-extracted"
else:
url_or_filename = os.path.expanduser(url_or_filename)
if os.path.exists(url_or_filename):
# File, and it exists.
file_path = url_or_filename
# Normalize the path.
url_or_filename = os.path.abspath(url_or_filename)
if (
extract_archive
and os.path.isfile(file_path)
and (is_zipfile(file_path) or tarfile.is_tarfile(file_path))
):
            # We'll use a unique directory within the cache root to extract the archive to.
            # The name of the directory is a hash of the resource file path and its modification
# time. That way, if the file changes, we'll know when to extract it again.
extraction_name = (
_resource_to_filename(url_or_filename, str(os.path.getmtime(file_path)))
+ "-extracted"
)
extraction_path = os.path.join(cache_dir, extraction_name)
elif parsed.scheme == "":
# File, but it doesn't exist.
raise FileNotFoundError(f"file {url_or_filename} not found")
else:
# Something unknown
raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
if extraction_path is not None:
# If the extracted directory already exists (and is non-empty), then no
# need to create a lock file and extract again unless `force_extract=True`.
if os.path.isdir(extraction_path) and os.listdir(extraction_path) and not force_extract:
return extraction_path
# Extract it.
with FileLock(extraction_path + ".lock"):
# Check again if the directory exists now that we've acquired the lock.
if os.path.isdir(extraction_path) and os.listdir(extraction_path):
if force_extract:
logger.warning(
"Extraction directory for %s (%s) already exists, "
"overwriting it since 'force_extract' is 'True'",
url_or_filename,
extraction_path,
)
else:
return extraction_path
logger.info("Extracting %s to %s", url_or_filename, extraction_path)
shutil.rmtree(extraction_path, ignore_errors=True)
# We extract first to a temporary directory in case something goes wrong
# during the extraction process so we don't end up with a corrupted cache.
tmp_extraction_dir = tempfile.mkdtemp(dir=os.path.split(extraction_path)[0])
try:
if is_zipfile(file_path):
with ZipFile(file_path, "r") as zip_file:
zip_file.extractall(tmp_extraction_dir)
zip_file.close()
else:
tar_file = tarfile.open(file_path)
check_tarfile(tar_file)
tar_file.extractall(tmp_extraction_dir)
tar_file.close()
# Extraction was successful, rename temp directory to final
# cache directory and dump the meta data.
os.replace(tmp_extraction_dir, extraction_path)
meta = _Meta(
resource=url_or_filename,
cached_path=extraction_path,
creation_time=time.time(),
extraction_dir=True,
size=_get_resource_size(extraction_path),
)
meta.to_file()
finally:
shutil.rmtree(tmp_extraction_dir, ignore_errors=True)
return extraction_path
return file_path
def is_url_or_existing_file(url_or_filename: Union[str, Path, None]) -> bool:
"""
Given something that might be a URL (or might be a local path),
    determine whether it's a URL or an existing file path.
"""
if url_or_filename is None:
return False
url_or_filename = os.path.expanduser(str(url_or_filename))
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3", "gs") or os.path.exists(url_or_filename)
def _split_s3_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "s3")
def _split_gcs_path(url: str) -> Tuple[str, str]:
return _split_cloud_path(url, "gs")
def _split_cloud_path(url: str, provider: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad {} path {}".format(provider, url))
bucket_name = parsed.netloc
provider_path = parsed.path
# Remove '/' at beginning of path.
if provider_path.startswith("/"):
provider_path = provider_path[1:]
return bucket_name, provider_path
def _s3_request(func: Callable):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except botocore.exceptions.ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise FileNotFoundError("file {} not found".format(url))
else:
raise
return wrapper
def _get_s3_resource():
session = boto3.session.Session()
if session.get_credentials() is None:
# Use unsigned requests.
s3_resource = session.resource(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
else:
s3_resource = session.resource("s3")
return s3_resource
@_s3_request
def _s3_etag(url: str) -> Optional[str]:
"""Check ETag on S3 object."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@_s3_request
def _s3_get(url: str, temp_file: IO) -> None:
"""Pull a file directly from S3."""
s3_resource = _get_s3_resource()
bucket_name, s3_path = _split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def _gcs_request(func: Callable):
"""
Wrapper function for gcs requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url: str, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except NotFound:
raise FileNotFoundError("file {} not found".format(url))
return wrapper
def _get_gcs_client():
storage_client = storage.Client()
return storage_client
def _get_gcs_blob(url: str) -> storage.blob.Blob:
gcs_resource = _get_gcs_client()
bucket_name, gcs_path = _split_gcs_path(url)
bucket = gcs_resource.bucket(bucket_name)
blob = bucket.blob(gcs_path)
return blob
@_gcs_request
def _gcs_md5(url: str) -> Optional[str]:
"""Get GCS object's md5."""
blob = _get_gcs_blob(url)
return blob.md5_hash
@_gcs_request
def _gcs_get(url: str, temp_filename: str) -> None:
"""Pull a file directly from GCS."""
blob = _get_gcs_blob(url)
blob.download_to_filename(temp_filename)
def _session_with_backoff() -> requests.Session:
"""
We ran into an issue where http requests to s3 were timing out,
possibly because we were making too many requests too quickly.
This helper function returns a requests session that has retry-with-backoff
built in. See
<https://stackoverflow.com/questions/23267409/how-to-implement-retry-mechanism-into-python-requests-library>.
"""
session = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
def _http_etag(url: str) -> Optional[str]:
with _session_with_backoff() as session:
response = session.head(url, allow_redirects=True)
if response.status_code != 200:
raise OSError(
"HEAD request failed for url {} with status code {}".format(url, response.status_code)
)
return response.headers.get("ETag")
def _http_get(url: str, temp_file: IO) -> None:
with _session_with_backoff() as session:
req = session.get(url, stream=True)
req.raise_for_status()
content_length = req.headers.get("Content-Length")
total = int(content_length) if content_length is not None else None
progress = Tqdm.tqdm(unit="B", total=total, desc="downloading")
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def _find_latest_cached(url: str, cache_dir: Union[str, Path]) -> Optional[str]:
filename = _resource_to_filename(url)
cache_path = os.path.join(cache_dir, filename)
candidates: List[Tuple[str, float]] = []
for path in glob.glob(cache_path + "*"):
if path.endswith(".json") or path.endswith("-extracted") or path.endswith(".lock"):
continue
mtime = os.path.getmtime(path)
candidates.append((path, mtime))
# Sort candidates by modification time, newest first.
candidates.sort(key=lambda x: x[1], reverse=True)
if candidates:
return candidates[0][0]
return None
def _serialize(data):
buffer = pickle.dumps(data, protocol=-1)
return np.frombuffer(buffer, dtype=np.uint8)
_active_tensor_caches: MutableMapping[int, "TensorCache"] = weakref.WeakValueDictionary()
def _unique_file_id(path: Union[str, PathLike]) -> int:
result = os.stat(path).st_ino
assert result != 0
return result
class TensorCache(MutableMapping[str, Tensor], ABC):
"""
This is a key-value store, mapping strings to tensors. The data is kept on disk,
making this class useful as a cache for storing tensors.
`TensorCache` is also safe to access from multiple processes at the same time, so
you can use it in distributed training situations, or from multiple training
runs at the same time.
"""
def __new__(cls, filename: Union[str, PathLike], *, read_only: bool = False, **kwargs):
# This mechanism makes sure we re-use open lmdb file handles. Lmdb has a problem when the same file is
# opened by the same process multiple times. This is our workaround.
filename = str(filename)
try:
result = _active_tensor_caches.get(_unique_file_id(filename))
except FileNotFoundError:
result = None
if result is None:
result = super(TensorCache, cls).__new__(
cls, filename, read_only=read_only, **kwargs
) # type: ignore
return result
def __init__(
self,
filename: Union[str, PathLike],
*,
map_size: int = 1024 * 1024 * 1024 * 1024,
read_only: bool = False,
) -> None:
"""
Creates a `TensorCache` by either opening an existing one on disk, or creating
a new one. Its interface is almost exactly like a Python dictionary, where the
keys are strings and the values are `torch.Tensor`.
Parameters
----------
filename: `str`
Path to the location of the cache
map_size: `int`, optional, defaults to 1TB
This is the maximum size the cache will ever grow to. On reasonable operating
systems, there is no penalty to making this a large value.
`TensorCache` uses a memory-mapped file to store the data. When the file is
first opened, we have to give the maximum size it can ever grow to. This is
that number. Reasonable operating systems don't actually allocate that space
until it is really needed.
"""
self.lmdb_env: lmdb.Environment
if hasattr(self, "lmdb_env"):
# We're being initialized again after a cache hit in _active_tensor_caches, thanks
# to __new__. In this case, we may have to upgrade to read/write, but other than
# that we are good to go.
if read_only:
return
if not self.read_only:
return
# Upgrade a read-only lmdb env to a read/write lmdb env.
filename = self.lmdb_env.path()
old_info = self.lmdb_env.info()
self.lmdb_env.close()
self.lmdb_env = lmdb.open(
filename,
map_size=old_info["map_size"],
subdir=False,
metasync=False,
sync=True,
readahead=False,
meminit=False,
readonly=False,
lock=True,
)
else:
filename = str(filename)
cpu_count = os.cpu_count() or 1
if os.path.exists(filename):
if os.path.isfile(filename):
# If the file is not writable, set read_only to True, but issue a warning.
if not os.access(filename, os.W_OK):
if not read_only:
warnings.warn(
f"File '{filename}' is read-only, so cache will be read-only",
UserWarning,
)
read_only = True
else:
# If it's not a file, raise an error.
raise ValueError("Expect a file, found a directory instead")
use_lock = True
if read_only:
# Check if the lock file is writable. If it's not, then we won't be able to use the lock.
# This is always how lmdb names the lock file.
lock_filename = filename + "-lock"
if os.path.isfile(lock_filename):
use_lock = os.access(lock_filename, os.W_OK)
else:
# If the lock file doesn't exist yet, then the directory needs to be writable in
# order to create and use the lock file.
use_lock = os.access(os.path.dirname(lock_filename), os.W_OK)
if not use_lock:
warnings.warn(
f"Lacking permissions to use lock file on cache '{filename}'.\nUse at your own risk!",
UserWarning,
)
self.lmdb_env = lmdb.open(
filename,
subdir=False,
map_size=map_size,
max_readers=cpu_count * 4,
max_spare_txns=cpu_count * 4,
metasync=False,
sync=True,
readahead=False,
meminit=False,
readonly=read_only,
lock=use_lock,
)
_active_tensor_caches[_unique_file_id(filename)] = self
# We have another cache here that makes sure we return the same object for the same key. Without it,
# you would get a different tensor, using different memory, every time you call __getitem__(), even
# if you call it with the same key.
# The downside is that we can't keep self.cache_cache up to date when multiple processes modify the
# cache at the same time. We can guarantee though that it is up to date as long as processes either
# write new values, or read existing ones.
self.cache_cache: MutableMapping[str, Tensor] = WeakValueDictionary()
@property
def read_only(self) -> bool:
return self.lmdb_env.flags()["readonly"]
def __contains__(self, key: object):
if not isinstance(key, str):
return False
if key in self.cache_cache:
return True
encoded_key = key.encode()
with self.lmdb_env.begin(write=False) as txn:
result = txn.get(encoded_key)
return result is not None
def __getitem__(self, key: str):
try:
return self.cache_cache[key]
except KeyError:
encoded_key = key.encode()
with self.lmdb_env.begin(write=False) as txn:
buffer = txn.get(encoded_key)
if buffer is None:
raise KeyError()
tensor = torch.load(io.BytesIO(buffer), map_location="cpu")
self.cache_cache[key] = tensor
return tensor
def __setitem__(self, key: str, tensor: torch.Tensor):
if self.read_only:
raise ValueError("cannot write to a read-only cache")
tensor = tensor.cpu()
encoded_key = key.encode()
buffer = io.BytesIO()
if tensor.storage().size() != np.prod(tensor.size()):
tensor = tensor.clone()
assert tensor.storage().size() == np.prod(tensor.size())
torch.save(tensor.detach(), buffer, pickle_protocol=pickle.HIGHEST_PROTOCOL)
with self.lmdb_env.begin(write=True) as txn:
txn.put(encoded_key, buffer.getbuffer())
self.cache_cache[key] = tensor
def __delitem__(self, key: str):
if self.read_only:
raise ValueError("cannot write to a read-only cache")
encoded_key = key.encode()
with self.lmdb_env.begin(write=True) as txn:
txn.delete(encoded_key)
try:
del self.cache_cache[key]
except KeyError:
pass
def __del__(self):
if self.lmdb_env is not None:
self.lmdb_env.close()
self.lmdb_env = None
def __len__(self):
return self.lmdb_env.stat()["entries"]
def __iter__(self):
# It is not hard to implement this, but we have not needed it so far.
raise NotImplementedError()
class CacheFile:
"""
This is a context manager that makes robust caching easier.
    On `__enter__`, an IO handle to a temporary file is returned, which can
    be treated as if it's the actual cache file.
    On `__exit__`, the temporary file is renamed to the cache file. If anything
goes wrong while writing to the temporary file, it will be removed.
"""
def __init__(
self, cache_filename: Union[PathLike, str], mode: str = "w+b", suffix: str = ".tmp"
) -> None:
self.cache_filename = (
cache_filename if isinstance(cache_filename, Path) else Path(cache_filename)
)
self.cache_directory = os.path.dirname(self.cache_filename)
self.mode = mode
self.temp_file = tempfile.NamedTemporaryFile(
self.mode, dir=self.cache_directory, delete=False, suffix=suffix
)
def __enter__(self):
return self.temp_file
def __exit__(self, exc_type, exc_value, traceback):
self.temp_file.close()
if exc_value is None:
# Success.
logger.debug(
"Renaming temp file %s to cache at %s", self.temp_file.name, self.cache_filename
)
# Rename the temp file to the actual cache filename.
os.replace(self.temp_file.name, self.cache_filename)
return True
# Something went wrong, remove the temp file.
logger.debug("removing temp file %s", self.temp_file.name)
os.remove(self.temp_file.name)
return False
class LocalCacheResource:
"""
This is a context manager that can be used to fetch and cache arbitrary resources locally
using the same mechanisms that `cached_path` uses for remote resources.
It can be used, for example, when you want to cache the result of an expensive computation.
# Examples
```python
with LocalCacheResource("long-computation", "v1") as cache:
if cache.cached():
with cache.reader() as f:
# read from cache
else:
with cache.writer() as f:
# do the computation
# ...
# write to cache
```
"""
def __init__(self, resource_name: str, version: str, cache_dir: str = CACHE_DIRECTORY) -> None:
self.resource_name = resource_name
self.version = version
self.cache_dir = cache_dir
self.path = os.path.join(self.cache_dir, _resource_to_filename(resource_name, version))
self.file_lock = FileLock(self.path + ".lock")
def cached(self) -> bool:
return os.path.exists(self.path)
@contextmanager
def writer(self, mode="w"):
if self.cached():
raise ValueError(
f"local cache of {self.resource_name} (version '{self.version}') already exists!"
)
with CacheFile(self.path, mode=mode) as f:
yield f
meta = _Meta(
resource=self.resource_name,
cached_path=self.path,
creation_time=time.time(),
etag=self.version,
size=_get_resource_size(self.path),
)
meta.to_file()
@contextmanager
def reader(self, mode="r"):
if not self.cached():
raise ValueError(
f"local cache of {self.resource_name} (version '{self.version}') does not exist yet!"
)
with open(self.path, mode) as f:
yield f
def __enter__(self):
self.file_lock.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.file_lock.release()
if exc_value is None:
return True
return False
@dataclass
class _Meta:
"""
Any resource that is downloaded to - or extracted in - the cache directory will
have a meta JSON file written next to it, which corresponds to an instance
of this class.
In older versions of AllenNLP, this meta document just had two fields: 'url' and
'etag'. The 'url' field is now the more general 'resource' field, but these old
meta files are still compatible when a `_Meta` is instantiated with the `.from_path()`
class method.
"""
resource: str
"""
URL or normalized path to the resource.
"""
cached_path: str
"""
Path to the corresponding cached version of the resource.
"""
creation_time: float
"""
The unix timestamp of when the corresponding resource was cached or extracted.
"""
size: int = 0
"""
The size of the corresponding resource, in bytes.
"""
etag: Optional[str] = None
"""
Optional ETag associated with the current cached version of the resource.
"""
extraction_dir: bool = False
"""
Does this meta correspond to an extraction directory?
"""
def to_file(self) -> None:
with open(self.cached_path + ".json", "w") as meta_file:
json.dump(asdict(self), meta_file)
@classmethod
def from_path(cls, path: Union[str, Path]) -> "_Meta":
path = str(path)
with open(path) as meta_file:
data = json.load(meta_file)
# For backwards compat:
if "resource" not in data:
data["resource"] = data.pop("url")
if "creation_time" not in data:
data["creation_time"] = os.path.getmtime(path[:-5])
if "extraction_dir" not in data and path.endswith("-extracted.json"):
data["extraction_dir"] = True
if "cached_path" not in data:
data["cached_path"] = path[:-5]
if "size" not in data:
data["size"] = _get_resource_size(data["cached_path"])
return cls(**data)
def _hf_hub_download(
url, model_identifier: str, filename: Optional[str], cache_dir: Union[str, Path]
) -> str:
revision: Optional[str]
if "@" in model_identifier:
repo_id = model_identifier.split("@")[0]
revision = model_identifier.split("@")[1]
else:
repo_id = model_identifier
revision = None
if filename is not None:
hub_url = hf_hub.hf_hub_url(repo_id=repo_id, filename=filename, revision=revision)
cache_path = str(
hf_hub.cached_download(
url=hub_url,
library_name="allennlp",
library_version=VERSION,
cache_dir=cache_dir,
)
)
# HF writes its own meta '.json' file, which uses the same format we used to use and still
# support, but is missing some fields that we would like to have.
# So we overwrite it when we can.
with FileLock(cache_path + ".lock", read_only_ok=True):
meta = _Meta.from_path(cache_path + ".json")
# The file HF writes will have 'resource' set to the 'http' URL corresponding to the 'hf://' URL,
# but we want 'resource' to be the original 'hf://' URL.
if meta.resource != url:
meta.resource = url
meta.to_file()
else:
cache_path = str(hf_hub.snapshot_download(repo_id, revision=revision, cache_dir=cache_dir))
# Need to write the meta file for snapshot downloads if it doesn't exist.
with FileLock(cache_path + ".lock", read_only_ok=True):
if not os.path.exists(cache_path + ".json"):
meta = _Meta(
resource=url,
cached_path=cache_path,
creation_time=time.time(),
extraction_dir=True,
size=_get_resource_size(cache_path),
)
meta.to_file()
return cache_path
# TODO(joelgrus): do we want to do checksums or anything like that?
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
if url.startswith("hf://"):
# Remove the 'hf://' prefix
identifier = url[5:]
if identifier.count("/") > 1:
filename = "/".join(identifier.split("/")[2:])
model_identifier = "/".join(identifier.split("/")[:2])
return _hf_hub_download(url, model_identifier, filename, cache_dir)
elif identifier.count("/") == 1:
# 'hf://' URLs like 'hf://xxxx/yyyy' are potentially ambiguous,
# because this could refer to either:
# 1. the file 'yyyy' in the 'xxxx' repository, or
# 2. the repo 'yyyy' under the user/org name 'xxxx'.
# We default to (1), but if we get a 404 error then we try (2).
try:
model_identifier, filename = identifier.split("/")
return _hf_hub_download(url, model_identifier, filename, cache_dir)
except requests.exceptions.HTTPError as exc:
if exc.response.status_code == 404:
return _hf_hub_download(url, identifier, None, cache_dir)
raise
else:
return _hf_hub_download(url, identifier, None, cache_dir)
# Get eTag to add to filename, if it exists.
try:
if url.startswith("s3://"):
etag = _s3_etag(url)
elif url.startswith("gs://"):
etag = _gcs_md5(url)
else:
etag = _http_etag(url)
except (requests.exceptions.ConnectionError, botocore.exceptions.EndpointConnectionError):
# We might be offline, in which case we don't want to throw an error
# just yet. Instead, we'll try to use the latest cached version of the
# target resource, if it exists. We'll only throw an exception if we
# haven't cached the resource at all yet.
logger.warning(
"Connection error occurred while trying to fetch ETag for %s. "
"Will attempt to use latest cached version of resource",
url,
)
latest_cached = _find_latest_cached(url, cache_dir)
if latest_cached:
logger.info(
"ETag request failed with connection error, using latest cached "
"version of %s: %s",
url,
latest_cached,
)
return latest_cached
else:
logger.error(
"Connection failed while trying to fetch ETag, "
"and no cached version of %s could be found",
url,
)
raise
except OSError:
# OSError may be triggered if we were unable to fetch the eTag.
# If this is the case, try to proceed without eTag check.
etag = None
filename = _resource_to_filename(url, etag)
# Get cache path to put the file.
cache_path = os.path.join(cache_dir, filename)
# Multiple processes may be trying to cache the same file at once, so we need
# to be a little careful to avoid race conditions. We do this using a lock file.
# Only one process can own this lock file at a time, and a process will block
# on the call to `lock.acquire()` until the process currently holding the lock
# releases it.
logger.debug("waiting to acquire lock on %s", cache_path)
with FileLock(cache_path + ".lock", read_only_ok=True):
if os.path.exists(cache_path):
logger.info("cache of %s is up-to-date", url)
else:
with CacheFile(cache_path) as cache_file:
logger.info("%s not found in cache, downloading to %s", url, cache_path)
# GET file object
if url.startswith("s3://"):
_s3_get(url, cache_file)
elif url.startswith("gs://"):
_gcs_get(url, cache_file.name)
else:
_http_get(url, cache_file)
logger.debug("creating metadata file for %s", cache_path)
meta = _Meta(
resource=url,
cached_path=cache_path,
creation_time=time.time(),
etag=etag,
size=_get_resource_size(cache_path),
)
meta.to_file()
return cache_path
def read_set_from_file(filename: str) -> Set[str]:
"""
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
"""
collection = set()
with open(filename, "r") as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path: str, dot=True, lower: bool = True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
def open_compressed(
filename: Union[str, PathLike], mode: str = "rt", encoding: Optional[str] = "UTF-8", **kwargs
):
if not isinstance(filename, str):
filename = str(filename)
open_fn: Callable = open
if filename.endswith(".gz"):
import gzip
open_fn = gzip.open
elif filename.endswith(".bz2"):
import bz2
open_fn = bz2.open
return open_fn(cached_path(filename), mode=mode, encoding=encoding, **kwargs)
def text_lines_from_file(filename: Union[str, PathLike], strip_lines: bool = True) -> Iterator[str]:
with open_compressed(filename, "rt", encoding="UTF-8", errors="replace") as p:
if strip_lines:
for line in p:
yield line.strip()
else:
yield from p
def json_lines_from_file(filename: Union[str, PathLike]) -> Iterable[Union[list, dict]]:
return (json.loads(line) for line in text_lines_from_file(filename))
def _get_resource_size(path: str) -> int:
"""
Get the size of a file or directory.
"""
if os.path.isfile(path):
return os.path.getsize(path)
inodes: Set[int] = set()
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
# Skip if it is a symbolic link or the same as a file we've already accounted
# for (this could happen with hard links).
inode = os.stat(fp).st_ino
if not os.path.islink(fp) and inode not in inodes:
inodes.add(inode)
total_size += os.path.getsize(fp)
return total_size
class _CacheEntry(NamedTuple):
regular_files: List[_Meta]
extraction_dirs: List[_Meta]
def _find_entries(
patterns: List[str] = None,
cache_dir: Union[str, Path] = None,
) -> Tuple[int, Dict[str, _CacheEntry]]:
"""
Find all cache entries, filtering out ones that don't match any of the given glob patterns.
Returns the total size of the matching entries and a mapping of resource name to metadata.
The values in the returned mapping are tuples because we separate meta entries that
correspond to extraction directories from regular cache entries.
"""
cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY)
total_size: int = 0
cache_entries: Dict[str, _CacheEntry] = defaultdict(lambda: _CacheEntry([], []))
for meta_path in glob.glob(str(cache_dir) + "/*.json"):
meta = _Meta.from_path(meta_path)
if patterns and not any(fnmatch(meta.resource, p) for p in patterns):
continue
if meta.extraction_dir:
cache_entries[meta.resource].extraction_dirs.append(meta)
else:
cache_entries[meta.resource].regular_files.append(meta)
total_size += meta.size
# Sort entries for each resource by creation time, newest first.
for entry in cache_entries.values():
entry.regular_files.sort(key=lambda meta: meta.creation_time, reverse=True)
entry.extraction_dirs.sort(key=lambda meta: meta.creation_time, reverse=True)
return total_size, cache_entries
def remove_cache_entries(patterns: List[str], cache_dir: Union[str, Path] = None) -> int:
"""
Remove cache entries matching the given patterns.
Returns the total reclaimed space in bytes.
"""
total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)
for resource, entry in cache_entries.items():
for meta in entry.regular_files:
logger.info("Removing cached version of %s at %s", resource, meta.cached_path)
os.remove(meta.cached_path)
if os.path.exists(meta.cached_path + ".lock"):
os.remove(meta.cached_path + ".lock")
os.remove(meta.cached_path + ".json")
for meta in entry.extraction_dirs:
logger.info("Removing extracted version of %s at %s", resource, meta.cached_path)
shutil.rmtree(meta.cached_path)
if os.path.exists(meta.cached_path + ".lock"):
os.remove(meta.cached_path + ".lock")
os.remove(meta.cached_path + ".json")
return total_size
def inspect_cache(patterns: List[str] = None, cache_dir: Union[str, Path] = None):
"""
Print out useful information about the cache directory.
"""
from allennlp.common.util import format_timedelta, format_size
cache_dir = os.path.expanduser(cache_dir or CACHE_DIRECTORY)
# Gather cache entries by resource.
total_size, cache_entries = _find_entries(patterns=patterns, cache_dir=cache_dir)
if patterns:
print(f"Cached resources matching {patterns}:")
else:
print("Cached resources:")
for resource, entry in sorted(
cache_entries.items(),
# Sort by creation time, latest first.
key=lambda x: max(
0 if not x[1][0] else x[1][0][0].creation_time,
0 if not x[1][1] else x[1][1][0].creation_time,
),
reverse=True,
):
print("\n-", resource)
if entry.regular_files:
td = timedelta(seconds=time.time() - entry.regular_files[0].creation_time)
n_versions = len(entry.regular_files)
size = entry.regular_files[0].size
print(
f" {n_versions} {'versions' if n_versions > 1 else 'version'} cached, "
f"latest {format_size(size)} from {format_timedelta(td)} ago"
)
if entry.extraction_dirs:
td = timedelta(seconds=time.time() - entry.extraction_dirs[0].creation_time)
n_versions = len(entry.extraction_dirs)
size = entry.extraction_dirs[0].size
print(
f" {n_versions} {'versions' if n_versions > 1 else 'version'} extracted, "
f"latest {format_size(size)} from {format_timedelta(td)} ago"
)
print(f"\nTotal size: {format_size(total_size)}")
|
import datetime
import json
import logging
import os
import urllib.parse
from typing import Optional, Dict, Any
import aws_embedded_metrics
import boto3
import botocore.client
from aws_embedded_metrics.logger.metrics_logger import MetricsLogger
STATIC_HEADERS = {
"Content-Type": "text/plain; charset=utf-8",
"Cache-Control": "no-cache",
"Access-Control-Allow-Origin": "*"
}
RECORD_KEY = "Records"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@aws_embedded_metrics.metric_scope
def lambda_handler(event, context, metrics):
metrics.set_namespace("CompilerExplorer")
logger.info("Received new lambda event %s", event)
if RECORD_KEY in event:
return handle_sqs(event, context)
return handle_http(event, metrics)
def handle_sqs(
event: Dict,
context,
s3_client: Optional[botocore.client.BaseClient] = None,
now: Optional[datetime.datetime] = None):
s3_client = s3_client or boto3.client('s3')
now = now or datetime.datetime.utcnow()
logger.info("Handling %d messages", len(event[RECORD_KEY]))
key = f"stats/{context.function_name}-{now.strftime("%Y-%m-%d-%H:%M:%S.%f")}.log"
body = "\n".join(r["body"] for r in event[RECORD_KEY])
bucket_name = os.environ['S3_BUCKET_NAME']
logger.info("writing to %s with key %s", bucket_name, key)
s3_client.put_object(Bucket=bucket_name, Body=body, Key=key)
def handle_http(
event: Dict,
metrics: MetricsLogger,
sqs_client: Optional[botocore.client.BaseClient] = None,
dynamo_client: Optional[botocore.client.BaseClient] = None,
now: Optional[datetime.datetime] = None):
sqs_client = sqs_client or boto3.client('sqs')
dynamo_client = dynamo_client or boto3.client('dynamodb')
now = now or datetime.datetime.utcnow()
path = event['path'].split('/')[1:]
method = event['httpMethod']
if path == ['pageload'] and method == 'POST':
return handle_pageload(event, metrics, now, os.environ['SQS_STATS_QUEUE'], sqs_client)
if len(path) == 2 and path[0] == 'compiler-build' and method == 'GET':
return handle_compiler_stats(path[1], os.environ['COMPILER_BUILD_TABLE'], dynamo_client)
return dict(
statusCode=404,
statusDescription="404 Not Found",
isBase64Encoded=False,
headers=STATIC_HEADERS,
body="Not found"
)
def handle_pageload(
event: Dict,
metrics: MetricsLogger,
now: datetime.datetime,
queue_url: str,
sqs_client: botocore.client.BaseClient):
date = now.strftime('%Y-%m-%d')
time = now.strftime('%H:%M:%S')
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=json.dumps(dict(type='PageLoad', date=date, time=time, value=''), sort_keys=True))
icons = urllib.parse.unquote_plus(event['queryStringParameters'].get('icons', ''))
sponsors = list(filter(lambda x: x, icons.split(',')))
for sponsor in sponsors:
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=json.dumps(dict(type='SponsorView', date=date, time=time, value=sponsor), sort_keys=True))
metrics.set_property("sponsors", sponsors)
metrics.put_metric("PageLoad", 1)
return dict(
statusCode=200,
statusDescription="200 OK",
isBase64Encoded=False,
headers=STATIC_HEADERS,
body="Ok"
)
# Example query from the UI
# {"TableName":"compiler-builds","ReturnConsumedCapacity":
# "TOTAL","Limit":50,"KeyConditionExpression":"#kn0 = :kv0",
# "ScanIndexForward":false,"FilterExpression":"#n0 = :v0",
# "ExpressionAttributeNames":{"#n0":"status","#kn0":"compiler"},
# "ExpressionAttributeValues":{":v0":{"S":"OK"},":kv0":{"S":"gcc"}}}
def _do_one_query(compiler: str,
table: str,
dynamo_client: botocore.client.BaseClient,
status: Optional[str]) -> Optional[Dict]:
params: Dict[str, Any] = dict(
TableName=table,
Limit=100, # NB: the limit of items to _evaluate_, not the limit of matches
ScanIndexForward=False, # items in reverse order (by time)
KeyConditionExpression='#key = :compiler',
ExpressionAttributeNames={"#key": "compiler"},
ExpressionAttributeValues={":compiler": dict(S=compiler)}
)
if status is not None:
params['FilterExpression'] = '#status = :status_filter'
params['ExpressionAttributeNames']["#status"] = "status"
params['ExpressionAttributeValues'][":status_filter"] = dict(S=status or "na")
query_results = dynamo_client.query(**params)
if query_results['Count']:
most_recent = query_results['Items'][0]
return dict(
path=most_recent['path']['S'],
github_run_id=most_recent['github_run_id']['S'],
timestamp=most_recent['timestamp']['S'],
duration=int(most_recent['duration']['N']),
)
return None
def handle_compiler_stats(
compiler: str,
table: str,
dynamo_client: botocore.client.BaseClient) -> Dict:
result = dict(
last_success=_do_one_query(compiler, table, dynamo_client, "OK"),
last_build=_do_one_query(compiler, table, dynamo_client, None)
)
return dict(
statusCode=200,
statusDescription="200 OK",
isBase64Encoded=False,
headers={
"Content-Type": "application/json; charset=utf-8",
"Cache-Control": "max-age: 180, must-revalidate",
"Access-Control-Allow-Origin": "*"
},
body=json.dumps(result)
)
|
import datetime
import json
import logging
import os
import urllib.parse
from typing import Optional, Dict, Any
import aws_embedded_metrics
import boto3
import botocore.client
from aws_embedded_metrics.logger.metrics_logger import MetricsLogger
STATIC_HEADERS = {
"Content-Type": "text/plain; charset=utf-8",
"Cache-Control": "no-cache",
"Access-Control-Allow-Origin": "*"
}
RECORD_KEY = "Records"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@aws_embedded_metrics.metric_scope
def lambda_handler(event, context, metrics):
metrics.set_namespace("CompilerExplorer")
logger.info("Received new lambda event %s", event)
if RECORD_KEY in event:
return handle_sqs(event, context)
return handle_http(event, metrics)
def handle_sqs(
event: Dict,
context,
s3_client: Optional[botocore.client.BaseClient] = None,
now: Optional[datetime.datetime] = None):
s3_client = s3_client or boto3.client('s3')
now = now or datetime.datetime.utcnow()
logger.info("Handling %d messages", len(event[RECORD_KEY]))
key = f"stats/{context.function_name}-{now.strftime('%Y-%m-%d-%H:%M:%S.%f')}.log"
body = "\n".join(r["body"] for r in event[RECORD_KEY])
bucket_name = os.environ['S3_BUCKET_NAME']
logger.info("writing to %s with key %s", bucket_name, key)
s3_client.put_object(Bucket=bucket_name, Body=body, Key=key)
def handle_http(
event: Dict,
metrics: MetricsLogger,
sqs_client: Optional[botocore.client.BaseClient] = None,
dynamo_client: Optional[botocore.client.BaseClient] = None,
now: Optional[datetime.datetime] = None):
sqs_client = sqs_client or boto3.client('sqs')
dynamo_client = dynamo_client or boto3.client('dynamodb')
now = now or datetime.datetime.utcnow()
path = event['path'].split('/')[1:]
method = event['httpMethod']
if path == ['pageload'] and method == 'POST':
return handle_pageload(event, metrics, now, os.environ['SQS_STATS_QUEUE'], sqs_client)
if len(path) == 2 and path[0] == 'compiler-build' and method == 'GET':
return handle_compiler_stats(path[1], os.environ['COMPILER_BUILD_TABLE'], dynamo_client)
return dict(
statusCode=404,
statusDescription="404 Not Found",
isBase64Encoded=False,
headers=STATIC_HEADERS,
body="Not found"
)
def handle_pageload(
event: Dict,
metrics: MetricsLogger,
now: datetime.datetime,
queue_url: str,
sqs_client: botocore.client.BaseClient):
date = now.strftime('%Y-%m-%d')
time = now.strftime('%H:%M:%S')
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=json.dumps(dict(type='PageLoad', date=date, time=time, value=''), sort_keys=True))
icons = urllib.parse.unquote_plus(event['queryStringParameters'].get('icons', ''))
sponsors = list(filter(lambda x: x, icons.split(',')))
for sponsor in sponsors:
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=json.dumps(dict(type='SponsorView', date=date, time=time, value=sponsor), sort_keys=True))
metrics.set_property("sponsors", sponsors)
metrics.put_metric("PageLoad", 1)
return dict(
statusCode=200,
statusDescription="200 OK",
isBase64Encoded=False,
headers=STATIC_HEADERS,
body="Ok"
)
# Example query from the UI
# {"TableName":"compiler-builds","ReturnConsumedCapacity":
# "TOTAL","Limit":50,"KeyConditionExpression":"#kn0 = :kv0",
# "ScanIndexForward":false,"FilterExpression":"#n0 = :v0",
# "ExpressionAttributeNames":{"#n0":"status","#kn0":"compiler"},
# "ExpressionAttributeValues":{":v0":{"S":"OK"},":kv0":{"S":"gcc"}}}
def _do_one_query(compiler: str,
table: str,
dynamo_client: botocore.client.BaseClient,
status: Optional[str]) -> Optional[Dict]:
params: Dict[str, Any] = dict(
TableName=table,
Limit=100, # NB: the limit of items to _evaluate_, not the limit of matches
ScanIndexForward=False, # items in reverse order (by time)
KeyConditionExpression='#key = :compiler',
ExpressionAttributeNames={"#key": "compiler"},
ExpressionAttributeValues={":compiler": dict(S=compiler)}
)
if status is not None:
params['FilterExpression'] = '#status = :status_filter'
params['ExpressionAttributeNames']["#status"] = "status"
params['ExpressionAttributeValues'][":status_filter"] = dict(S=status or "na")
query_results = dynamo_client.query(**params)
if query_results['Count']:
most_recent = query_results['Items'][0]
return dict(
path=most_recent['path']['S'],
github_run_id=most_recent['github_run_id']['S'],
timestamp=most_recent['timestamp']['S'],
duration=int(most_recent['duration']['N']),
)
return None
def handle_compiler_stats(
compiler: str,
table: str,
dynamo_client: botocore.client.BaseClient) -> Dict:
result = dict(
last_success=_do_one_query(compiler, table, dynamo_client, "OK"),
last_build=_do_one_query(compiler, table, dynamo_client, None)
)
return dict(
statusCode=200,
statusDescription="200 OK",
isBase64Encoded=False,
headers={
"Content-Type": "application/json; charset=utf-8",
"Cache-Control": "max-age: 180, must-revalidate",
"Access-Control-Allow-Origin": "*"
},
body=json.dumps(result)
)
|
import os
import pytest
import sys
import random
import tempfile
import requests
from pathlib import Path
import ray
from ray.test_utils import (run_string_as_driver,
run_string_as_driver_nonblocking)
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
import ray.experimental.internal_kv as kv
from time import sleep
driver_script = """
from time import sleep
import sys
import logging
sys.path.insert(0, "{working_dir}")
import ray
import ray.util
import os
try:
import test_module
except:
pass
try:
job_config = ray.job_config.JobConfig(
runtime_env={runtime_env}
)
if not job_config.runtime_env:
job_config=None
if os.environ.get("USE_RAY_CLIENT"):
ray.client("{address}").env({runtime_env}).namespace("").connect()
else:
ray.init(address="{address}",
job_config=job_config,
logging_level=logging.DEBUG,
namespace=""
)
except ValueError:
print("ValueError")
sys.exit(0)
except TypeError:
print("TypeError")
sys.exit(0)
except:
print("ERROR")
sys.exit(0)
if os.environ.get("EXIT_AFTER_INIT"):
sys.exit(0)
@ray.remote
def run_test():
return test_module.one()
@ray.remote
def check_file(name):
try:
with open(name) as f:
return f.read()
except:
return "FAILED"
@ray.remote
class TestActor(object):
@ray.method(num_returns=1)
def one(self):
return test_module.one()
{execute_statement}
if os.environ.get("USE_RAY_CLIENT"):
ray.util.disconnect()
else:
ray.shutdown()
sleep(10)
"""
def create_file(p):
if not p.parent.exists():
p.parent.mkdir()
with p.open("w") as f:
f.write("Test")
@pytest.fixture(scope="function")
def working_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
module_path = path / "test_module"
module_path.mkdir(parents=True)
init_file = module_path / "__init__.py"
test_file = module_path / "test.py"
with test_file.open(mode="w") as f:
f.write("""
def one():
return 1
""")
with init_file.open(mode="w") as f:
f.write("""
from test_module.test import one
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
yield tmp_dir
os.chdir(old_dir)
def start_client_server(cluster, client_mode):
from ray._private.runtime_env import PKG_DIR
if not client_mode:
return (cluster.address, {}, PKG_DIR)
ray.worker._global_node._ray_params.ray_client_server_port = "10003"
ray.worker._global_node.start_ray_client_server()
return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
import uuid
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(
str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = ray._private.runtime_env._get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
ray._private.runtime_env._dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
"""
The following test cases are related to the runtime env. They follow these steps:
1) Create a temporary dir with the fixture working_dir.
2) Use the template named driver_script defined globally.
3) Overwrite runtime_env and execute_statement in the template.
4) Execute the result as a separate driver and check its output.
A sketch of this pattern is shown right after this docstring.
"""
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_empty_working_dir(ray_start_cluster_head, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
with tempfile.TemporaryDirectory() as working_dir:
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"py_modules": [r"{working_dir}"]
}}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "sys.exit(0)"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out != "ERROR"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
runtime_env = "{ 'working_dir': 10 }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = "{ 'py_modules': [10] }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = f"{{ "working_dir": os.path.join(r"{working_dir}', 'na') }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
runtime_env = f"{{ "py_modules": [os.path.join(r"{working_dir}', 'na')] }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_single_node(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Setup runtime env here
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Testing runtime env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_module(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env with py_modules
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_local_file(two_node_cluster, working_dir, client_mode):
with open(os.path.join(working_dir, "test_file"), "w") as f:
f.write("1")
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
vals = ray.get([check_file.remote('test_file')] * 1000)
print(sum([int(v) for v in vals]))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Check that everything works before excludes are applied
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test"
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"excludes": [
# exclude by relative path
r"test2",
# exclude by dir
r"{str(Path("tmp_dir") / "sub_dir")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_1")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_2")}",
]
}}"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
def create_file(p):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
f.write("Test")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Check that everything works before the .gitignore is added
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test"
with open(f"{working_dir}/.gitignore", "w") as f:
f.write("""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
""")
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
t = out.strip().split("\n")[-1]
assert out.strip().split("\n")[-1] == \
"FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
print(ray.get_runtime_context().runtime_env["working_dir"])
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == working_dir
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_uri(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
import ray._private.runtime_env as runtime_env
import tempfile
with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file:
pkg_name = runtime_env.get_project_package_name(working_dir, [], [])
pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name
runtime_env.create_project_package(working_dir, [], [], tmp_file.name)
runtime_env.push_package(pkg_uri, tmp_file.name)
runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
# pinned uri will not be deleted
print(list(kv._internal_kv_list("")))
assert len(kv._internal_kv_list("pingcs://")) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_regular_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_detached_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor", lifetime="detached").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
# It's a detached actor, so it should still be there
assert len(kv._internal_kv_list("gcs://")) == 1
assert len(list(Path(PKG_DIR).iterdir())) == 2
pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0]
import sys
sys.path.insert(0, str(pkg_dir))
test_actor = ray.get_actor("test_actor")
assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000
ray.kill(test_actor)
from time import sleep
sleep(5)
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir):
# start job_config=None
# start job_config=something
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = None
# To make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
# Have one running with job config = None
proc = run_string_as_driver_nonblocking(script, env)
# waiting it to be up
sleep(5)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the second one, which should still work through the Ray Client server.
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=None
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# To make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = None
# Execute the following in the second one which should
# succeed
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "OK", out
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=something else
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# To make the first one hang there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = f"""
{{ "working_dir": test_module.__path__[0] }}""" # noqa: F541
# Execute the following cmd in the second one and ensure that
# it is able to run.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
proc.kill()
proc.wait()
assert out.strip().split()[-1] == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_util_without_job_config(shutdown_only):
from ray.cluster_utils import Cluster
with tempfile.TemporaryDirectory() as tmp_dir:
with (Path(tmp_dir) / "lib.py").open("w") as f:
f.write("""
def one():
return 1
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
cluster = Cluster()
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
(address, env, PKG_DIR) = start_client_server(cluster, True)
script = f"""
import ray
import ray.util
import os
ray.util.connect("{address}", job_config=None)
@ray.remote
def run():
from lib import one
return one()
print(ray.get([run.remote()])[0])
"""
out = run_string_as_driver(script, env)
print(out)
os.chdir(old_dir)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_init(shutdown_only):
with tempfile.TemporaryDirectory() as tmp_dir:
old_dir = os.getcwd()
os.chdir(tmp_dir)
with open("hello", "w") as f:
f.write("world")
ray.init(runtime_env={"working_dir": "."})
@ray.remote
class Test:
def test(self):
with open("hello") as f:
return f.read()
t = Test.remote()
assert ray.get(t.test.remote()) == "world"
os.chdir(old_dir)
def test_get_wheel_filename():
ray_version = "2.0.0.dev0"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
filename = get_wheel_filename(sys_platform, ray_version,
py_version)
prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
url = f"{prefix}{filename}"
assert requests.head(url).status_code == 200
def test_get_master_wheel_url():
ray_version = "2.0.0.dev0"
test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
url = get_master_wheel_url(test_commit, sys_platform, ray_version,
py_version)
assert requests.head(url).status_code == 200, url
def test_get_release_wheel_url():
test_commits = {
"1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c",
"1.3.0": "0b4b444fadcdc23226e11fef066b982175804232",
"1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b"
}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
for version, commit in test_commits.items():
url = get_release_wheel_url(commit, sys_platform, version,
py_version)
assert requests.head(url).status_code == 200, url
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_task(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_actor(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(
runtime_env={"env_vars": {
"foo": "job"
}}))
@ray.remote
def env_from_job():
return os.environ.get("foo")
assert ray.get(env_from_job.remote()) == "job"
@ray.remote(runtime_env={"env_vars": {"foo": "task"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "task"
@ray.remote(runtime_env={"env_vars": {"foo": "actor"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "actor"
# Test that runtime_env can be overridden by specifying .options().
assert ray.get(
f.options(runtime_env={
"env_vars": {
"foo": "new"
}
}).remote()) == "new"
a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote()
assert ray.get(a.g.remote()) == "new2"
def test_container_option_serialize():
runtime_env = {
"container": {
"image": "ray:latest",
"run_options": ["--name=test"]
}
}
job_config = ray.job_config.JobConfig(runtime_env=runtime_env)
job_config_serialized = job_config.serialize()
# job_config_serialized is the serialized JobConfig protobuf string;
# job_config.runtime_env.raw_json has the container_option info, and
# job_config.serialized_runtime_env also has the container_option info
assert job_config_serialized.count(b"image") == 2
def test_working_dir_override_failure(shutdown_only):
ray.init()
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
with pytest.raises(NotImplementedError):
f.remote()
@ray.remote
def g():
pass
with pytest.raises(NotImplementedError):
g.options(runtime_env={"working_dir": "."}).remote()
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
with pytest.raises(NotImplementedError):
A.remote()
@ray.remote
class B:
pass
with pytest.raises(NotImplementedError):
B.options(runtime_env={"working_dir": "."}).remote()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
|
import os
import pytest
import sys
import random
import tempfile
import requests
from pathlib import Path
import ray
from ray.test_utils import (run_string_as_driver,
run_string_as_driver_nonblocking)
from ray._private.utils import (get_wheel_filename, get_master_wheel_url,
get_release_wheel_url)
import ray.experimental.internal_kv as kv
from time import sleep
driver_script = """
from time import sleep
import sys
import logging
sys.path.insert(0, "{working_dir}")
import ray
import ray.util
import os
try:
import test_module
except:
pass
try:
job_config = ray.job_config.JobConfig(
runtime_env={runtime_env}
)
if not job_config.runtime_env:
job_config=None
if os.environ.get("USE_RAY_CLIENT"):
ray.client("{address}").env({runtime_env}).namespace("").connect()
else:
ray.init(address="{address}",
job_config=job_config,
logging_level=logging.DEBUG,
namespace=""
)
except ValueError:
print("ValueError")
sys.exit(0)
except TypeError:
print("TypeError")
sys.exit(0)
except:
print("ERROR")
sys.exit(0)
if os.environ.get("EXIT_AFTER_INIT"):
sys.exit(0)
@ray.remote
def run_test():
return test_module.one()
@ray.remote
def check_file(name):
try:
with open(name) as f:
return f.read()
except:
return "FAILED"
@ray.remote
class TestActor(object):
@ray.method(num_returns=1)
def one(self):
return test_module.one()
{execute_statement}
if os.environ.get("USE_RAY_CLIENT"):
ray.util.disconnect()
else:
ray.shutdown()
sleep(10)
"""
def create_file(p):
if not p.parent.exists():
p.parent.mkdir()
with p.open("w") as f:
f.write("Test")
@pytest.fixture(scope="function")
def working_dir():
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
module_path = path / "test_module"
module_path.mkdir(parents=True)
init_file = module_path / "__init__.py"
test_file = module_path / "test.py"
with test_file.open(mode="w") as f:
f.write("""
def one():
return 1
""")
with init_file.open(mode="w") as f:
f.write("""
from test_module.test import one
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
yield tmp_dir
os.chdir(old_dir)
def start_client_server(cluster, client_mode):
from ray._private.runtime_env import PKG_DIR
if not client_mode:
return (cluster.address, {}, PKG_DIR)
ray.worker._global_node._ray_params.ray_client_server_port = "10003"
ray.worker._global_node.start_ray_client_server()
return ("localhost:10003", {"USE_RAY_CLIENT": "1"}, PKG_DIR)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_travel():
import uuid
with tempfile.TemporaryDirectory() as tmp_dir:
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = Path(tmp_dir) / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
with (path / uid).open("w") as f:
v = random.randint(0, 1000)
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(
str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
construct(root)
exclude_spec = ray._private.runtime_env._get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
ray._private.runtime_env._dir_travel(root, [exclude_spec], handler)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
"""
The following test cases are related to the runtime env. They follow these steps:
1) Create a temporary dir with the fixture working_dir.
2) Use the template named driver_script defined globally.
3) Overwrite runtime_env and execute_statement in the template.
4) Execute the result as a separate driver and check its output.
"""
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_empty_working_dir(ray_start_cluster_head, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
with tempfile.TemporaryDirectory() as working_dir:
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"py_modules": [r"{working_dir}"]
}}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "sys.exit(0)"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out != "ERROR"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_invalid_working_dir(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
env["EXIT_AFTER_INIT"] = "1"
runtime_env = "{ 'working_dir': 10 }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = "{ 'py_modules': [10] }"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "TypeError"
runtime_env = f"{{ 'working_dir': os.path.join(r'{working_dir}', 'na') }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
runtime_env = f"{{ 'py_modules': [os.path.join(r'{working_dir}', 'na')] }}"
# Execute the following cmd in driver with runtime_env
execute_statement = ""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env).strip().split()[-1]
assert out == "ValueError"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_single_node(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Setup runtime env here
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Testing runtime env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_module(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# test runtime_env with py_modules
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_local_file(two_node_cluster, working_dir, client_mode):
with open(os.path.join(working_dir, "test_file"), "w") as f:
f.write("1")
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
# Test runtime_env with working_dir
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
vals = ray.get([check_file.remote('test_file')] * 1000)
print(sum([int(v) for v in vals]))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Check that everything works before applying any exclusions.
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test"
runtime_env = f"""{{
"working_dir": r"{working_dir}",
"excludes": [
# exclude by relative path
r"test2",
# exclude by dir
r"{str(Path("tmp_dir") / "sub_dir")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_1")}",
# exclude part of the dir
r"{str(Path("tmp_dir") / "test_2")}",
]
}}"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"Test,FAILED,Test,FAILED,FAILED,Test,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_exclusion_2(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
working_path = Path(working_dir)
def create_file(p):
if not p.parent.exists():
p.parent.mkdir(parents=True)
with p.open("w") as f:
f.write("Test")
create_file(working_path / "tmp_dir" / "test_1")
create_file(working_path / "tmp_dir" / "test_2")
create_file(working_path / "tmp_dir" / "test_3")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_1")
create_file(working_path / "tmp_dir" / "sub_dir" / "test_2")
create_file(working_path / "test1")
create_file(working_path / "test2")
create_file(working_path / "test3")
create_file(working_path / "cache" / "test_1")
create_file(working_path / "tmp_dir" / "cache" / "test_1")
create_file(working_path / "another_dir" / "cache" / "test_1")
tmp_dir_test_3 = str((working_path / "tmp_dir" / "test_3").absolute())
runtime_env = f"""{{
"working_dir": r"{working_dir}",
}}"""
execute_statement = """
vals = ray.get([
check_file.remote('test1'),
check_file.remote('test2'),
check_file.remote('test3'),
check_file.remote(os.path.join('tmp_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'test_2')),
check_file.remote(os.path.join('tmp_dir', 'test_3')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_1')),
check_file.remote(os.path.join('tmp_dir', 'sub_dir', 'test_2')),
check_file.remote(os.path.join("cache", "test_1")),
check_file.remote(os.path.join("tmp_dir", "cache", "test_1")),
check_file.remote(os.path.join("another_dir", "cache", "test_1")),
])
print(','.join(vals))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
# Check that everything works before adding the .gitignore.
assert out.strip().split("\n")[-1] == \
"Test,Test,Test,Test,Test,Test,Test,Test,Test,Test,Test"
with open(f"{working_dir}/.gitignore", "w") as f:
f.write("""
# Comment
test_[12]
/test1
!/tmp_dir/sub_dir/test_1
cache/
""")
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split("\n")[-1] == \
"FAILED,Test,Test,FAILED,FAILED,Test,Test,FAILED,FAILED,FAILED,FAILED"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_runtime_env_getter(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
print(ray.get_runtime_context().runtime_env["working_dir"])
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == working_dir
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_two_node_uri(two_node_cluster, working_dir, client_mode):
cluster, _ = two_node_cluster
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
import ray._private.runtime_env as runtime_env
import tempfile
with tempfile.NamedTemporaryFile(suffix="zip") as tmp_file:
pkg_name = runtime_env.get_project_package_name(working_dir, [], [])
pkg_uri = runtime_env.Protocol.PIN_GCS.value + "://" + pkg_name
runtime_env.create_project_package(working_dir, [], [], tmp_file.name)
runtime_env.push_package(pkg_uri, tmp_file.name)
runtime_env = f"""{{ "uris": ["{pkg_uri}"] }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
# pinned uri will not be deleted
print(list(kv._internal_kv_list("")))
assert len(kv._internal_kv_list("pingcs://")) == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_regular_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("client_mode", [True, False])
def test_detached_actors(ray_start_cluster_head, working_dir, client_mode):
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, client_mode)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the following cmd in driver with runtime_env
execute_statement = """
test_actor = TestActor.options(name="test_actor", lifetime="detached").remote()
print(sum(ray.get([test_actor.one.remote()] * 1000)))
"""
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
# It's a detached actor, so its package should still be there
assert len(kv._internal_kv_list("gcs://")) == 1
assert len(list(Path(PKG_DIR).iterdir())) == 2
pkg_dir = [f for f in Path(PKG_DIR).glob("*") if f.is_dir()][0]
import sys
sys.path.insert(0, str(pkg_dir))
test_actor = ray.get_actor("test_actor")
assert sum(ray.get([test_actor.one.remote()] * 1000)) == 1000
ray.kill(test_actor)
from time import sleep
sleep(5)
assert len(list(Path(PKG_DIR).iterdir())) == 1
assert len(kv._internal_kv_list("gcs://")) == 0
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_1(ray_start_cluster_head, working_dir):
# start job_config=None
# start job_config=something
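# Scenario: the first driver connects through the Ray Client server with
# job_config=None and hangs; a second driver that supplies a working_dir
# runtime_env should still be able to connect and run.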
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = None
# Keep the first driver hanging there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
# Have one running with job config = None
proc = run_string_as_driver_nonblocking(script, env)
# Wait for it to come up
sleep(5)
runtime_env = f"""{{ "working_dir": "{working_dir}" }}"""
# Execute the second one, which should still work through the Ray Client server.
execute_statement = "print(sum(ray.get([run_test.remote()] * 1000)))"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "1000"
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_2(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=None
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Keep the first driver hanging there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = None
# Execute the following in the second driver; it should succeed.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
assert out.strip().split()[-1] == "OK", out
proc.kill()
proc.wait()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_jobconfig_compatible_3(ray_start_cluster_head, working_dir):
# start job_config=something
# start job_config=something else
cluster = ray_start_cluster_head
(address, env, PKG_DIR) = start_client_server(cluster, True)
runtime_env = """{ "py_modules": [test_module.__path__[0]] }"""
# Keep the first driver hanging there
execute_statement = """
sleep(600)
"""
script = driver_script.format(**locals())
proc = run_string_as_driver_nonblocking(script, env)
sleep(5)
runtime_env = f"""
{{ "working_dir": test_module.__path__[0] }}""" # noqa: F541
# Execute the following cmd in the second one and ensure that
# it is able to run.
execute_statement = "print('OK')"
script = driver_script.format(**locals())
out = run_string_as_driver(script, env)
proc.kill()
proc.wait()
assert out.strip().split()[-1] == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_util_without_job_config(shutdown_only):
from ray.cluster_utils import Cluster
with tempfile.TemporaryDirectory() as tmp_dir:
with (Path(tmp_dir) / "lib.py").open("w") as f:
f.write("""
def one():
return 1
""")
old_dir = os.getcwd()
os.chdir(tmp_dir)
cluster = Cluster()
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
(address, env, PKG_DIR) = start_client_server(cluster, True)
script = f"""
import ray
import ray.util
import os
ray.util.connect("{address}", job_config=None)
@ray.remote
def run():
from lib import one
return one()
print(ray.get([run.remote()])[0])
"""
out = run_string_as_driver(script, env)
print(out)
os.chdir(old_dir)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_init(shutdown_only):
with tempfile.TemporaryDirectory() as tmp_dir:
old_dir = os.getcwd()
os.chdir(tmp_dir)
with open("hello", "w") as f:
f.write("world")
ray.init(runtime_env={"working_dir": "."})
@ray.remote
class Test:
def test(self):
with open("hello") as f:
return f.read()
t = Test.remote()
assert ray.get(t.test.remote()) == "world"
os.chdir(old_dir)
def test_get_wheel_filename():
ray_version = "2.0.0.dev0"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
filename = get_wheel_filename(sys_platform, ray_version,
py_version)
prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/"
url = f"{prefix}{filename}"
assert requests.head(url).status_code == 200
def test_get_master_wheel_url():
ray_version = "2.0.0.dev0"
test_commit = "ba6cebe30fab6925e5b2d9e859ad064d53015246"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
url = get_master_wheel_url(test_commit, sys_platform, ray_version,
py_version)
assert requests.head(url).status_code == 200, url
def test_get_release_wheel_url():
test_commits = {
"1.4.0rc1": "e7c7f6371a69eb727fa469e4cd6f4fbefd143b4c",
"1.3.0": "0b4b444fadcdc23226e11fef066b982175804232",
"1.2.0": "1b1a2496ca51b745c07c79fb859946d3350d471b"
}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ["36", "37", "38"]:
for version, commit in test_commits.items():
url = get_release_wheel_url(commit, sys_platform, version,
py_version)
assert requests.head(url).status_code == 200, url
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_task(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_actor(ray_start_cluster_head):
@ray.remote(runtime_env={"env_vars": {"foo": "bar"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "bar"
@pytest.mark.skipif(
sys.platform == "win32", reason="runtime_env unsupported on Windows.")
def test_decorator_complex(shutdown_only):
ray.init(
job_config=ray.job_config.JobConfig(
runtime_env={"env_vars": {
"foo": "job"
}}))
@ray.remote
def env_from_job():
return os.environ.get("foo")
assert ray.get(env_from_job.remote()) == "job"
@ray.remote(runtime_env={"env_vars": {"foo": "task"}})
def f():
return os.environ.get("foo")
assert ray.get(f.remote()) == "task"
@ray.remote(runtime_env={"env_vars": {"foo": "actor"}})
class A:
def g(self):
return os.environ.get("foo")
a = A.remote()
assert ray.get(a.g.remote()) == "actor"
# Test that runtime_env can be overridden by specifying .options().
assert ray.get(
f.options(runtime_env={
"env_vars": {
"foo": "new"
}
}).remote()) == "new"
a = A.options(runtime_env={"env_vars": {"foo": "new2"}}).remote()
assert ray.get(a.g.remote()) == "new2"
def test_container_option_serialize():
runtime_env = {
"container": {
"image": "ray:latest",
"run_options": ["--name=test"]
}
}
job_config = ray.job_config.JobConfig(runtime_env=runtime_env)
job_config_serialized = job_config.serialize()
# job_config_serialized is JobConfig protobuf serialized string,
# job_config.runtime_env.raw_json has container_option info
# job_config.serialized_runtime_env also has container_option info
assert job_config_serialized.count(b"image") == 2
def test_working_dir_override_failure(shutdown_only):
ray.init()
@ray.remote(runtime_env={"working_dir": "."})
def f():
pass
with pytest.raises(NotImplementedError):
f.remote()
@ray.remote
def g():
pass
with pytest.raises(NotImplementedError):
g.options(runtime_env={"working_dir": "."}).remote()
@ray.remote(runtime_env={"working_dir": "."})
class A:
pass
with pytest.raises(NotImplementedError):
A.remote()
@ray.remote
class B:
pass
with pytest.raises(NotImplementedError):
B.options(runtime_env={"working_dir": "."}).remote()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
import arrow
import backtrader as bt
import importlib
import time
import backtrader.indicators as btind
# {'startDay': 283968000, 'endDay': 2524579200, 'kline': 'kline_day', 'myStocks': ['SH.603922'], 'market': 'SH', 'maxTimeBuffer':3, 'factors':2}
class iStrategyBase(bt.Strategy):
params = dict(rule=dict())
def __init__(self):
data = self.datas[0]
self._inds = dict()
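# Instantiate one indicator per trade condition; keys follow the pattern
# 'X_<factor>_<signal line>_<position>' so they match the per-bar
# feature columns emitted in next().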
for cond in self.p.rule['tradeCondition']:
if cond['modName'] == 'pattern_indicator':
factor = f"{cond["clsName"]}_{cond["params"]["line"]}"
else:
factor = cond['clsName']
_module = getattr(importlib.import_module(f"api.loop_stack.loop_indicators.{cond['modName']}"), cond['clsName'])
_sigline = cond['params'].get('line','')
_position = cond['params']['logic'].get('position','')
self._inds['X_'+factor+'_'+_sigline+'_'+str(_position)] = (_module(data, rule=cond['params']))
class iStrategy(iStrategyBase):
def __init__(self):
super(iStrategy, self).__init__()
print()
# self.result_dict = {}
# for _ind in self._inds:
# self.result_dict[_ind] = {'factor':_ind, 'code':self.datas[0]._name, 'result_1d':[], 'result_2d':[], 'result_3d':[], 'result_4d':[], 'result_5d':[],
# 'create_time':time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())), 'market':self.p.rule['market'], 'validity':0,}
self.result_dict = {'code': self.datas[0]._name, 'result': {},
'create_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())),
'market': self.p.rule['market'], 'validity': 0,
}
# self.oscillator = btind.Oscillator()
self.upmove = btind.UpMove()
self.downmove = btind.DownMove()
self.directionalindicator = btind.DirectionalIndicator()
self.plusdirectionalindicator = btind.PlusDirectionalIndicator()
self.minusdirectionalindicator = btind.MinusDirectionalIndicator()
self.averagedirectionalmovementindex = btind.AverageDirectionalMovementIndex()
self.averagedirectionalmovementindexrating = btind.AverageDirectionalMovementIndexRating()
self.directionalmovementindex = btind.DirectionalMovementIndex()
self.directionalmovement = btind.DirectionalMovement()
self.relativemomentumindex = btind.RelativeMomentumIndex()
self.zerolagindicator = btind.ZeroLagIndicator()
self.awesomeoscillator = btind.AwesomeOscillator()
self.zerolagexponentialmovingaverage = btind.ZeroLagExponentialMovingAverage()
# self.heikinashi = btind.HeikinAshi()
self.percentrank = btind.PercentRank()
# self.movingaveragebase = btind.MovingAverageBase()
self.weightedmovingaverage = btind.WeightedMovingAverage()
self.vortex = btind.Vortex()
self.accelerationdecelerationoscillator = btind.AccelerationDecelerationOscillator()
self.priceoscillator = btind.PriceOscillator()
self.percentagepriceoscillator = btind.PercentagePriceOscillator()
self.percentagepriceoscillatorshort = btind.PercentagePriceOscillatorShort()
self.ultimateoscillator = btind.UltimateOscillator()
self.parabolicsar = btind.ParabolicSAR()
self.macd = btind.MACD()
self.macdhisto = btind.MACDHisto()
# self.periodn = btind.PeriodN()
# self.operationn = btind.OperationN()
# self.baseapplyn = btind.BaseApplyN()
# self.applyn = btind.ApplyN()
self.highest = btind.Highest()
self.lowest = btind.Lowest()
# self.reducen = btind.ReduceN()
self.sumn = btind.SumN()
self.anyn = btind.AnyN()
self.alln = btind.AllN()
# self.findfirstindex = btind.FindFirstIndex()
self.findfirstindexhighest = btind.FindFirstIndexHighest()
self.findfirstindexlowest = btind.FindFirstIndexLowest()
# self.findlastindex = btind.FindLastIndex()
self.findlastindexhighest = btind.FindLastIndexHighest()
self.findlastindexlowest = btind.FindLastIndexLowest()
self.accum = btind.Accum()
self.average = btind.Average()
self.exponentialsmoothing = btind.ExponentialSmoothing()
# self.exponentialsmoothingdynamic = btind.ExponentialSmoothingDynamic()
self.weightedaverage = btind.WeightedAverage()
self.exponentialmovingaverage = btind.ExponentialMovingAverage()
# self.ols_slope_interceptn = btind.OLS_Slope_InterceptN()
# self.ols_transformationn = btind.OLS_TransformationN()
# self.ols_betan = btind.OLS_BetaN()
# self.cointn = btind.CointN()
self.stochasticfast = btind.StochasticFast()
self.stochastic = btind.Stochastic()
self.stochasticfull = btind.StochasticFull()
self.truehigh = btind.TrueHigh()
self.truelow = btind.TrueLow()
self.truerange = btind.TrueRange()
self.averagetruerange = btind.AverageTrueRange()
# self.oscillatormixin = btind.OscillatorMixIn()
self.prettygoodoscillator = btind.PrettyGoodOscillator()
self.dicksonmovingaverage = btind.DicksonMovingAverage()
self.percentchange = btind.PercentChange()
# self.hadelta = btind.haDelta(self.data)
self.commoditychannelindex = btind.CommodityChannelIndex()
self.hullmovingaverage = btind.HullMovingAverage()
self.standarddeviation = btind.StandardDeviation()
self.meandeviation = btind.MeanDeviation()
self.doubleexponentialmovingaverage = btind.DoubleExponentialMovingAverage()
self.tripleexponentialmovingaverage = btind.TripleExponentialMovingAverage()
self.williamsr = btind.WilliamsR()
self.williamsad = btind.WilliamsAD()
# self.dv2 = btind.DV2()
self.truestrengthindicator = btind.TrueStrengthIndicator()
self.ichimoku = btind.Ichimoku()
self.adaptivemovingaverage = btind.AdaptiveMovingAverage()
self.movingaveragesimple = btind.MovingAverageSimple()
# self.nonzerodifference = btind.NonZeroDifference()
# self.crossup = btind.CrossUp()
# self.crossdown = btind.CrossDown()
# self.crossover = btind.CrossOver()
self.pivotpoint = btind.PivotPoint()
self.fibonaccipivotpoint = btind.FibonacciPivotPoint()
self.demarkpivotpoint = btind.DemarkPivotPoint()
self.upday = btind.UpDay()
self.downday = btind.DownDay()
self.updaybool = btind.UpDayBool()
self.downdaybool = btind.DownDayBool()
self.relativestrengthindex = btind.RelativeStrengthIndex()
self.rsi_safe = btind.RSI_Safe()
self.rsi_sma = btind.RSI_SMA()
self.rsi_ema = btind.RSI_EMA()
self.trix = btind.Trix()
self.trixsignal = btind.TrixSignal()
self.laguerrersi = btind.LaguerreRSI()
self.laguerrefilter = btind.LaguerreFilter()
self.hurstexponent = btind.HurstExponent()
self.aroonup = btind.AroonUp()
self.aroondown = btind.AroonDown()
self.aroonupdown = btind.AroonUpDown()
self.aroonoscillator = btind.AroonOscillator()
self.aroonupdownoscillator = btind.AroonUpDownOscillator()
self.bollingerbands = btind.BollingerBands()
self.bollingerbandspct = btind.BollingerBandsPct()
self.momentum = btind.Momentum()
self.momentumoscillator = btind.MomentumOscillator()
self.rateofchange = btind.RateOfChange()
self.rateofchange100 = btind.RateOfChange100()
self.detrendedpriceoscillator = btind.DetrendedPriceOscillator()
self.smoothedmovingaverage = btind.SmoothedMovingAverage()
self.envelope = btind.Envelope()
self.knowsurething = btind.KnowSureThing()
def next(self):
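# Build a flat snapshot for the current bar: OHLCV fields, one 'X_*'
# column per indicator line, and a 0/1 flag per configured trade
# condition, stored in result_dict keyed by the bar timestamp.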
stock = self.datas[0]
# _match = {'date': str(stock.datetime.date()), 'match_factor': [],'close': stock.close[0], 'high':stock.high[0],
# 'score': {'result_1d': {}, 'result_2d': {}, 'result_3d': {}, 'result_4d': {}, 'result_5d': {}}, 'trading_days': [int(stock.datetime[-i]) for i in range(1,8)]}
# for k in self._inds:
# if self._inds[k]:
#
# _match['match_factor'].append(k)
# self.result_dict['validity'] = 1
# Closing price
# for i in range(1, 6):
# _d = _match['score'][f'result_{i}d']
# try:
# _d["percent"] = (stock.close[i] - stock.close[0]) / stock.close[0]
# if _d["percent"]>0:
# _d["status"] = 1
# else:
# _d["status"] = 0
# except:
# _d["status"] = -1
#
# # Highest price
# try:
# _d["high_percent"] = (stock.high[i] - stock.close[0]) / stock.close[0]
# if _d["high_percent"] > 0:
# _d["high_status"] = 1
# else:
# _d["high_status"] = 0
# except:
# _d["high_status"] = -1
# self.result_dict['result'][str(int(stock.datetime[0]))] = _match
dc = {'date': str(stock.datetime.datetime()),'close': stock.close[0], 'high':stock.high[0],'open':stock.open[0],'low':stock.low[0],'volume':stock.volume[0]}
########## UpMove
dc['X_upmove'] = self.upmove.upmove[0]
########## DownMove
dc['X_downmove'] = self.downmove.downmove[0]
########## DirectionalIndicator
dc['X_plusDI'] = self.directionalindicator.plusDI[0]
dc['X_minusDI'] = self.directionalindicator.minusDI[0]
########## PlusDirectionalIndicator
dc['X_plusDI'] = self.plusdirectionalindicator.plusDI[0]
########## MinusDirectionalIndicator
dc['X_minusDI'] = self.minusdirectionalindicator.minusDI[0]
########## AverageDirectionalMovementIndex
dc['X_adx'] = self.averagedirectionalmovementindex.adx[0]
########## AverageDirectionalMovementIndexRating
dc['X_adxr'] = self.averagedirectionalmovementindexrating.adxr[0]
########## DirectionalMovementIndex
dc['X_plusDI'] = self.directionalmovementindex.plusDI[0]
dc['X_minusDI'] = self.directionalmovementindex.minusDI[0]
########## DirectionalMovement
dc['X_plusDI'] = self.directionalmovement.plusDI[0]
dc['X_minusDI'] = self.directionalmovement.minusDI[0]
########## RelativeMomentumIndex
dc['X_rmi'] = self.relativemomentumindex.rmi[0]
########## ZeroLagIndicator
dc['X_ec'] = self.zerolagindicator.ec[0]
########## AwesomeOscillator
dc['X_ao'] = self.awesomeoscillator.ao[0]
########## ZeroLagExponentialMovingAverage
dc['X_zlema'] = self.zerolagexponentialmovingaverage.zlema[0]
# ########## HeikinAshi
# dc['X_ha_open'] = self.heikinashi.ha_open[0]
# dc['X_ha_high'] = self.heikinashi.ha_high[0]
# dc['X_ha_low'] = self.heikinashi.ha_low[0]
# dc['X_ha_close'] = self.heikinashi.ha_close[0]
# dc['X_open'] = self.heikinashi.open[0]
# dc['X_high'] = self.heikinashi.high[0]
# dc['X_low'] = self.heikinashi.low[0]
# dc['X_close'] = self.heikinashi.close[0]
########## PercentRank
dc['X_pctrank'] = self.percentrank.pctrank[0]
########## MovingAverageBase
########## WeightedMovingAverage
dc['X_wma'] = self.weightedmovingaverage.wma[0]
########## Vortex
dc['X_vi_plus'] = self.vortex.vi_plus[0]
dc['X_vi_minus'] = self.vortex.vi_minus[0]
dc['X_iViDistance'] = self.vortex.vi_plus[0] - self.vortex.vi_minus[0]
########## AccelerationDecelerationOscillator
dc['X_accde'] = self.accelerationdecelerationoscillator.accde[0]
########## PriceOscillator
dc['X_po'] = self.priceoscillator.po[0]
########## PercentagePriceOscillator
dc['X_ppo'] = self.percentagepriceoscillator.ppo[0]
dc['X_signal'] = self.percentagepriceoscillator.signal[0]
dc['X_histo'] = self.percentagepriceoscillator.histo[0]
########## PercentagePriceOscillatorShort
########## UltimateOscillator
dc['X_uo'] = self.ultimateoscillator.uo[0]
########## ParabolicSAR
dc['X_psar'] = self.parabolicsar.psar[0]
########## MACD
dc['X_macd'] = self.macd.macd[0]
dc['X_signal'] = self.macd.signal[0]
########## MACDHisto
dc['X_histo'] = self.macdhisto.histo[0]
########## PeriodN
########## OperationN
########## BaseApplyN
########## ApplyN
# dc['X_apply'] = self.applyn.apply[0]
########## Highest
dc['X_highest'] = self.highest.highest[0]
########## Lowest
dc['X_lowest'] = self.lowest.lowest[0]
########## ReduceN
# dc['X_reduced'] = self.reducen.reduced[0]
########## SumN
dc['X_sumn'] = self.sumn.sumn[0]
########## AnyN
dc['X_anyn'] = self.anyn.anyn[0]
########## AllN
dc['X_alln'] = self.alln.alln[0]
########## FindFirstIndex
# dc['X_index'] = self.findfirstindex.index[0]
########## FindFirstIndexHighest
########## FindFirstIndexLowest
########## FindLastIndex
# dc['X_index'] = self.findlastindex.index[0]
########## FindLastIndexHighest
########## FindLastIndexLowest
########## Accum
dc['X_accum'] = self.accum.accum[0]
########## Average
dc['X_av'] = self.average.av[0]
########## ExponentialSmoothing
########## ExponentialSmoothingDynamic
########## WeightedAverage
dc['X_av'] = self.weightedaverage.av[0]
########## ExponentialMovingAverage
dc['X_ema'] = self.exponentialmovingaverage.ema[0]
########## OLS_Slope_InterceptN
# dc['X_slope'] = self.ols_slope_interceptn.slope[0]
# dc['X_intercept'] = self.ols_slope_interceptn.intercept[0]
########## OLS_TransformationN
# dc['X_spread'] = self.ols_transformationn.spread[0]
# dc['X_spread_mean'] = self.ols_transformationn.spread_mean[0]
# dc['X_spread_std'] = self.ols_transformationn.spread_std[0]
# dc['X_zscore'] = self.ols_transformationn.zscore[0]
########## OLS_BetaN
# dc['X_beta'] = self.ols_betan.beta[0]
########## CointN
# dc['X_score'] = self.cointn.score[0]
# dc['X_pvalue'] = self.cointn.pvalue[0]
########## StochasticFast
########## Stochastic
########## StochasticFull
dc['X_percDSlow'] = self.stochasticfull.percDSlow[0]
########## TrueHigh
dc['X_truehigh'] = self.truehigh.truehigh[0]
########## TrueLow
dc['X_truelow'] = self.truelow.truelow[0]
########## TrueRange
dc['X_tr'] = self.truerange.tr[0]
########## AverageTrueRange
dc['X_atr'] = self.averagetruerange.atr[0]
########## OscillatorMixIn
########## Oscillator
# dc['X_osc'] = self.oscillator.osc[0]
########## PrettyGoodOscillator
dc['X_pgo'] = self.prettygoodoscillator.pgo[0]
########## DicksonMovingAverage
dc['X_dma'] = self.dicksonmovingaverage.dma[0]
########## PercentChange
dc['X_pctchange'] = self.percentchange.pctchange[0]
########## haDelta
# dc['X_haDelta'] = self.hadelta.haDelta[0]
# dc['X_smoothed'] = self.hadelta.smoothed[0]
########## CommodityChannelIndex
dc['X_cci'] = self.commoditychannelindex.cci[0]
########## HullMovingAverage
dc['X_hma'] = self.hullmovingaverage.hma[0]
########## StandardDeviation
dc['X_stddev'] = self.standarddeviation.stddev[0]
########## MeanDeviation
dc['X_meandev'] = self.meandeviation.meandev[0]
########## DoubleExponentialMovingAverage
dc['X_dema'] = self.doubleexponentialmovingaverage.dema[0]
########## TripleExponentialMovingAverage
dc['X_tema'] = self.tripleexponentialmovingaverage.tema[0]
########## WilliamsR
dc['X_percR'] = self.williamsr.percR[0]
########## WilliamsAD
dc['X_ad'] = self.williamsad.ad[0]
########## DV2
# dc['X_dv2'] = self.dv2.dv2[0]
########## TrueStrengthIndicator
dc['X_tsi'] = self.truestrengthindicator.tsi[0]
########## Ichimoku
dc['X_tenkan_sen'] = self.ichimoku.tenkan_sen[0]
dc['X_kijun_sen'] = self.ichimoku.kijun_sen[0]
dc['X_senkou_span_a'] = self.ichimoku.senkou_span_a[0]
dc['X_senkou_span_b'] = self.ichimoku.senkou_span_b[0]
# dc['X_chikou_span'] = self.ichimoku.chikou_span[0]
########## AdaptiveMovingAverage
dc['X_kama'] = self.adaptivemovingaverage.kama[0]
########## MovingAverageSimple
dc['X_sma'] = self.movingaveragesimple.sma[0]
########## NonZeroDifference
# dc['X_nzd'] = self.nonzerodifference.nzd[0]
########## CrossUp
########## CrossDown
########## CrossOver
# dc['X_crossover'] = self.crossover.crossover[0]
########## PivotPoint
# dc['X_p'] = self.pivotpoint.p[0]
dc['X_s1'] = self.pivotpoint.s1[0]
dc['X_s2'] = self.pivotpoint.s2[0]
dc['X_r1'] = self.pivotpoint.r1[0]
dc['X_r2'] = self.pivotpoint.r2[0]
########## FibonacciPivotPoint
# dc['X_p'] = self.fibonaccipivotpoint.p[0]
dc['X_s1'] = self.fibonaccipivotpoint.s1[0]
dc['X_s2'] = self.fibonaccipivotpoint.s2[0]
dc['X_s3'] = self.fibonaccipivotpoint.s3[0]
dc['X_r1'] = self.fibonaccipivotpoint.r1[0]
dc['X_r2'] = self.fibonaccipivotpoint.r2[0]
dc['X_r3'] = self.fibonaccipivotpoint.r3[0]
########## DemarkPivotPoint
# dc['X_p'] = self.demarkpivotpoint.p[0]
dc['X_s1'] = self.demarkpivotpoint.s1[0]
dc['X_r1'] = self.demarkpivotpoint.r1[0]
########## UpDay
dc['X_upday'] = self.upday.upday[0]
########## DownDay
dc['X_downday'] = self.downday.downday[0]
########## UpDayBool
dc['X_upday'] = self.updaybool.upday[0]
########## DownDayBool
dc['X_downday'] = self.downdaybool.downday[0]
########## RelativeStrengthIndex
dc['X_rsi'] = self.relativestrengthindex.rsi[0]
########## RSI_Safe
########## RSI_SMA
########## RSI_EMA
########## Trix
dc['X_trix'] = self.trix.trix[0]
########## TrixSignal
dc['X_signal'] = self.trixsignal.signal[0]
########## LaguerreRSI
dc['X_lrsi'] = self.laguerrersi.lrsi[0]
########## LaguerreFilter
dc['X_lfilter'] = self.laguerrefilter.lfilter[0]
########## HurstExponent
dc['X_hurst'] = self.hurstexponent.hurst[0]
########## AroonUp
dc['X_aroonup'] = self.aroonup.aroonup[0]
########## AroonDown
dc['X_aroondown'] = self.aroondown.aroondown[0]
########## AroonUpDown
dc['X_aroondown'] = self.aroonupdown.aroondown[0]
########## AroonOscillator
dc['X_aroonosc'] = self.aroonoscillator.aroonosc[0]
########## AroonUpDownOscillator
dc['X_aroonosc'] = self.aroonupdownoscillator.aroonosc[0]
########## BollingerBands
dc['X_mid'] = self.bollingerbands.mid[0]
dc['X_top'] = self.bollingerbands.top[0]
dc['X_bot'] = self.bollingerbands.bot[0]
########## BollingerBandsPct
dc['X_pctb'] = self.bollingerbandspct.pctb[0]
########## Momentum
dc['X_momentum'] = self.momentum.momentum[0]
########## MomentumOscillator
dc['X_momosc'] = self.momentumoscillator.momosc[0]
########## RateOfChange
dc['X_roc'] = self.rateofchange.roc[0]
########## RateOfChange100
dc['X_roc100'] = self.rateofchange100.roc100[0]
########## DetrendedPriceOscillator
dc['X_dpo'] = self.detrendedpriceoscillator.dpo[0]
########## SmoothedMovingAverage
dc['X_smma'] = self.smoothedmovingaverage.smma[0]
########## Envelope
dc['X_top'] = self.envelope.top[0]
dc['X_bot'] = self.envelope.bot[0]
########## KnowSureThing
dc['X_kst'] = self.knowsurething.kst[0]
dc['X_signal'] = self.knowsurething.signal[0]
for k in self._inds:
if self._inds[k]:
dc[k] = 1
else:
dc[k] = 0
# print(dc)
# print(int(stock.datetime[0]))
# print(stock.datetime.datetime())
self.result_dict['result'][str(int(arrow.get(stock.datetime.datetime()).timestamp))] = dc
|
# -*- coding: utf-8 -*-
import asyncio
import base64
import json
import ssl
import time
import typing
import urllib.parse
from asyncio import TimeoutError
from tornado.httpclient import AsyncHTTPClient, HTTPResponse
from . import AppConfig
from .AppConfig import Expose
from .Exceptions import K8sError
from .constants.ServiceConstants import ServiceConstants
from .entities.Volume import Volumes
from .utils.HttpUtils import HttpUtils
class Kubernetes:
@classmethod
def is_2xx(cls, res: HTTPResponse):
return int(res.code / 100) == 2
@classmethod
def raise_if_not_2xx(cls, res: HTTPResponse):
if cls.is_2xx(res):
return
path = res.request.url
raise K8sError(message=f'Failed to call {path}! '
f'code={res.code}; body={res.body}; '
f'error={res.error}')
@classmethod
async def create_ingress(cls, ingress_name, app, expose: Expose,
container_name: str, hostname: str):
if await cls._does_resource_exist(app, 'ingresses', ingress_name):
app.logger.debug(f'Kubernetes ingress for {expose} exists')
return
expose_conf = app.services[expose.service][
ServiceConstants.config][AppConfig.KEY_EXPOSE][
expose.service_expose_name]
http_conf = expose_conf['http']
payload = {
'apiVersion': 'extensions/v1beta1',
'kind': 'Ingress',
'metadata': {
'name': ingress_name,
'annotations': {
'kubernetes.io/ingress.class': 'nginx',
'kubernetes.io/ingress.global-static-ip-name':
app.config.INGRESS_GLOBAL_STATIC_IP_NAME,
'ingress.kubernetes.io/rewrite-target': expose.http_path,
'nginx.ingress.kubernetes.io/proxy-body-size': '1m',
'nginx.ingress.kubernetes.io/proxy-read-timeout': '120'
}
},
'spec': {
'tls': [
{
'hosts': [f'{hostname}.'
f'{app.config.APP_DOMAIN}']
}
],
'rules': [
{
'host': f'{hostname}.{app.config.APP_DOMAIN}',
'http': {
'paths': [
{
'path': http_conf['path'],
'backend': {
'serviceName': container_name,
'servicePort': http_conf['port']
}
}
]
}
}
]
}
}
prefix = cls._get_api_path_prefix('ingresses')
res = await cls.make_k8s_call(app.config, app.logger,
f'{prefix}/{app.app_id}/ingresses',
payload=payload)
if not cls.is_2xx(res):
raise K8sError(f'Failed to create ingress for expose {expose}!')
app.logger.debug(f'Kubernetes ingress created')
@classmethod
async def create_namespace(cls, app):
res = await cls.make_k8s_call(app.config, app.logger,
f'/api/v1/namespaces/{app.app_id}')
if res.code == 200:
app.logger.debug(f'Kubernetes namespace exists')
return
app.logger.debug(f'Kubernetes namespace does not exist')
payload = {
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {
'name': app.app_id
}
}
res = await cls.make_k8s_call(app.config, app.logger,
'/api/v1/namespaces', payload=payload)
if not cls.is_2xx(res):
raise K8sError('Failed to create namespace!')
app.logger.debug(f'Kubernetes namespace created')
@classmethod
def new_ssl_context(cls):
return ssl.SSLContext()
@classmethod
async def make_k8s_call(cls, config, logger, path: str,
payload: dict = None,
method: str = 'get') -> HTTPResponse:
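# Call the Kubernetes API server: TLS is verified against CLUSTER_CERT,
# requests carry the cluster bearer token, payloads are JSON-encoded
# into the body, and the call is retried via HttpUtils.fetch_with_retry.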
context = cls.new_ssl_context()
cert = config.CLUSTER_CERT
cert = cert.replace('\\n', '\n')
context.load_verify_locations(cadata=cert)
kwargs = {
'ssl_options': context,
'headers': {
'Authorization': f'bearer {config.CLUSTER_AUTH_TOKEN}',
'Content-Type': 'application/json; charset=utf-8'
},
'method': method.upper()
}
if method.lower() == 'patch':
kwargs['headers']['Content-Type'] = \
'application/merge-patch+json; charset=utf-8'
if payload is not None:
kwargs['body'] = json.dumps(payload)
if method == 'get': # Default value.
kwargs['method'] = 'POST'
client = AsyncHTTPClient()
return await HttpUtils.fetch_with_retry(
3, logger, f'https://{config.CLUSTER_HOST}{path}',
client, kwargs)
@classmethod
async def remove_volume(cls, app, name):
await cls._delete_resource(app, 'persistentvolumeclaims', name)
@classmethod
async def _does_resource_exist(cls, app, resource, name):
prefix = cls._get_api_path_prefix(resource)
path = f'{prefix}/{app.app_id}' \
f'/{resource}/{name}'
res = await cls.make_k8s_call(app.config, app.logger, path)
if res.code == 404:
return False
elif res.code == 200:
return True
raise K8sError(
message=f'Failed to check if {resource}/{name} exists! '
f'Kubernetes API returned {res.code}.')
@classmethod
async def _update_volume_label(cls, app, name):
path = f'/api/v1/namespaces/{app.app_id}/persistentvolumeclaims/{name}'
payload = {
'metadata': {
'labels': {
'last_referenced_on': f'{int(time.time())}'
}
}
}
res = await cls.make_k8s_call(app.config, app.logger,
path, payload, method='patch')
cls.raise_if_not_2xx(res)
app.logger.debug(
f'Updated reference time for volume {name}')
@classmethod
async def create_volume(cls, app, name, persist):
if await cls._does_resource_exist(
app, 'persistentvolumeclaims', name):
app.logger.debug(f'Kubernetes volume {name} already exists')
# Update the last_referenced_on label
await cls._update_volume_label(app, name)
return
path = f'/api/v1/namespaces/{app.app_id}/persistentvolumeclaims'
payload = {
'apiVersion': 'v1',
'kind': 'PersistentVolumeClaim',
'metadata': {
'name': name,
'namespace': app.app_id,
'labels': {
'last_referenced_on': f'{int(time.time())}',
'omg_persist': f'{persist}'
}
},
'spec': {
'accessModes': ['ReadWriteOnce'],
'resources': {
'requests': {
'storage': '100Mi' # For now, during beta.
}
}
}
}
res = await cls.make_k8s_call(app.config, app.logger, path, payload)
cls.raise_if_not_2xx(res)
app.logger.debug(f'Created a Kubernetes volume - {name}')
@classmethod
def _get_api_path_prefix(cls, resource):
if resource == 'deployments':
return '/apis/apps/v1/namespaces'
elif resource == 'ingresses':
return '/apis/extensions/v1beta1/namespaces'
elif resource == 'services' or \
resource == 'persistentvolumeclaims' or \
resource == 'pods':
return '/api/v1/namespaces'
else:
raise Exception(f'Unsupported resource type {resource}')
@classmethod
async def _list_resource_names(cls, app, resource) -> typing.List[str]:
prefix = cls._get_api_path_prefix(resource)
res = await cls.make_k8s_call(
app.config, app.logger, f'{prefix}/{app.app_id}/{resource}'
f'?includeUninitialized=true')
body = json.loads(res.body, encoding='utf-8')
out = []
for i in body['items']:
out.append(i['metadata']['name'])
return out
@classmethod
async def _delete_resource(cls, app, resource, name):
"""
Deletes a resource immediately.
:param app: An instance of App
:param resource: "services"/"deployments"/etc.
:param name: The resource name
"""
prefix = cls._get_api_path_prefix(resource)
res = await cls.make_k8s_call(
app.config, app.logger,
f'{prefix}/{app.app_id}/{resource}/{name}'
f'?gracePeriodSeconds=0',
method='delete')
if res.code == 404:
app.logger.debug(f'Resource {resource}/{name} not found')
return
# Sometimes, the API will throw a 409, indicating that a
# deletion is in progress. Don't assert that the status code
# is 2xx in this case.
if res.code != 409:
cls.raise_if_not_2xx(res)
# Wait until the resource has actually been killed.
while True:
res = await cls.make_k8s_call(
app.config, app.logger,
f'{prefix}/{app.app_id}/{resource}/{name}')
if res.code == 404:
break
app.logger.debug(f'{resource}/{name} is still terminating...')
await asyncio.sleep(0.7)
app.logger.debug(f'Deleted {resource}/{name} successfully!')
@classmethod
async def clean_namespace(cls, app):
app.logger.debug(f'Clearing namespace contents...')
# Things to delete:
# 1. Services
# 2. Deployments (should delete all pods internally too)
# 3. Volumes which are marked with persist as false
# 4. Ingresses
for i in await cls._list_resource_names(app, 'services'):
await cls._delete_resource(app, 'services', i)
for i in await cls._list_resource_names(app, 'deployments'):
await cls._delete_resource(app, 'deployments', i)
for i in await cls._list_resource_names(app, 'pods'):
await cls._delete_resource(app, 'pods', i)
for i in await cls._list_resource_names(app, 'ingresses'):
await cls._delete_resource(app, 'ingresses', i)
# Volumes are not deleted at this moment.
# See https://github.com/asyncy/platform-engine/issues/189
@classmethod
def get_hostname(cls, app, container_name):
return f'{container_name}.' \
f'{app.app_id}.svc.cluster.local'
@classmethod
def find_all_ports(cls, service_config: dict, inside_http=False) -> set:
ports = set()
for key, value in service_config.items():
if isinstance(value, dict):
http = key == 'http' or inside_http
ports.update(cls.find_all_ports(value, inside_http=http))
elif inside_http and key == 'port':
assert isinstance(value, int)
ports.add(value)
if not inside_http:
expose = service_config.get('expose', {})
for name, expose_conf in expose.items():
expose_port = expose_conf.get('http', {}).get('port')
if expose_port is not None:
ports.add(expose_port)
return ports
@classmethod
def format_ports(cls, ports: {int}):
port_list = []
for port in ports:
port_list.append({
'port': port,
'protocol': 'TCP',
'targetPort': port
})
return port_list
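    # Illustrative sketch (not from the original source): for a hypothetical
    # service config such as
    #
    #   {'actions': {'foo': {'http': {'port': 8080}}},
    #    'expose': {'ui': {'http': {'port': 9000, 'path': '/'}}}}
    #
    # find_all_ports returns {8080, 9000} (ports found under 'http' keys plus
    # any exposed http ports), and format_ports turns that set into
    #
    #   [{'port': 8080, 'protocol': 'TCP', 'targetPort': 8080},
    #    {'port': 9000, 'protocol': 'TCP', 'targetPort': 9000}]
    #
    # with the order depending on set iteration.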
@classmethod
async def create_service(cls, app, service: str,
container_name: str):
# Note: We don't check if this service exists because if it did,
# then we'd not get here. create_pod checks it. During beta, we tie
# 1:1 between a pod and a service.
ports = cls.find_all_ports(app.services[service])
port_list = cls.format_ports(ports)
payload = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': container_name,
'namespace': app.app_id,
'labels': {
'app': container_name
}
},
'spec': {
'ports': port_list,
'selector': {
'app': container_name
}
}
}
path = f'/api/v1/namespaces/{app.app_id}/services'
res = await cls.make_k8s_call(app.config, app.logger, path, payload)
cls.raise_if_not_2xx(res)
# Wait until the ports of the destination pod are open.
hostname = cls.get_hostname(app, container_name)
app.logger.info(f'Waiting for ports to open: {ports}')
for port in ports:
success = await cls.wait_for_port(hostname, port)
if not success:
app.logger.warn(
f'Timed out waiting for {hostname}:{port} to open. '
f'Some actions of {service} might fail!')
@classmethod
async def wait_for_port(cls, host, port):
attempts = 0
timeout_secs = 2
while attempts < 60: # Max wait time = attempts * timeout_secs = 120
attempts += 1
try:
fut = asyncio.open_connection(host, port)
await asyncio.wait_for(fut, timeout=timeout_secs)
return True
except (TimeoutError, ConnectionRefusedError):
await asyncio.sleep(timeout_secs)
return False
@classmethod
async def check_for_image_errors(cls, app, container_name):
# List of image pull errors taken from the kubernetes source code
# github/kubernetes/kubernetes/blob/master/pkg/kubelet/images/types.go
image_errors = [
'ImagePullBackOff',
'ImageInspectError',
'ErrImagePull',
'ErrImageNeverPull',
'RegistryUnavailable',
'InvalidImageName'
]
prefix = cls._get_api_path_prefix('pods')
qs = urllib.parse.urlencode({
'labelSelector': f'app={container_name}'
})
res = await cls.make_k8s_call(app.config, app.logger,
f'{prefix}/{app.app_id}/pods?{qs}')
cls.raise_if_not_2xx(res)
body = json.loads(res.body, encoding='utf-8')
for pod in body['items']:
for container_status in pod['status'].get('containerStatuses', []):
is_waiting = container_status['state'].get('waiting', False)
if is_waiting and is_waiting['reason'] in image_errors:
raise K8sError(
                        message=f'{is_waiting["reason"]} - '
                                f'Failed to pull image {container_status["image"]}'
)
@classmethod
async def create_deployment(cls, app, service_name: str, image: str,
container_name: str, start_command: [] or str,
shutdown_command: [] or str, env: dict,
volumes: Volumes):
# Note: We don't check if this deployment exists because if it did,
# then we'd not get here. create_pod checks it. During beta, we tie
# 1:1 between a pod and a deployment.
        env_k8s = []  # Must contain {name:'foo', value:'bar'}.
if env:
for k, v in env.items():
if isinstance(v, bool):
if v:
v = 'true'
else:
v = 'false'
v = str(v) # In case it was a number.
env_k8s.append({
'name': k,
'value': v
})
volume_mounts = []
volumes_k8s = []
for vol in volumes:
volume_mounts.append({
'mountPath': vol.mount_path,
'name': vol.name
})
volumes_k8s.append({
'name': vol.name,
'persistentVolumeClaim': {
'claimName': vol.name
}
})
if not vol.persist:
await cls.remove_volume(app, vol.name)
await cls.create_volume(app, vol.name, vol.persist)
b16_service_name = base64.b16encode(service_name.encode()).decode()
payload = {
'apiVersion': 'apps/v1',
'kind': 'Deployment',
'metadata': {
'name': container_name,
'namespace': app.app_id
},
'spec': {
'replicas': 1,
'strategy': {
'type': 'RollingUpdate'
},
'selector': {
'matchLabels': {
'app': container_name
}
},
'template': {
'metadata': {
'labels': {
'app': container_name,
'b16-service-name': b16_service_name,
'logstash-enabled': 'true'
}
},
'spec': {
'containers': [
{
'name': container_name,
'image': image,
'resources': {
'limits': {
'memory': '200Mi', # During beta.
# 'cpu': '500m', # During beta.
}
},
'command': start_command,
'imagePullPolicy': 'Always',
'env': env_k8s,
'lifecycle': {
},
'volumeMounts': volume_mounts
}
],
'volumes': volumes_k8s
}
}
}
}
if shutdown_command is not None:
payload['spec']['template']['spec']['containers'][0]['lifecycle'][
'preStop'] = {
'exec': {
'command': shutdown_command
}
}
path = f'/apis/apps/v1/namespaces/{app.app_id}/deployments'
# When a namespace is created for the first time, K8s needs to perform
        # some sort of preparation. Pod creation fails sporadically for new
# namespaces. Check the status and retry.
tries = 0
res = None
while tries < 10:
tries = tries + 1
res = await cls.make_k8s_call(app.config, app.logger,
path, payload)
if cls.is_2xx(res):
break
app.logger.debug(f'Failed to create deployment, retrying...')
await asyncio.sleep(1)
cls.raise_if_not_2xx(res)
path = f'/apis/apps/v1/namespaces/{app.app_id}' \
f'/deployments/{container_name}'
# Wait until the deployment is ready.
app.logger.debug('Waiting for deployment to be ready...')
while True:
res = await cls.make_k8s_call(app.config, app.logger, path)
cls.raise_if_not_2xx(res)
body = json.loads(res.body, encoding='utf-8')
if body['status'].get('readyReplicas', 0) > 0:
break
await cls.check_for_image_errors(app, container_name)
await asyncio.sleep(1)
app.logger.debug('Deployment is ready')
@classmethod
async def create_pod(cls, app, service: str, image: str,
container_name: str, start_command: [] or str,
shutdown_command: [] or str, env: dict,
volumes: Volumes):
res = await cls.make_k8s_call(
app.config, app.logger,
f'/apis/apps/v1/namespaces/{app.app_id}'
f'/deployments/{container_name}')
if res.code == 200:
app.logger.debug(f'Deployment {container_name} '
f'already exists, reusing')
return
await cls.create_deployment(app, service, image, container_name,
start_command, shutdown_command, env,
volumes)
await cls.create_service(app, service, container_name)
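    # End-to-end sketch (hypothetical values, not from the original source):
    #
    #   await Kubernetes.create_namespace(app)
    #   await Kubernetes.create_pod(
    #       app, service='http', image='example/image:latest',
    #       container_name='http-1', start_command=['node', 'app.js'],
    #       shutdown_command=None, env={'PORT': 8080}, volumes=[])
    #
    # create_pod reuses an existing deployment when one is already present;
    # otherwise it creates the deployment and its matching service.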
|
# -*- coding: utf-8 -*-
import asyncio
import base64
import json
import ssl
import time
import typing
import urllib.parse
from asyncio import TimeoutError
from tornado.httpclient import AsyncHTTPClient, HTTPResponse
from . import AppConfig
from .AppConfig import Expose
from .Exceptions import K8sError
from .constants.ServiceConstants import ServiceConstants
from .entities.Volume import Volumes
from .utils.HttpUtils import HttpUtils
class Kubernetes:
@classmethod
def is_2xx(cls, res: HTTPResponse):
return int(res.code / 100) == 2
@classmethod
def raise_if_not_2xx(cls, res: HTTPResponse):
if cls.is_2xx(res):
return
path = res.request.url
raise K8sError(message=f'Failed to call {path}! '
f'code={res.code}; body={res.body}; '
f'error={res.error}')
@classmethod
async def create_ingress(cls, ingress_name, app, expose: Expose,
container_name: str, hostname: str):
if await cls._does_resource_exist(app, 'ingresses', ingress_name):
app.logger.debug(f'Kubernetes ingress for {expose} exists')
return
expose_conf = app.services[expose.service][
ServiceConstants.config][AppConfig.KEY_EXPOSE][
expose.service_expose_name]
http_conf = expose_conf['http']
payload = {
'apiVersion': 'extensions/v1beta1',
'kind': 'Ingress',
'metadata': {
'name': ingress_name,
'annotations': {
'kubernetes.io/ingress.class': 'nginx',
'kubernetes.io/ingress.global-static-ip-name':
app.config.INGRESS_GLOBAL_STATIC_IP_NAME,
'ingress.kubernetes.io/rewrite-target': expose.http_path,
'nginx.ingress.kubernetes.io/proxy-body-size': '1m',
'nginx.ingress.kubernetes.io/proxy-read-timeout': '120'
}
},
'spec': {
'tls': [
{
'hosts': [f'{hostname}.'
f'{app.config.APP_DOMAIN}']
}
],
'rules': [
{
'host': f'{hostname}.{app.config.APP_DOMAIN}',
'http': {
'paths': [
{
'path': http_conf['path'],
'backend': {
'serviceName': container_name,
'servicePort': http_conf['port']
}
}
]
}
}
]
}
}
prefix = cls._get_api_path_prefix('ingresses')
res = await cls.make_k8s_call(app.config, app.logger,
f'{prefix}/{app.app_id}/ingresses',
payload=payload)
if not cls.is_2xx(res):
raise K8sError(f'Failed to create ingress for expose {expose}!')
app.logger.debug(f'Kubernetes ingress created')
@classmethod
async def create_namespace(cls, app):
res = await cls.make_k8s_call(app.config, app.logger,
f'/api/v1/namespaces/{app.app_id}')
if res.code == 200:
app.logger.debug(f'Kubernetes namespace exists')
return
app.logger.debug(f'Kubernetes namespace does not exist')
payload = {
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {
'name': app.app_id
}
}
res = await cls.make_k8s_call(app.config, app.logger,
'/api/v1/namespaces', payload=payload)
if not cls.is_2xx(res):
raise K8sError('Failed to create namespace!')
app.logger.debug(f'Kubernetes namespace created')
@classmethod
def new_ssl_context(cls):
return ssl.SSLContext()
@classmethod
async def make_k8s_call(cls, config, logger, path: str,
payload: dict = None,
method: str = 'get') -> HTTPResponse:
context = cls.new_ssl_context()
cert = config.CLUSTER_CERT
cert = cert.replace('\\n', '\n')
context.load_verify_locations(cadata=cert)
kwargs = {
'ssl_options': context,
'headers': {
'Authorization': f'bearer {config.CLUSTER_AUTH_TOKEN}',
'Content-Type': 'application/json; charset=utf-8'
},
'method': method.upper()
}
if method.lower() == 'patch':
kwargs['headers']['Content-Type'] = \
'application/merge-patch+json; charset=utf-8'
if payload is not None:
kwargs['body'] = json.dumps(payload)
if method == 'get': # Default value.
kwargs['method'] = 'POST'
client = AsyncHTTPClient()
return await HttpUtils.fetch_with_retry(
3, logger, f'https://{config.CLUSTER_HOST}{path}',
client, kwargs)
@classmethod
async def remove_volume(cls, app, name):
await cls._delete_resource(app, 'persistentvolumeclaims', name)
@classmethod
async def _does_resource_exist(cls, app, resource, name):
prefix = cls._get_api_path_prefix(resource)
path = f'{prefix}/{app.app_id}' \
f'/{resource}/{name}'
res = await cls.make_k8s_call(app.config, app.logger, path)
if res.code == 404:
return False
elif res.code == 200:
return True
raise K8sError(
message=f'Failed to check if {resource}/{name} exists! '
f'Kubernetes API returned {res.code}.')
@classmethod
async def _update_volume_label(cls, app, name):
path = f'/api/v1/namespaces/{app.app_id}/persistentvolumeclaims/{name}'
payload = {
'metadata': {
'labels': {
'last_referenced_on': f'{int(time.time())}'
}
}
}
res = await cls.make_k8s_call(app.config, app.logger,
path, payload, method='patch')
cls.raise_if_not_2xx(res)
app.logger.debug(
f'Updated reference time for volume {name}')
@classmethod
async def create_volume(cls, app, name, persist):
if await cls._does_resource_exist(
app, 'persistentvolumeclaims', name):
app.logger.debug(f'Kubernetes volume {name} already exists')
# Update the last_referenced_on label
await cls._update_volume_label(app, name)
return
path = f'/api/v1/namespaces/{app.app_id}/persistentvolumeclaims'
payload = {
'apiVersion': 'v1',
'kind': 'PersistentVolumeClaim',
'metadata': {
'name': name,
'namespace': app.app_id,
'labels': {
'last_referenced_on': f'{int(time.time())}',
'omg_persist': f'{persist}'
}
},
'spec': {
'accessModes': ['ReadWriteOnce'],
'resources': {
'requests': {
'storage': '100Mi' # For now, during beta.
}
}
}
}
res = await cls.make_k8s_call(app.config, app.logger, path, payload)
cls.raise_if_not_2xx(res)
app.logger.debug(f'Created a Kubernetes volume - {name}')
@classmethod
def _get_api_path_prefix(cls, resource):
if resource == 'deployments':
return '/apis/apps/v1/namespaces'
elif resource == 'ingresses':
return '/apis/extensions/v1beta1/namespaces'
elif resource == 'services' or \
resource == 'persistentvolumeclaims' or \
resource == 'pods':
return '/api/v1/namespaces'
else:
raise Exception(f'Unsupported resource type {resource}')
@classmethod
async def _list_resource_names(cls, app, resource) -> typing.List[str]:
prefix = cls._get_api_path_prefix(resource)
res = await cls.make_k8s_call(
app.config, app.logger, f'{prefix}/{app.app_id}/{resource}'
f'?includeUninitialized=true')
body = json.loads(res.body, encoding='utf-8')
out = []
for i in body['items']:
out.append(i['metadata']['name'])
return out
@classmethod
async def _delete_resource(cls, app, resource, name):
"""
Deletes a resource immediately.
:param app: An instance of App
:param resource: "services"/"deployments"/etc.
:param name: The resource name
"""
prefix = cls._get_api_path_prefix(resource)
res = await cls.make_k8s_call(
app.config, app.logger,
f'{prefix}/{app.app_id}/{resource}/{name}'
f'?gracePeriodSeconds=0',
method='delete')
if res.code == 404:
app.logger.debug(f'Resource {resource}/{name} not found')
return
# Sometimes, the API will throw a 409, indicating that a
# deletion is in progress. Don't assert that the status code
# is 2xx in this case.
if res.code != 409:
cls.raise_if_not_2xx(res)
# Wait until the resource has actually been killed.
while True:
res = await cls.make_k8s_call(
app.config, app.logger,
f'{prefix}/{app.app_id}/{resource}/{name}')
if res.code == 404:
break
app.logger.debug(f'{resource}/{name} is still terminating...')
await asyncio.sleep(0.7)
app.logger.debug(f'Deleted {resource}/{name} successfully!')
@classmethod
async def clean_namespace(cls, app):
app.logger.debug(f'Clearing namespace contents...')
# Things to delete:
# 1. Services
# 2. Deployments (should delete all pods internally too)
# 3. Volumes which are marked with persist as false
# 4. Ingresses
for i in await cls._list_resource_names(app, 'services'):
await cls._delete_resource(app, 'services', i)
for i in await cls._list_resource_names(app, 'deployments'):
await cls._delete_resource(app, 'deployments', i)
for i in await cls._list_resource_names(app, 'pods'):
await cls._delete_resource(app, 'pods', i)
for i in await cls._list_resource_names(app, 'ingresses'):
await cls._delete_resource(app, 'ingresses', i)
# Volumes are not deleted at this moment.
# See https://github.com/asyncy/platform-engine/issues/189
@classmethod
def get_hostname(cls, app, container_name):
return f'{container_name}.' \
f'{app.app_id}.svc.cluster.local'
@classmethod
def find_all_ports(cls, service_config: dict, inside_http=False) -> set:
ports = set()
for key, value in service_config.items():
if isinstance(value, dict):
http = key == 'http' or inside_http
ports.update(cls.find_all_ports(value, inside_http=http))
elif inside_http and key == 'port':
assert isinstance(value, int)
ports.add(value)
if not inside_http:
expose = service_config.get('expose', {})
for name, expose_conf in expose.items():
expose_port = expose_conf.get('http', {}).get('port')
if expose_port is not None:
ports.add(expose_port)
return ports
@classmethod
def format_ports(cls, ports: {int}):
port_list = []
for port in ports:
port_list.append({
'port': port,
'protocol': 'TCP',
'targetPort': port
})
return port_list
@classmethod
async def create_service(cls, app, service: str,
container_name: str):
# Note: We don't check if this service exists because if it did,
# then we'd not get here. create_pod checks it. During beta, we tie
# 1:1 between a pod and a service.
ports = cls.find_all_ports(app.services[service])
port_list = cls.format_ports(ports)
payload = {
'apiVersion': 'v1',
'kind': 'Service',
'metadata': {
'name': container_name,
'namespace': app.app_id,
'labels': {
'app': container_name
}
},
'spec': {
'ports': port_list,
'selector': {
'app': container_name
}
}
}
path = f'/api/v1/namespaces/{app.app_id}/services'
res = await cls.make_k8s_call(app.config, app.logger, path, payload)
cls.raise_if_not_2xx(res)
# Wait until the ports of the destination pod are open.
hostname = cls.get_hostname(app, container_name)
app.logger.info(f'Waiting for ports to open: {ports}')
for port in ports:
success = await cls.wait_for_port(hostname, port)
if not success:
app.logger.warn(
f'Timed out waiting for {hostname}:{port} to open. '
f'Some actions of {service} might fail!')
@classmethod
async def wait_for_port(cls, host, port):
attempts = 0
timeout_secs = 2
while attempts < 60: # Max wait time = attempts * timeout_secs = 120
attempts += 1
try:
fut = asyncio.open_connection(host, port)
await asyncio.wait_for(fut, timeout=timeout_secs)
return True
except (TimeoutError, ConnectionRefusedError):
await asyncio.sleep(timeout_secs)
return False
@classmethod
async def check_for_image_errors(cls, app, container_name):
# List of image pull errors taken from the kubernetes source code
# github/kubernetes/kubernetes/blob/master/pkg/kubelet/images/types.go
image_errors = [
'ImagePullBackOff',
'ImageInspectError',
'ErrImagePull',
'ErrImageNeverPull',
'RegistryUnavailable',
'InvalidImageName'
]
prefix = cls._get_api_path_prefix('pods')
qs = urllib.parse.urlencode({
'labelSelector': f'app={container_name}'
})
res = await cls.make_k8s_call(app.config, app.logger,
f'{prefix}/{app.app_id}/pods?{qs}')
cls.raise_if_not_2xx(res)
body = json.loads(res.body, encoding='utf-8')
for pod in body['items']:
for container_status in pod['status'].get('containerStatuses', []):
is_waiting = container_status['state'].get('waiting', False)
if is_waiting and is_waiting['reason'] in image_errors:
raise K8sError(
message=f'{is_waiting["reason"]} - '
f'Failed to pull image {container_status["image"]}'
)
@classmethod
async def create_deployment(cls, app, service_name: str, image: str,
container_name: str, start_command: [] or str,
shutdown_command: [] or str, env: dict,
volumes: Volumes):
# Note: We don't check if this deployment exists because if it did,
# then we'd not get here. create_pod checks it. During beta, we tie
# 1:1 between a pod and a deployment.
        env_k8s = []  # Must contain {name:'foo', value:'bar'}.
if env:
for k, v in env.items():
if isinstance(v, bool):
if v:
v = 'true'
else:
v = 'false'
v = str(v) # In case it was a number.
env_k8s.append({
'name': k,
'value': v
})
volume_mounts = []
volumes_k8s = []
for vol in volumes:
volume_mounts.append({
'mountPath': vol.mount_path,
'name': vol.name
})
volumes_k8s.append({
'name': vol.name,
'persistentVolumeClaim': {
'claimName': vol.name
}
})
if not vol.persist:
await cls.remove_volume(app, vol.name)
await cls.create_volume(app, vol.name, vol.persist)
b16_service_name = base64.b16encode(service_name.encode()).decode()
payload = {
'apiVersion': 'apps/v1',
'kind': 'Deployment',
'metadata': {
'name': container_name,
'namespace': app.app_id
},
'spec': {
'replicas': 1,
'strategy': {
'type': 'RollingUpdate'
},
'selector': {
'matchLabels': {
'app': container_name
}
},
'template': {
'metadata': {
'labels': {
'app': container_name,
'b16-service-name': b16_service_name,
'logstash-enabled': 'true'
}
},
'spec': {
'containers': [
{
'name': container_name,
'image': image,
'resources': {
'limits': {
'memory': '200Mi', # During beta.
# 'cpu': '500m', # During beta.
}
},
'command': start_command,
'imagePullPolicy': 'Always',
'env': env_k8s,
'lifecycle': {
},
'volumeMounts': volume_mounts
}
],
'volumes': volumes_k8s
}
}
}
}
if shutdown_command is not None:
payload['spec']['template']['spec']['containers'][0]['lifecycle'][
'preStop'] = {
'exec': {
'command': shutdown_command
}
}
path = f'/apis/apps/v1/namespaces/{app.app_id}/deployments'
# When a namespace is created for the first time, K8s needs to perform
        # some sort of preparation. Pod creation fails sporadically for new
# namespaces. Check the status and retry.
tries = 0
res = None
while tries < 10:
tries = tries + 1
res = await cls.make_k8s_call(app.config, app.logger,
path, payload)
if cls.is_2xx(res):
break
app.logger.debug(f'Failed to create deployment, retrying...')
await asyncio.sleep(1)
cls.raise_if_not_2xx(res)
path = f'/apis/apps/v1/namespaces/{app.app_id}' \
f'/deployments/{container_name}'
# Wait until the deployment is ready.
app.logger.debug('Waiting for deployment to be ready...')
while True:
res = await cls.make_k8s_call(app.config, app.logger, path)
cls.raise_if_not_2xx(res)
body = json.loads(res.body, encoding='utf-8')
if body['status'].get('readyReplicas', 0) > 0:
break
await cls.check_for_image_errors(app, container_name)
await asyncio.sleep(1)
app.logger.debug('Deployment is ready')
@classmethod
async def create_pod(cls, app, service: str, image: str,
container_name: str, start_command: [] or str,
shutdown_command: [] or str, env: dict,
volumes: Volumes):
res = await cls.make_k8s_call(
app.config, app.logger,
f'/apis/apps/v1/namespaces/{app.app_id}'
f'/deployments/{container_name}')
if res.code == 200:
app.logger.debug(f'Deployment {container_name} '
f'already exists, reusing')
return
await cls.create_deployment(app, service, image, container_name,
start_command, shutdown_command, env,
volumes)
await cls.create_service(app, service, container_name)
|
import discord
from discord.ext import commands
from traceback import format_exception
import io
import textwrap
import contextlib
from discord.ext.buttons import Paginator
import os
class Pag(Paginator):
async def teardown(self):
try:
await self.page.clear_reactions()
except discord.HTTPException:
pass
def clean_code(content):
if content.startswith("```py") and content.endswith("```"):
return "\n".join(content.split("\n")[1:])[:-3]
else:
return content
def is_dev():
def predicate(ctx):
        # Allow either developer ID (a bare comma would return an always-truthy tuple).
        return ctx.message.author.id in (759850502661472321, 225685670788726784)
return commands.check(predicate)
class EvalCMD(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(name="eval", aliases=["exec", "evel", "run"], hidden=True)
@is_dev()
async def _eval(self, ctx, *, code):
async with ctx.typing():
code = clean_code(code)
local_variables = {
"discord": discord,
"commands": commands,
"client": self.client,
"owl": self.client,
"bot": self.client,
"ctx": ctx,
"send": ctx.send,
"channel": ctx.channel,
"author": ctx.author,
"guild": ctx.guild,
"message": ctx.message,
"self": self,
"os": os
}
stdout = io.StringIO()
try:
with contextlib.redirect_stdout(stdout):
exec(
f"async def func():\n{textwrap.indent(code, " ")}", local_variables,
)
obj = await local_variables["func"]()
result = str(stdout.getvalue())
except Exception as e:
result = "".join(format_exception(e, e, e.__traceback__))
if len(result) > 0:
pager = Pag(
timeout=100,
entries=[result[i: i + 2000]
for i in range(0, len(result), 2000)],
length=1,
prefix="```py\n",
suffix="```"
)
await pager.start(ctx)
def setup(client):
client.add_cog(EvalCMD(client))
|
import discord
from discord.ext import commands
from traceback import format_exception
import io
import textwrap
import contextlib
from discord.ext.buttons import Paginator
import os
class Pag(Paginator):
async def teardown(self):
try:
await self.page.clear_reactions()
except discord.HTTPException:
pass
def clean_code(content):
if content.startswith("```py") and content.endswith("```"):
return "\n".join(content.split("\n")[1:])[:-3]
else:
return content
def is_dev():
def predicate(ctx):
        # Allow either developer ID (a bare comma would return an always-truthy tuple).
        return ctx.message.author.id in (759850502661472321, 225685670788726784)
return commands.check(predicate)
class EvalCMD(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(name="eval", aliases=["exec", "evel", "run"], hidden=True)
@is_dev()
async def _eval(self, ctx, *, code):
async with ctx.typing():
code = clean_code(code)
local_variables = {
"discord": discord,
"commands": commands,
"client": self.client,
"owl": self.client,
"bot": self.client,
"ctx": ctx,
"send": ctx.send,
"channel": ctx.channel,
"author": ctx.author,
"guild": ctx.guild,
"message": ctx.message,
"self": self,
"os": os
}
stdout = io.StringIO()
try:
with contextlib.redirect_stdout(stdout):
exec(
f"async def func():\n{textwrap.indent(code, ' ')}", local_variables,
)
obj = await local_variables["func"]()
result = str(stdout.getvalue())
except Exception as e:
result = "".join(format_exception(e, e, e.__traceback__))
if len(result) > 0:
pager = Pag(
timeout=100,
entries=[result[i: i + 2000]
for i in range(0, len(result), 2000)],
length=1,
prefix="```py\n",
suffix="```"
)
await pager.start(ctx)
def setup(client):
client.add_cog(EvalCMD(client))
|
"""Implementation of the wired WebComponents"""
import ast
import datetime
import json
from typing import Optional
import param
from awesome_panel.express.components.material import MWC_ICONS
from awesome_panel.express.pane.web_component import WebComponent
# @Philippfr. Should we load the full bundle or individual bundles?
# @Philippfr. How should users load the js, e.g. via pn.extension("wired")?
# @Philippfr. Should we include the webcomponents-loader for older browsers?
JS_FILES = {
"webcomponents-loader": (
"https://unpkg.com/@webcomponents/[email protected]/webcomponents-loader.js"
),
"wired-bundle": "https://wiredjs.com/dist/showcase.min.js",
}
GRAY = "rgb(55, 71, 79)"
PINK = "rgb(240, 230, 244)"
RED = "rgb(255, 0, 0)"
YELLOW = "rgb(255, 255, 0)"
LOGO = "https://wiredjs.com/images/logo_400.png"
FONT_FAMILY = "Gloria Hallelujah, sans-serif"
ELEVATION_DEFAULT = 0
ELEVATION_BOUNDS = (0, 10)
DATE_BOUNDS = (datetime.date(1976, 9, 17), datetime.date(datetime.datetime.now().year + 10, 12, 31))
class WiredBase(WebComponent):
"""Inherit from this class"""
def __init__(self, **params):
if not self.param.attributes_to_watch.default:
self.param.attributes_to_watch.default = {}
self.attributes_to_watch["disabled"] = "disabled"
super().__init__(**params)
def _child_parameters(self):
parameters = super()._child_parameters()
parameters.add("disabled")
return parameters
class Button(WiredBase):
"""A Wired RadioButton
- You can set the `text` shown via the `name` parameter.
"""
html = param.String("<wired-button></wired-button>")
attributes_to_watch = param.Dict({"elevation": "elevation"})
events_to_watch = param.Dict(default={"click": "clicks"})
parameters_to_watch = param.List(["name"])
clicks = param.Integer()
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
def __init__(self, **params):
if "height" not in params:
params["height"] = 40
super().__init__(**params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-button>{params["name"]}</wired-button>"
class Checkbox(WiredBase):
"""wired-checkbox"""
html = param.String("<wired-checkbox></wired-checkbox>")
properties_to_watch = param.Dict({"checked": "value"})
parameters_to_watch = param.List(["name"])
value = param.Boolean()
def __init__(self, **params):
if "height" not in params:
params["height"] = 40
super().__init__(**params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-checkbox>{params["name"]}</wired-checkbox>"
class DatePicker(WiredBase):
"""wired-calendar"""
component_type = param.String("inputgroup")
html = param.String(
        '<wired-calendar initials="" role="dialog" tabindex="0">Button</wired-calendar>'
)
attributes_to_watch = param.Dict(
{
"elevation": "elevation",
"firstdate": "firstdate",
"lastdate": "lastdate",
"locale": "locale",
}
)
properties_to_watch = param.Dict({"selected": "selected"})
events_to_watch = param.Dict(default={"selected": "selects"})
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
firstdate = param.String(
doc="""
Example: firstdate="Apr 15, 2019"""
)
lastdate = param.String(
doc="""
Example: lastdate="Jul 15, 2019"""
)
locale = param.ObjectSelector("en", objects=["en", "fr", "de"])
selected = param.String(
doc="""
Example: selected="Jul 4, 2019"""
)
selects = param.Integer(bounds=(0, None))
value = param.CalendarDate(default=None, bounds=DATE_BOUNDS)
start = param.CalendarDate(bounds=DATE_BOUNDS)
end = param.CalendarDate(bounds=DATE_BOUNDS)
def __init__(self, min_height=340, min_width=300, **params):
super().__init__(min_height=min_height, min_width=min_width, **params)
@staticmethod
def _to_date(value: Optional[str]) -> Optional[datetime.date]:
if value:
return datetime.datetime.strptime(value, "%b %d, %Y").date()
return None
@staticmethod
def _to_string(value: datetime.date) -> Optional[str]:
if value:
            # Collapse the double space produced by the %e padding.
            return value.strftime("%b %e, %Y").replace("  ", " ")
return None
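    # Illustrative note (not part of the original code): the calendar works
    # with strings like "Jul 4, 2019", so, on platforms where %e is supported,
    #
    #   _to_date("Jul 4, 2019")               -> datetime.date(2019, 7, 4)
    #   _to_string(datetime.date(2019, 7, 4)) -> "Jul 4, 2019"
    #
    # which is what keeps `selected` and `value` in sync below.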
@param.depends("selected", watch=True)
def _set_value(self):
value = self._to_date(self.selected)
if value != self.value:
self.value = value
@param.depends("value", watch=True)
def _set_selected(self):
selected = self._to_string(self.value)
if selected != self.selected:
self.selected = selected
@param.depends("firstdate", watch=True)
def _set_start(self):
start = self._to_date(self.firstdate)
if start != self.start:
self.start = start
@param.depends("start", watch=True)
def _set_firstdate(self):
firstdate = self._to_string(self.start)
if firstdate != self.firstdate:
self.firstdate = firstdate
@param.depends("lastdate", watch=True)
def _set_end(self):
end = self._to_date(self.lastdate)
if end != self.end:
self.end = end
@param.depends("end", watch=True)
def _set_lastdate(self):
lastdate = self._to_string(self.end)
if lastdate != self.lastdate:
self.lastdate = lastdate
class Dialog(WebComponent):
"""wired-dialog"""
html = param.String("<wired-dialog></wired-checkbox>")
attributes_to_watch = param.Dict({"open": "is_open"})
parameters_to_watch = param.List(["text"])
is_open = param.Boolean(default=False)
text = param.String()
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-dialog>{params["text"]}</wired-dialog>"
class Divider(WebComponent):
"""wired-divider"""
html = param.String("<wired-divider></wired-divider>")
def __init__(self, min_height=20, **params):
super().__init__(min_height=min_height, **params)
attributes_to_watch = param.Dict({"elevation": "elevation"})
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
class Fab(WiredBase):
"""wired-fab"""
html = param.String("<wired-fab><mwc-icon>favorite</mwc-icon></wired-fab>")
parameters_to_watch = param.List(["icon"])
icon = param.ObjectSelector(
"favorite",
objects=MWC_ICONS,
doc="""
The name of an `mwc-icon <https://github.com/material-components/material-components-web-components/tree/master/packages/icon>`_
""",
)
def __init__(
self,
min_height=40,
**params,
):
super().__init__(min_height=min_height, **params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-fab><mwc-icon>{params["icon"]}</mwc-icon></wired-fab>"
# Issue: Value is not set on construction. See
# https://github.com/wiredjs/wired-elements/issues/121#issue-573516963
class FloatSlider(WebComponent):
"""wired-slider for floats"""
component_type = param.String("inputgroup")
html = param.String("<wired-slider style='width: 100%;height:100%'></wired-slider>")
attributes_to_watch = param.Dict({"min": "start", "max": "end", "step": "step"})
properties_to_watch = param.Dict({"input.value": "value"})
events_to_watch = param.Dict({"change": None})
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
start = param.Number(0.0)
end = param.Number(1.0)
step = param.Number(0.1)
value = param.Number(default=0.0)
class IconButton(WiredBase):
"""wired-icon-button"""
html = param.String("<wired-icon-button><mwc-icon>favorite</mwc-icon><wired-icon-button>")
parameters_to_watch = param.List(["icon"])
events_to_watch = param.Dict(default={"click": "clicks"})
icon = param.ObjectSelector(
"favorite",
objects=MWC_ICONS,
doc="""
The name of an `mwc-icon <https://github.com/material-components/material-components-web-components/tree/master/packages/icon>`_
""",
)
clicks = param.Integer()
def __init__(
self,
min_height=40,
**params,
):
super().__init__(min_height=min_height, **params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-icon-button><mwc-icon>{params["icon"]}</mwc-icon></wired-icon-button>"
class Image(WebComponent):
"""wired-image"""
html = param.String('<wired-image style="width:100%;height:100%"></wired-image>')
attributes_to_watch = param.Dict({"elevation": "elevation", "src": "object", "alt": "alt_text"})
# @Philippfr: How do I handle height and width in general in the .ts model?
def __init__(self, height=100, **params):
super().__init__(height=height, **params)
object = param.String(default=None, doc="""Currently only an url is supported""")
alt_text = param.String(default=None)
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
# Issue: Value is not set on construction. See
# https://github.com/wiredjs/wired-elements/issues/121#issue-573516963
class IntSlider(FloatSlider):
"""wired-slider for int"""
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
start = param.Integer(0)
end = param.Integer(1)
step = param.Integer(1)
value = param.Integer(0)
class Link(WebComponent):
"""wired-link"""
html = param.String("<wired-link></wired-link>")
attributes_to_watch = param.Dict({"href": "href", "target": "target"})
parameters_to_watch = param.List(["text"])
href = param.String()
target = param.ObjectSelector("_blank", objects=["_self", "_blank", "_parent", "_top"])
text = param.String()
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-link>{params["text"]}</wired-link>"
class LiteralInput(WiredBase):
"""wired-input for list and dict"""
component_type = param.String("inputgroup")
html = param.String("""<wired-input style="width:100%"></wired-input>""")
attributes_to_watch = param.Dict(
{
"placeholder": "placeholder",
}
)
properties_to_watch = param.Dict({"textInput.value": "value"})
events_to_watch = param.Dict({"change": None})
    # @Philippfr. Is this the right place to define height? And what about width?
def __init__(self, min_height=60, **params):
# Hack: To solve https://github.com/wiredjs/wired-elements/issues/123
if "value" in params:
self.param.html.default = (
f'<wired-input value="{params['value']}" '
'style="width:100%;height:100%"></wired-input>'
)
elif self.param.value.default:
self.param.html.default = (
f'<wired-input value="{self.param.value.default}" '
'style="width:100%;height:100%"></wired-input>'
)
super().__init__(min_height=min_height, **params)
placeholder = param.String(default="Enter Value")
value = param.Parameter()
type = param.ClassSelector(default=None, class_=(type, tuple), is_instance=True)
serializer = param.ObjectSelector(
default="ast",
objects=["ast", "json"],
doc="""
The serialization (and deserialization) method to use. 'ast'
uses ast.literal_eval and 'json' uses json.loads and json.dumps.
""",
)
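    # Illustrative sketch (not from the original source): with the default
    # serializer="ast", text such as "{'a': 1}" is parsed with ast.literal_eval
    # into {'a': 1}; with serializer="json" the widget expects JSON text such
    # as '{"a": 1}' and round-trips it with json.loads / json.dumps instead.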
def _set_type(self):
if not self.value:
self.type.class_ = (type, tuple)
def _handle_properties_last_change(self, event):
if "textInput.value" in event.new:
value = event.new["textInput.value"]
if not value or not isinstance(value, str):
pass
elif self.serializer == "json":
value = json.loads(value)
else:
value = ast.literal_eval(value)
if value != self.value:
self.value = value
else:
super()._handle_properties_last_change(event)
def _handle_parameter_property_change(self, event):
if event.name == "value":
value = event.new
if not value or isinstance(value, str):
pass
else:
if self.serializer == "json":
value = json.dumps(value)
else:
value = repr(value)
properties_last_change = {"textInput.value": value}
if properties_last_change != self.properties_last_change:
self.properties_last_change = properties_last_change
else:
super()._handle_parameter_property_change(event)
class Progress(WebComponent):
"""wired-progress"""
html = param.String("<wired-progress></wired-progress>")
attributes_to_watch = param.Dict({"value": "value", "percentage": "percentage", "max": "max"})
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
if "max" in params:
self._handle_max_changed()
value = param.Integer(None, bounds=(0, 100))
max = param.Integer(100, bounds=(0, None))
percentage = param.Boolean()
@param.depends("max", watch=True)
def _handle_max_changed(self):
self.param.value.bounds = (0, self.max)
class RadioButton(WebComponent):
"""wired-radio"""
html = param.String("<wired-radio>Radio Button</wired-radio>")
properties_to_watch = param.Dict({"checked": "value"})
parameters_to_watch = param.List(["name"])
value = param.Boolean(default=False)
def __init__(self, **params):
if "height" not in params:
params["height"] = 15
super().__init__(**params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-radio>{params["name"]}</wired-radio>"
class SearchInput(WiredBase):
"""wired-search-input"""
html = param.String("<wired-search-input></wired-search-input>")
attributes_to_watch = param.Dict({"placeholder": "placeholder", "autocomplete": "autocomplete"})
properties_to_watch = param.Dict({"textInput.value": "value"})
events_to_watch = param.Dict({"input": None})
def __init__(self, min_height=40, **params):
if "value" in params:
self.param.html.default = (
                f'<wired-search-input value="{params["value"]}" '
'style="width:100%;height:100%"></wired-search-input>'
)
elif self.param.value.default:
self.param.html.default = (
f'<wired-search-input value="{self.param.value.default}" '
'style="width:100%;height:100%"></wired-search-input>'
)
super().__init__(min_height=min_height, **params)
placeholder = param.String("")
value = param.String()
autocomplete = param.ObjectSelector("off", objects=["on", "off"])
class ProgressSpinner(WebComponent):
"""wired-spinnner"""
html = param.String("<wired-spinner></wired-spinner>")
attributes_to_watch = param.Dict({"spinning": "active", "duration": "duration"})
active = param.Boolean(default=True)
duration = param.Integer(default=1000, bounds=(1, 10000))
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
class TextAreaInput(WiredBase):
"""wired-textarea"""
component_type = param.String("inputgroup")
html = param.String('<wired-textarea placeholder="Enter text"></wired-textarea>')
attributes_to_watch = param.Dict({"placeholder": "placeholder"})
properties_to_watch = param.Dict(
{"textareaInput.value": "value", "rows": "rows", "maxlength": "max_length"}
)
events_to_watch = param.ObjectSelector(
{"change": None},
objects=[{"change": None}, {"input": None}],
doc="""
    The event(s) to watch. When the event(s) are caught, the js model properties are checked and
any changed values are sent to the python model. The event can be
- `change` (when done) or
- `input` (for every character change)
""",
)
placeholder = param.String("")
value = param.String()
rows = param.Integer(2, bounds=(1, 100))
max_length = param.Integer(default=5000)
def __init__(self, **params):
super().__init__(**params)
if "min_height" not in params:
self._set_height()
@param.depends("rows", "disabled", watch=True)
def _set_height(self):
height = 20 + 19 * self.rows
if self.disabled:
height += 4
if height != self.height:
self.height = height
class TextInput(WiredBase):
"""wired-input for text"""
component_type = param.String("inputgroup")
html = param.String("""<wired-input style="width:100%;height:100%"></wired-input>""")
attributes_to_watch = param.Dict(
{
"placeholder": "placeholder",
"type": "type_",
# "min": "start",
# "max": "end",
# "step": "step",
}
)
properties_to_watch = param.Dict({"textInput.value": "value"})
events_to_watch = param.Dict({"change": None})
    # @Philippfr. Is this the right place to define height? And what about width?
def __init__(self, min_height=50, **params):
# Hack: To solve https://github.com/wiredjs/wired-elements/issues/123
if "value" in params:
self.param.html.default = (
f'<wired-input value="{params['value']}" '
'style="width:100%;height:100%"></wired-input>'
)
elif self.param.value.default:
self.param.html.default = (
f'<wired-input value="{self.param.value.default}" '
'style="width:100%;height:100%"></wired-input>'
)
super().__init__(min_height=min_height, **params)
placeholder = param.String(default="")
type_ = param.ObjectSelector("", objects=["", "password"])
value = param.String()
class Toggle(WiredBase):
"""wired-toggle"""
html = param.String("<wired-toggle></wired-toggle>")
properties_to_watch = param.Dict({"checked": "value"})
events_to_watch = param.Dict({"change": None})
def __init__(self, min_height=20, **params):
super().__init__(min_height=min_height, **params)
value = param.Boolean(False)
class Select(WebComponent):
"""wired-combo"""
component_type = param.String("inputgroup")
html = param.String("""<wired-combo></wired-combo>""")
properties_to_watch = param.Dict({"selected": "value"})
events_to_watch = param.Dict(default={"selected": "selects"})
parameters_to_watch = param.List(["options"])
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
self._set_class_()
value = param.Parameter()
selects = param.Integer()
options = param.ClassSelector(default=[], class_=(dict, list))
def _get_html_from_parameters_to_watch(self, **params) -> str:
options = params["options"]
if not options:
return """<wired-combo></wired-combo>"""
innerhtml = []
if isinstance(options, list):
for obj in options:
item = f'<wired-item value="{str(obj)}" role="option">{str(obj)}</wired-item>'
innerhtml.append(item)
if isinstance(options, dict):
for key, value in options.items():
item = f'<wired-item value="{str(key)}" role="option">{str(value)}</wired-item>'
innerhtml.append(item)
return f"""<wired-combo>{"".join(innerhtml)}</wired-combo>"""
    # @Philippfr: Don't understand why this is necessary. But get error if I don't do it.
@param.depends("options", watch=True)
def _set_class_(self):
if isinstance(self.options, list):
self.param.options.class_ = list
if isinstance(self.options, dict):
self.param.options.class_ = dict
class Video(WebComponent):
"""wired-video"""
html = param.String(
"""<wired-video autoplay="" playsinline="" muted="" loop="" style="height: 80%;"\
src="https://file-examples.com/wp-content/uploads/2017/04/file_example_MP4_480_1_5MG.mp4">\
</wired-video>"""
)
attributes_to_watch = param.Dict(
{"autoplay": "autoplay", "playsinline": "playsinline", "loop": "loop", "src": "object"}
)
def __init__(self, min_height=250, **params):
super().__init__(min_height=min_height, **params)
object = param.String(doc="""Currently only an url is supported""")
autoplay = param.Boolean()
playsinline = param.Boolean()
muted = param.Boolean()
loop = param.Boolean()
|
"""Implementation of the wired WebComponents"""
import ast
import datetime
import json
from typing import Optional
import param
from awesome_panel.express.components.material import MWC_ICONS
from awesome_panel.express.pane.web_component import WebComponent
# @Philippfr. Should we load the full bundle or individual bundles?
# @Philippfr. How should users load the js, e.g. via pn.extension("wired")?
# @Philippfr. Should we include the webcomponents-loader for older browsers?
JS_FILES = {
"webcomponents-loader": (
"https://unpkg.com/@webcomponents/[email protected]/webcomponents-loader.js"
),
"wired-bundle": "https://wiredjs.com/dist/showcase.min.js",
}
GRAY = "rgb(55, 71, 79)"
PINK = "rgb(240, 230, 244)"
RED = "rgb(255, 0, 0)"
YELLOW = "rgb(255, 255, 0)"
LOGO = "https://wiredjs.com/images/logo_400.png"
FONT_FAMILY = "Gloria Hallelujah, sans-serif"
ELEVATION_DEFAULT = 0
ELEVATION_BOUNDS = (0, 10)
DATE_BOUNDS = (datetime.date(1976, 9, 17), datetime.date(datetime.datetime.now().year + 10, 12, 31))
class WiredBase(WebComponent):
"""Inherit from this class"""
def __init__(self, **params):
if not self.param.attributes_to_watch.default:
self.param.attributes_to_watch.default = {}
self.attributes_to_watch["disabled"] = "disabled"
super().__init__(**params)
def _child_parameters(self):
parameters = super()._child_parameters()
parameters.add("disabled")
return parameters
class Button(WiredBase):
"""A Wired RadioButton
- You can set the `text` shown via the `name` parameter.
"""
html = param.String("<wired-button></wired-button>")
attributes_to_watch = param.Dict({"elevation": "elevation"})
events_to_watch = param.Dict(default={"click": "clicks"})
parameters_to_watch = param.List(["name"])
clicks = param.Integer()
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
def __init__(self, **params):
if "height" not in params:
params["height"] = 40
super().__init__(**params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-button>{params['name']}</wired-button>"
class Checkbox(WiredBase):
"""wired-checkbox"""
html = param.String("<wired-checkbox></wired-checkbox>")
properties_to_watch = param.Dict({"checked": "value"})
parameters_to_watch = param.List(["name"])
value = param.Boolean()
def __init__(self, **params):
if "height" not in params:
params["height"] = 40
super().__init__(**params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-checkbox>{params['name']}</wired-checkbox>"
class DatePicker(WiredBase):
"""wired-calendar"""
component_type = param.String("inputgroup")
html = param.String(
        '<wired-calendar initials="" role="dialog" tabindex="0">Button</wired-calendar>'
)
attributes_to_watch = param.Dict(
{
"elevation": "elevation",
"firstdate": "firstdate",
"lastdate": "lastdate",
"locale": "locale",
}
)
properties_to_watch = param.Dict({"selected": "selected"})
events_to_watch = param.Dict(default={"selected": "selects"})
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
firstdate = param.String(
doc="""
Example: firstdate="Apr 15, 2019"""
)
lastdate = param.String(
doc="""
Example: lastdate="Jul 15, 2019"""
)
locale = param.ObjectSelector("en", objects=["en", "fr", "de"])
selected = param.String(
doc="""
Example: selected="Jul 4, 2019"""
)
selects = param.Integer(bounds=(0, None))
value = param.CalendarDate(default=None, bounds=DATE_BOUNDS)
start = param.CalendarDate(bounds=DATE_BOUNDS)
end = param.CalendarDate(bounds=DATE_BOUNDS)
def __init__(self, min_height=340, min_width=300, **params):
super().__init__(min_height=min_height, min_width=min_width, **params)
@staticmethod
def _to_date(value: Optional[str]) -> Optional[datetime.date]:
if value:
return datetime.datetime.strptime(value, "%b %d, %Y").date()
return None
@staticmethod
def _to_string(value: datetime.date) -> Optional[str]:
if value:
            # Collapse the double space produced by the %e padding.
            return value.strftime("%b %e, %Y").replace("  ", " ")
return None
@param.depends("selected", watch=True)
def _set_value(self):
value = self._to_date(self.selected)
if value != self.value:
self.value = value
@param.depends("value", watch=True)
def _set_selected(self):
selected = self._to_string(self.value)
if selected != self.selected:
self.selected = selected
@param.depends("firstdate", watch=True)
def _set_start(self):
start = self._to_date(self.firstdate)
if start != self.start:
self.start = start
@param.depends("start", watch=True)
def _set_firstdate(self):
firstdate = self._to_string(self.start)
if firstdate != self.firstdate:
self.firstdate = firstdate
@param.depends("lastdate", watch=True)
def _set_end(self):
end = self._to_date(self.lastdate)
if end != self.end:
self.end = end
@param.depends("end", watch=True)
def _set_lastdate(self):
lastdate = self._to_string(self.end)
if lastdate != self.lastdate:
self.lastdate = lastdate
class Dialog(WebComponent):
"""wired-dialog"""
html = param.String("<wired-dialog></wired-checkbox>")
attributes_to_watch = param.Dict({"open": "is_open"})
parameters_to_watch = param.List(["text"])
is_open = param.Boolean(default=False)
text = param.String()
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-dialog>{params['text']}</wired-dialog>"
class Divider(WebComponent):
"""wired-divider"""
html = param.String("<wired-divider></wired-divider>")
def __init__(self, min_height=20, **params):
super().__init__(min_height=min_height, **params)
attributes_to_watch = param.Dict({"elevation": "elevation"})
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
class Fab(WiredBase):
"""wired-fab"""
html = param.String("<wired-fab><mwc-icon>favorite</mwc-icon></wired-fab>")
parameters_to_watch = param.List(["icon"])
icon = param.ObjectSelector(
"favorite",
objects=MWC_ICONS,
doc="""
The name of an `mwc-icon <https://github.com/material-components/material-components-web-components/tree/master/packages/icon>`_
""",
)
def __init__(
self,
min_height=40,
**params,
):
super().__init__(min_height=min_height, **params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-fab><mwc-icon>{params['icon']}</mwc-icon></wired-fab>"
# Issue: Value is not set on construction. See
# https://github.com/wiredjs/wired-elements/issues/121#issue-573516963
class FloatSlider(WebComponent):
"""wired-slider for floats"""
component_type = param.String("inputgroup")
html = param.String("<wired-slider style='width: 100%;height:100%'></wired-slider>")
attributes_to_watch = param.Dict({"min": "start", "max": "end", "step": "step"})
properties_to_watch = param.Dict({"input.value": "value"})
events_to_watch = param.Dict({"change": None})
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
start = param.Number(0.0)
end = param.Number(1.0)
step = param.Number(0.1)
value = param.Number(default=0.0)
class IconButton(WiredBase):
"""wired-icon-button"""
html = param.String("<wired-icon-button><mwc-icon>favorite</mwc-icon><wired-icon-button>")
parameters_to_watch = param.List(["icon"])
events_to_watch = param.Dict(default={"click": "clicks"})
icon = param.ObjectSelector(
"favorite",
objects=MWC_ICONS,
doc="""
The name of an `mwc-icon <https://github.com/material-components/material-components-web-components/tree/master/packages/icon>`_
""",
)
clicks = param.Integer()
def __init__(
self,
min_height=40,
**params,
):
super().__init__(min_height=min_height, **params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-icon-button><mwc-icon>{params['icon']}</mwc-icon></wired-icon-button>"
class Image(WebComponent):
"""wired-image"""
html = param.String('<wired-image style="width:100%;height:100%"></wired-image>')
attributes_to_watch = param.Dict({"elevation": "elevation", "src": "object", "alt": "alt_text"})
# @Philippfr: How do I handle height and width in general in the .ts model?
def __init__(self, height=100, **params):
super().__init__(height=height, **params)
object = param.String(default=None, doc="""Currently only an url is supported""")
alt_text = param.String(default=None)
elevation = param.Integer(ELEVATION_DEFAULT, bounds=ELEVATION_BOUNDS)
# Issue: Value is not set on construction. See
# https://github.com/wiredjs/wired-elements/issues/121#issue-573516963
class IntSlider(FloatSlider):
"""wired-slider for int"""
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
start = param.Integer(0)
end = param.Integer(1)
step = param.Integer(1)
value = param.Integer(0)
class Link(WebComponent):
"""wired-link"""
html = param.String("<wired-link></wired-link>")
attributes_to_watch = param.Dict({"href": "href", "target": "target"})
parameters_to_watch = param.List(["text"])
href = param.String()
target = param.ObjectSelector("_blank", objects=["_self", "_blank", "_parent", "_top"])
text = param.String()
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-link>{params['text']}</wired-link>"
class LiteralInput(WiredBase):
"""wired-input for list and dict"""
component_type = param.String("inputgroup")
html = param.String("""<wired-input style="width:100%"></wired-input>""")
attributes_to_watch = param.Dict(
{
"placeholder": "placeholder",
}
)
properties_to_watch = param.Dict({"textInput.value": "value"})
events_to_watch = param.Dict({"change": None})
    # @Philippfr. Is this the right place to define height? And what about width?
def __init__(self, min_height=60, **params):
# Hack: To solve https://github.com/wiredjs/wired-elements/issues/123
if "value" in params:
self.param.html.default = (
f'<wired-input value="{params["value"]}" '
'style="width:100%;height:100%"></wired-input>'
)
elif self.param.value.default:
self.param.html.default = (
f'<wired-input value="{self.param.value.default}" '
'style="width:100%;height:100%"></wired-input>'
)
super().__init__(min_height=min_height, **params)
placeholder = param.String(default="Enter Value")
value = param.Parameter()
type = param.ClassSelector(default=None, class_=(type, tuple), is_instance=True)
serializer = param.ObjectSelector(
default="ast",
objects=["ast", "json"],
doc="""
The serialization (and deserialization) method to use. 'ast'
uses ast.literal_eval and 'json' uses json.loads and json.dumps.
""",
)
def _set_type(self):
if not self.value:
self.type.class_ = (type, tuple)
def _handle_properties_last_change(self, event):
if "textInput.value" in event.new:
value = event.new["textInput.value"]
if not value or not isinstance(value, str):
pass
elif self.serializer == "json":
value = json.loads(value)
else:
value = ast.literal_eval(value)
if value != self.value:
self.value = value
else:
super()._handle_properties_last_change(event)
def _handle_parameter_property_change(self, event):
if event.name == "value":
value = event.new
if not value or isinstance(value, str):
pass
else:
if self.serializer == "json":
value = json.dumps(value)
else:
value = repr(value)
properties_last_change = {"textInput.value": value}
if properties_last_change != self.properties_last_change:
self.properties_last_change = properties_last_change
else:
super()._handle_parameter_property_change(event)
class Progress(WebComponent):
"""wired-progress"""
html = param.String("<wired-progress></wired-progress>")
attributes_to_watch = param.Dict({"value": "value", "percentage": "percentage", "max": "max"})
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
if "max" in params:
self._handle_max_changed()
value = param.Integer(None, bounds=(0, 100))
max = param.Integer(100, bounds=(0, None))
percentage = param.Boolean()
@param.depends("max", watch=True)
def _handle_max_changed(self):
self.param.value.bounds = (0, self.max)
class RadioButton(WebComponent):
"""wired-radio"""
html = param.String("<wired-radio>Radio Button</wired-radio>")
properties_to_watch = param.Dict({"checked": "value"})
parameters_to_watch = param.List(["name"])
value = param.Boolean(default=False)
def __init__(self, **params):
if "height" not in params:
params["height"] = 15
super().__init__(**params)
def _get_html_from_parameters_to_watch(self, **params) -> str:
return f"<wired-radio>{params['name']}</wired-radio>"
class SearchInput(WiredBase):
"""wired-search-input"""
html = param.String("<wired-search-input></wired-search-input>")
attributes_to_watch = param.Dict({"placeholder": "placeholder", "autocomplete": "autocomplete"})
properties_to_watch = param.Dict({"textInput.value": "value"})
events_to_watch = param.Dict({"input": None})
def __init__(self, min_height=40, **params):
if "value" in params:
self.param.html.default = (
f'<wired-search-input value="{params["value"]}" '
'style="width:100%;height:100%"></wired-search-input>'
)
elif self.param.value.default:
self.param.html.default = (
f'<wired-search-input value="{self.param.value.default}" '
'style="width:100%;height:100%"></wired-search-input>'
)
super().__init__(min_height=min_height, **params)
placeholder = param.String("")
value = param.String()
autocomplete = param.ObjectSelector("off", objects=["on", "off"])
class ProgressSpinner(WebComponent):
"""wired-spinnner"""
html = param.String("<wired-spinner></wired-spinner>")
attributes_to_watch = param.Dict({"spinning": "active", "duration": "duration"})
active = param.Boolean(default=True)
duration = param.Integer(default=1000, bounds=(1, 10000))
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
class TextAreaInput(WiredBase):
"""wired-textarea"""
component_type = param.String("inputgroup")
html = param.String('<wired-textarea placeholder="Enter text"></wired-textarea>')
attributes_to_watch = param.Dict({"placeholder": "placeholder"})
properties_to_watch = param.Dict(
{"textareaInput.value": "value", "rows": "rows", "maxlength": "max_length"}
)
events_to_watch = param.ObjectSelector(
{"change": None},
objects=[{"change": None}, {"input": None}],
doc="""
        The event(s) to watch. When the event(s) are caught, the JS model properties are checked and
any changed values are sent to the python model. The event can be
- `change` (when done) or
- `input` (for every character change)
""",
)
placeholder = param.String("")
value = param.String()
rows = param.Integer(2, bounds=(1, 100))
max_length = param.Integer(default=5000)
def __init__(self, **params):
super().__init__(**params)
if "min_height" not in params:
self._set_height()
@param.depends("rows", "disabled", watch=True)
def _set_height(self):
height = 20 + 19 * self.rows
if self.disabled:
height += 4
if height != self.height:
self.height = height
class TextInput(WiredBase):
"""wired-input for text"""
component_type = param.String("inputgroup")
html = param.String("""<wired-input style="width:100%;height:100%"></wired-input>""")
attributes_to_watch = param.Dict(
{
"placeholder": "placeholder",
"type": "type_",
# "min": "start",
# "max": "end",
# "step": "step",
}
)
properties_to_watch = param.Dict({"textInput.value": "value"})
events_to_watch = param.Dict({"change": None})
    # @Philippff. Is this the right place to define height? And what about width?
def __init__(self, min_height=50, **params):
# Hack: To solve https://github.com/wiredjs/wired-elements/issues/123
if "value" in params:
self.param.html.default = (
f'<wired-input value="{params["value"]}" '
'style="width:100%;height:100%"></wired-input>'
)
elif self.param.value.default:
self.param.html.default = (
f'<wired-input value="{self.param.value.default}" '
'style="width:100%;height:100%"></wired-input>'
)
super().__init__(min_height=min_height, **params)
placeholder = param.String(default="")
type_ = param.ObjectSelector("", objects=["", "password"])
value = param.String()
class Toggle(WiredBase):
"""wired-toggle"""
html = param.String("<wired-toggle></wired-toggle>")
properties_to_watch = param.Dict({"checked": "value"})
events_to_watch = param.Dict({"change": None})
def __init__(self, min_height=20, **params):
super().__init__(min_height=min_height, **params)
value = param.Boolean(False)
class Select(WebComponent):
"""wired-combo"""
component_type = param.String("inputgroup")
html = param.String("""<wired-combo></wired-combo>""")
properties_to_watch = param.Dict({"selected": "value"})
events_to_watch = param.Dict(default={"selected": "selects"})
parameters_to_watch = param.List(["options"])
def __init__(self, min_height=40, **params):
super().__init__(min_height=min_height, **params)
self._set_class_()
value = param.Parameter()
selects = param.Integer()
options = param.ClassSelector(default=[], class_=(dict, list))
def _get_html_from_parameters_to_watch(self, **params) -> str:
options = params["options"]
if not options:
return """<wired-combo></wired-combo>"""
innerhtml = []
if isinstance(options, list):
for obj in options:
item = f'<wired-item value="{str(obj)}" role="option">{str(obj)}</wired-item>'
innerhtml.append(item)
if isinstance(options, dict):
for key, value in options.items():
item = f'<wired-item value="{str(key)}" role="option">{str(value)}</wired-item>'
innerhtml.append(item)
return f"""<wired-combo>{"".join(innerhtml)}</wired-combo>"""
    # @Phillipfr: Don't understand why this is necessary, but I get an error if I don't do it.
@param.depends("options", watch=True)
def _set_class_(self):
if isinstance(self.options, list):
self.param.options.class_ = list
if isinstance(self.options, dict):
self.param.options.class_ = dict
class Video(WebComponent):
"""wired-video"""
html = param.String(
"""<wired-video autoplay="" playsinline="" muted="" loop="" style="height: 80%;"\
src="https://file-examples.com/wp-content/uploads/2017/04/file_example_MP4_480_1_5MG.mp4">\
</wired-video>"""
)
attributes_to_watch = param.Dict(
{"autoplay": "autoplay", "playsinline": "playsinline", "loop": "loop", "src": "object"}
)
def __init__(self, min_height=250, **params):
super().__init__(min_height=min_height, **params)
    object = param.String(doc="""Currently only a URL is supported""")
autoplay = param.Boolean()
playsinline = param.Boolean()
muted = param.Boolean()
loop = param.Boolean()
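# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes these classes are exposed as Panel-compatible widgets
# and that the `panel` package and the wired-elements JS bundle are available
# in the surrounding application.
# >>> import panel as pn
# >>> pn.extension()
# >>> name = TextInput(placeholder="Your name")
# >>> fruit = Select(options=["Apple", "Banana", "Cherry"])
# >>> toggle = Toggle(value=True)
# >>> pn.Column(name, fruit, toggle).servable()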
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# Sylvain Marie
# License: BSD 3 clause
from functools import wraps
import warnings
import numbers
import operator
import numpy as np
import scipy.sparse as sp
from inspect import signature, isclass, Parameter
# mypy error: Module 'numpy.core.numeric' has no attribute 'ComplexWarning'
from numpy.core.numeric import ComplexWarning # type: ignore
import joblib
from contextlib import suppress
from .fixes import _object_dtype_isnan, parse_version
from .. import get_config as _get_config
from ..exceptions import PositiveSpectrumWarning
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
def _deprecate_positional_args(func=None, *, version="1.1 (renaming of 0.26)"):
"""Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
func : callable, default=None
Function to check arguments on.
    version : str, default="1.1 (renaming of 0.26)"
The version when positional arguments will result in error.
"""
def _inner_deprecate_positional_args(f):
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args <= 0:
return f(*args, **kwargs)
# extra_args > 0
args_msg = [
"{}={}".format(name, arg)
for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
]
args_msg = ", ".join(args_msg)
warnings.warn(
f"Pass {args_msg} as keyword args. From version "
f"{version} passing these as positional arguments "
"will result in an error",
FutureWarning,
)
kwargs.update(zip(sig.parameters, args))
return f(**kwargs)
return inner_f
if func is not None:
return _inner_deprecate_positional_args(func)
return _inner_deprecate_positional_args
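# Illustrative sketch of applying the decorator (hypothetical function, not
# part of the original module): passing the keyword-only argument positionally
# still works but emits a FutureWarning naming the offending argument.
# >>> @_deprecate_positional_args
# ... def add(a, *, b=0):
# ...     return a + b
# >>> add(1, b=2)   # keyword use: no warning
# 3
# >>> add(1, 2)     # positional use of `b`: FutureWarning, call still forwarded
# 3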
def _assert_all_finite(X, allow_nan=False, msg_dtype=None):
"""Like assert_all_finite, but only for ndarray."""
# validation is also imported in extmath
from .extmath import _safe_accumulator_op
if _get_config()["assume_finite"]:
return
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method. The sum is also calculated
# safely to reduce dtype induced overflows.
is_float = X.dtype.kind in "fc"
if is_float and (np.isfinite(_safe_accumulator_op(np.sum, X))):
pass
elif is_float:
msg_err = "Input contains {} or a value too large for {!r}."
if (
allow_nan
and np.isinf(X).any()
or not allow_nan
and not np.isfinite(X).all()
):
type_err = "infinity" if allow_nan else "NaN, infinity"
raise ValueError(
msg_err.format(
type_err, msg_dtype if msg_dtype is not None else X.dtype
)
)
# for object dtype data, we only check for NaNs (GH-13254)
elif X.dtype == np.dtype("object") and not allow_nan:
if _object_dtype_isnan(X).any():
raise ValueError("Input contains NaN")
def assert_all_finite(X, *, allow_nan=False):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
allow_nan : bool, default=False
"""
_assert_all_finite(X.data if sp.issparse(X) else X, allow_nan)
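# Illustrative usage sketch (not part of the original module): finite input
# passes silently, while NaN raises unless allow_nan=True.
# >>> import numpy as np
# >>> assert_all_finite(np.array([1.0, 2.0]))
# >>> assert_all_finite(np.array([1.0, np.nan]))               # raises ValueError
# >>> assert_all_finite(np.array([1.0, np.nan]), allow_nan=True)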
def as_float_array(X, *, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, default=True
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
Returns
-------
XT : {ndarray, sparse matrix}
An array of type float.
"""
if isinstance(X, np.matrix) or (
not isinstance(X, np.ndarray) and not sp.issparse(X)
):
return check_array(
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=False,
)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy("F" if X.flags["F_CONTIGUOUS"] else "C") if copy else X
else:
if X.dtype.kind in "uib" and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
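# Illustrative usage sketch (not part of the original module): small integer
# dtypes are promoted to float32, larger ones to float64, and float inputs are
# returned (optionally copied) unchanged.
# >>> import numpy as np
# >>> as_float_array(np.array([1, 2, 3], dtype=np.int32)).dtype
# dtype('float32')
# >>> as_float_array(np.array([1, 2, 3], dtype=np.int64)).dtype
# dtype('float64')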
def _is_arraylike(x):
"""Returns whether the input is array-like."""
return hasattr(x, "__len__") or hasattr(x, "shape") or hasattr(x, "__array__")
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = f"Unable to find the number of features from X of type {type_name}"
if not hasattr(X, "__len__") and not hasattr(X, "shape"):
if not hasattr(X, "__array__"):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, "shape"):
if not hasattr(X.shape, "__len__") or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings or dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += f" where the samples are of type {type(first_sample).__qualname__}"
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
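# Illustrative usage sketch (not part of the original module): the feature
# count comes from `shape[1]` when available, otherwise from the length of the
# first sample.
# >>> import numpy as np
# >>> _num_features(np.ones((5, 4)))
# 4
# >>> _num_features([[1, 2, 3], [4, 5, 6]])
# 3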
def _num_samples(x):
"""Return number of samples in array-like x."""
message = "Expected sequence or array-like, got %s" % type(x)
if hasattr(x, "fit") and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if not hasattr(x, "__len__") and not hasattr(x, "shape"):
if hasattr(x, "__array__"):
x = np.asarray(x)
else:
raise TypeError(message)
if hasattr(x, "shape") and x.shape is not None:
if len(x.shape) == 0:
raise TypeError(
"Singleton array %r cannot be considered a valid collection." % x
)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x.shape[0], numbers.Integral):
return x.shape[0]
try:
return len(x)
except TypeError as type_error:
raise TypeError(message) from type_error
def check_memory(memory):
"""Check that ``memory`` is joblib.Memory-like.
joblib.Memory-like means that ``memory`` can be converted into a
joblib.Memory instance (typically a str denoting the ``location``)
or has the same interface (has a ``cache`` method).
Parameters
----------
memory : None, str or object with the joblib.Memory interface
Returns
-------
memory : object with the joblib.Memory interface
Raises
------
ValueError
If ``memory`` is not joblib.Memory-like.
"""
if memory is None or isinstance(memory, str):
if parse_version(joblib.__version__) < parse_version("0.12"):
memory = joblib.Memory(cachedir=memory, verbose=0)
else:
memory = joblib.Memory(location=memory, verbose=0)
elif not hasattr(memory, "cache"):
raise ValueError(
"'memory' should be None, a string or have the same"
" interface as joblib.Memory."
" Got memory='{}' instead.".format(memory)
)
return memory
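# Illustrative usage sketch (not part of the original module): strings and
# None are wrapped in a joblib.Memory instance, while objects that already
# expose a `cache` method pass through unchanged.
# >>> mem = check_memory(None)
# >>> hasattr(mem, "cache")
# True
# >>> check_memory(mem) is mem
# True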
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
uniques = np.unique(lengths)
if len(uniques) > 1:
raise ValueError(
"Found input variables with inconsistent numbers of samples: %r"
% [int(l) for l in lengths]
)
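# Illustrative usage sketch (not part of the original module): consistent
# first dimensions pass silently, mismatched ones raise a ValueError.
# >>> import numpy as np
# >>> check_consistent_length([0, 1, 2], np.array([[1, 2], [3, 4], [5, 6]]))
# >>> check_consistent_length([0, 1], [0, 1, 2])   # raises ValueError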
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse matrix} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : {lists, dataframes, ndarrays, sparse matrices}
List of objects to ensure sliceability.
Returns
-------
result : list of {ndarray, sparse matrix, dataframe} or None
Returns a list containing indexable arrays (i.e. NumPy array,
sparse matrix, or dataframe) or `None`.
"""
result = [_make_indexable(X) for X in iterables]
check_consistent_length(*result)
return result
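# Illustrative usage sketch (not part of the original module): lists and
# dataframes pass through, sparse matrices become CSR, and lengths are
# checked for consistency.
# >>> import numpy as np
# >>> X, y = indexable([[1, 2], [3, 4]], np.array([0, 1]))
# >>> len(X), y.shape
# (2, (2,))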
def _ensure_sparse_format(
spmatrix, accept_sparse, dtype, copy, force_all_finite, accept_large_sparse
):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : sparse matrix
Input to validate and convert.
accept_sparse : str, bool or list/tuple of str
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
not in the allowed format, it will be converted to the first listed
format. True allows the input to be any format. False means
that a sparse matrix input will raise an error.
dtype : str, type or None
Data type of result. If None, the dtype of the input is preserved.
copy : bool
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan'
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
Returns
-------
spmatrix_converted : sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# Indices dtype validation
_check_large_sparse(spmatrix, accept_large_sparse)
if accept_sparse is False:
raise TypeError(
"A sparse matrix was passed, but dense "
"data is required. Use X.toarray() to "
"convert to a dense numpy array."
)
elif isinstance(accept_sparse, (list, tuple)):
if len(accept_sparse) == 0:
raise ValueError(
"When providing 'accept_sparse' "
"as a tuple or list, it must contain at "
"least one string value."
)
# ensure correct sparse format
if spmatrix.format not in accept_sparse:
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
elif accept_sparse is not True:
# any other type
raise ValueError(
"Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided "
"'accept_sparse={}'.".format(accept_sparse)
)
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn(
"Can't check %s sparse matrix for nan or inf." % spmatrix.format,
stacklevel=2,
)
else:
_assert_all_finite(spmatrix.data, allow_nan=force_all_finite == "allow-nan")
return spmatrix
def _ensure_no_complex_data(array):
if (
hasattr(array, "dtype")
and array.dtype is not None
and hasattr(array.dtype, "kind")
and array.dtype.kind == "c"
):
raise ValueError("Complex data not supported\n{}\n".format(array))
def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
array_converted : object
The converted and validated array.
"""
if isinstance(array, np.matrix):
warnings.warn(
"np.matrix usage is deprecated in 1.0 and will raise a TypeError "
"in 1.2. Please convert to a numpy array with np.asarray. For "
"more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html", # noqa
FutureWarning,
)
# store reference to original array to check if copy is needed when
# function returns
array_orig = array
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, "kind"):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
# check if the object contains several dtypes (typically a pandas
# DataFrame), and store them. If not, store None.
dtypes_orig = None
has_pd_integer_array = False
if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
# throw warning if columns are sparse. If all columns are sparse, then
# array.sparse exists and sparsity will be preserved (later).
with suppress(ImportError):
from pandas.api.types import is_sparse
if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
warnings.warn(
"pandas.DataFrame with sparse columns found."
"It will be converted to a dense numpy array."
)
dtypes_orig = list(array.dtypes)
# pandas boolean dtype __array__ interface coerces bools to objects
for i, dtype_iter in enumerate(dtypes_orig):
if dtype_iter.kind == "b":
dtypes_orig[i] = np.dtype(object)
elif dtype_iter.name.startswith(("Int", "UInt")):
# name looks like an Integer Extension Array, now check for
# the dtype
with suppress(ImportError):
from pandas import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
if isinstance(
dtype_iter,
(
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
),
):
has_pd_integer_array = True
if all(isinstance(dtype, np.dtype) for dtype in dtypes_orig):
dtype_orig = np.result_type(*dtypes_orig)
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if has_pd_integer_array:
# If there are any pandas integer extension arrays,
array = array.astype(dtype)
if force_all_finite not in (True, False, "allow-nan"):
raise ValueError(
'force_all_finite should be a bool or "allow-nan". Got {!r} instead'.format(
force_all_finite
)
)
if estimator is not None:
if isinstance(estimator, str):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
# When all dataframe columns are sparse, convert to a sparse array
if hasattr(array, "sparse") and array.ndim > 1:
# DataFrame.sparse only supports `to_coo`
array = array.sparse.to_coo()
if array.dtype == np.dtype("object"):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError(
"Pandas DataFrame with mixed sparse extension arrays "
"generated a sparse matrix with object dtype which "
"can not be converted to a scipy sparse matrix."
"Sparse extension arrays should all have the same "
"numeric type."
)
if sp.issparse(array):
_ensure_no_complex_data(array)
array = _ensure_sparse_format(
array,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
force_all_finite=force_all_finite,
accept_large_sparse=accept_large_sparse,
)
else:
# If np.array(..) gives ComplexWarning, then we convert the warning
# to an error. This is needed because specifying a non complex
# dtype to the function converts complex to real dtype,
# thereby passing the test made in the lines following the scope
# of warnings context manager.
with warnings.catch_warnings():
try:
warnings.simplefilter("error", ComplexWarning)
if dtype is not None and np.dtype(dtype).kind in "iu":
# Conversion float -> int should not contain NaN or
# inf (numpy#14412). We cannot use casting='safe' because
# then conversion float -> int would be disallowed.
array = np.asarray(array, order=order)
if array.dtype.kind == "f":
_assert_all_finite(array, allow_nan=False, msg_dtype=dtype)
array = array.astype(dtype, casting="unsafe", copy=False)
else:
array = np.asarray(array, order=order, dtype=dtype)
except ComplexWarning as complex_warning:
raise ValueError(
"Complex data not supported\n{}\n".format(array)
) from complex_warning
# It is possible that the np.array(..) gave no warning. This happens
# when no dtype conversion happened, for example dtype = None. The
# result is that np.array(..) produces an array of complex dtype
# and we need to catch and raise exception for such cases.
_ensure_no_complex_data(array)
if ensure_2d:
# If input is scalar raise error
if array.ndim == 0:
raise ValueError(
"Expected 2D array, got scalar array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# If input is 1D raise error
if array.ndim == 1:
raise ValueError(
"Expected 2D array, got 1D array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind in "OUSV":
warnings.warn(
"Arrays of bytes/strings is being converted to decimal "
"numbers if dtype='numeric'. This behavior is deprecated in "
"0.24 and will be removed in 1.1 (renaming of 0.26). Please "
"convert your data to numeric values explicitly instead.",
FutureWarning,
stacklevel=2,
)
try:
array = array.astype(np.float64)
except ValueError as e:
raise ValueError(
"Unable to convert array of bytes/strings "
"into decimal numbers with dtype='numeric'"
) from e
if not allow_nd and array.ndim >= 3:
raise ValueError(
"Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name)
)
if force_all_finite:
_assert_all_finite(array, allow_nan=force_all_finite == "allow-nan")
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError(
"Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, array.shape, ensure_min_samples, context)
)
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError(
"Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, array.shape, ensure_min_features, context)
)
if copy and np.may_share_memory(array, array_orig):
array = np.array(array, dtype=dtype, order=order)
return array
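# Illustrative usage sketch (not part of the original module): a well-formed
# 2D input is returned as an ndarray, while 1D input triggers the reshape
# error described above.
# >>> import numpy as np
# >>> check_array(np.arange(6).reshape(3, 2)).shape
# (3, 2)
# >>> check_array([1, 2, 3])   # raises ValueError: Expected 2D array, got 1D array
# >>> check_array([[1.0, np.nan]], force_all_finite="allow-nan")
# array([[ 1., nan]])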
def _check_large_sparse(X, accept_large_sparse=False):
"""Raise a ValueError if X has 64bit indices and accept_large_sparse=False"""
if not accept_large_sparse:
supported_indices = ["int32"]
if X.getformat() == "coo":
index_keys = ["col", "row"]
elif X.getformat() in ["csr", "csc", "bsr"]:
index_keys = ["indices", "indptr"]
else:
return
for key in index_keys:
indices_datatype = getattr(X, key).dtype
if indices_datatype not in supported_indices:
raise ValueError(
"Only sparse matrices with 32-bit integer"
" indices are accepted. Got %s indices." % indices_datatype
)
def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
if y is None:
raise ValueError("y cannot be None")
X = check_array(
X,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
)
y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric)
check_consistent_length(X, y)
return X, y
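# Illustrative usage sketch (not part of the original module): X is validated
# as a 2D array, y is ravelled to 1D, and their lengths are checked together.
# >>> import numpy as np
# >>> X = np.arange(6).reshape(3, 2)
# >>> X_checked, y_checked = check_X_y(X, [0, 1, 0])
# >>> X_checked.shape, y_checked.shape
# ((3, 2), (3,))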
def _check_y(y, multi_output=False, y_numeric=False):
"""Isolated part of check_X_y dedicated to y validation"""
if multi_output:
y = check_array(
y, accept_sparse="csr", force_all_finite=True, ensure_2d=False, dtype=None
)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
_ensure_no_complex_data(y)
if y_numeric and y.dtype.kind == "O":
y = y.astype(np.float64)
return y
def column_or_1d(y, *, warn=False):
"""Ravel column or 1d numpy array, else raises an error.
Parameters
----------
y : array-like
warn : bool, default=False
To control display of warnings.
Returns
-------
y : ndarray
"""
y = np.asarray(y)
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning,
stacklevel=2,
)
return np.ravel(y)
raise ValueError(
"y should be a 1d array, got an array of shape {} instead.".format(shape)
)
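# Illustrative usage sketch (not part of the original module): a column vector
# is ravelled (with an optional DataConversionWarning when warn=True), while
# anything wider raises.
# >>> import numpy as np
# >>> column_or_1d(np.array([[1], [2], [3]])).shape
# (3,)
# >>> column_or_1d(np.ones((3, 2)))   # raises ValueError: y should be a 1d array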
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : None, int or instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState instance" % seed
)
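# Illustrative usage sketch (not part of the original module): ints are turned
# into seeded RandomState instances, existing instances pass through.
# >>> rng = check_random_state(42)
# >>> isinstance(rng, np.random.RandomState)
# True
# >>> check_random_state(rng) is rng
# True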
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Parameters
----------
estimator : object
An estimator to inspect.
parameter : str
The searched parameter.
Returns
-------
is_parameter: bool
Whether the parameter was found to be a named parameter of the
estimator's fit method.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.utils.validation import has_fit_parameter
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, *, tol=1e-10, raise_warning=True, raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : {ndarray, sparse matrix}
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float, default=1e-10
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : bool, default=True
If True then raise a warning if conversion is required.
raise_exception : bool, default=False
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : {ndarray, sparse matrix}
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError(
"array must be 2-dimensional and square. shape = {0}".format(array.shape)
)
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ["csr", "csc", "coo"]:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn(
"Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.",
stacklevel=2,
)
if sp.issparse(array):
conversion = "to" + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
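# Illustrative usage sketch (not part of the original module): an asymmetric
# matrix is averaged with its transpose (optionally warning about it).
# >>> import numpy as np
# >>> check_symmetric(np.array([[0.0, 1.0], [2.0, 0.0]]), raise_warning=False)
# array([[0. , 1.5],
#        [1.5, 0. ]])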
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
fitted attributes (ending with a trailing underscore) and otherwise
raises a NotFittedError with the given message.
If an estimator does not set any attributes with a trailing underscore, it
can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the
estimator is fitted or not.
Parameters
----------
estimator : estimator instance
estimator instance for which the check is performed.
attributes : str, list or tuple of str, default=None
Attribute name(s) given as string or a list/tuple of strings
Eg.: ``["coef_", "estimator_", ...], "coef_"``
        If `None`, `estimator` is considered fitted if there exists an
        attribute that ends with an underscore and does not start with a
        double underscore.
msg : str, default=None
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this
estimator."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default=all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
"""
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
if not hasattr(estimator, "fit"):
raise TypeError("%s is not an estimator instance." % (estimator))
if attributes is not None:
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])
elif hasattr(estimator, "__sklearn_is_fitted__"):
fitted = estimator.__sklearn_is_fitted__()
else:
fitted = [
v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")
]
if not fitted:
raise NotFittedError(msg % {"name": type(estimator).__name__})
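# Illustrative usage sketch (not part of the original module; assumes a
# scikit-learn estimator is available): before `fit` the check raises
# NotFittedError, afterwards it passes because trailing-underscore attributes
# such as `coef_` exist.
# >>> from sklearn.linear_model import LinearRegression
# >>> est = LinearRegression()
# >>> check_is_fitted(est)            # raises NotFittedError
# >>> _ = est.fit([[1.0], [2.0]], [1.0, 2.0])
# >>> check_is_fitted(est)            # passes silently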
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : {array-like, sparse matrix}
Input data.
whom : str
Who passed X to this function.
"""
# avoid X.min() on sparse matrix since it also sorts the indices
if sp.issparse(X):
if X.format in ["lil", "dok"]:
X = X.tocsr()
if X.data.size == 0:
X_min = 0
else:
X_min = X.data.min()
else:
X_min = X.min()
if X_min < 0:
raise ValueError("Negative values in data passed to %s" % whom)
def check_scalar(
x,
name,
target_type,
*,
min_val=None,
max_val=None,
include_boundaries="both",
):
"""Validate scalar parameters type and value.
Parameters
----------
x : object
The scalar parameter to validate.
name : str
The name of the parameter to be printed in error messages.
target_type : type or tuple
Acceptable data types for the parameter.
min_val : float or int, default=None
The minimum valid value the parameter can take. If None (default) it
is implied that the parameter does not have a lower bound.
    max_val : float or int, default=None
The maximum valid value the parameter can take. If None (default) it
is implied that the parameter does not have an upper bound.
include_boundaries : {"left", "right", "both", "neither"}, default="both"
Whether the interval defined by `min_val` and `max_val` should include
the boundaries. Possible choices are:
- `"left"`: only `min_val` is included in the valid interval;
- `"right"`: only `max_val` is included in the valid interval;
- `"both"`: `min_val` and `max_val` are included in the valid interval;
- `"neither"`: neither `min_val` nor `max_val` are included in the
valid interval.
Returns
-------
x : numbers.Number
The validated number.
Raises
------
TypeError
If the parameter's type does not match the desired type.
ValueError
If the parameter's value violates the given bounds.
"""
if not isinstance(x, target_type):
raise TypeError(f"{name} must be an instance of {target_type}, not {type(x)}.")
expected_include_boundaries = ("left", "right", "both", "neither")
if include_boundaries not in expected_include_boundaries:
raise ValueError(
f"Unknown value for `include_boundaries`: {repr(include_boundaries)}. "
f"Possible values are: {expected_include_boundaries}."
)
comparison_operator = (
operator.lt if include_boundaries in ("left", "both") else operator.le
)
if min_val is not None and comparison_operator(x, min_val):
raise ValueError(
f"{name} == {x}, must be"
f" {">=" if include_boundaries in ("left", "both") else ">"} {min_val}."
)
comparison_operator = (
operator.gt if include_boundaries in ("right", "both") else operator.ge
)
if max_val is not None and comparison_operator(x, max_val):
raise ValueError(
f"{name} == {x}, must be"
f" {"<=" if include_boundaries in ("right", "both") else "<"} {max_val}."
)
return x
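# Illustrative usage sketch (not part of the original module): the validated
# value is returned unchanged, while type or bound violations raise.
# >>> check_scalar(5, "n_estimators", int, min_val=1)
# 5
# >>> check_scalar(5, "n_estimators", int, max_val=3)   # raises ValueError: must be <= 3
# >>> check_scalar(0.5, "n_estimators", int)            # raises TypeError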
def _check_psd_eigenvalues(lambdas, enable_warnings=False):
"""Check the eigenvalues of a positive semidefinite (PSD) matrix.
Checks the provided array of PSD matrix eigenvalues for numerical or
conditioning issues and returns a fixed validated version. This method
should typically be used if the PSD matrix is user-provided (e.g. a
Gram matrix) or computed using a user-provided dissimilarity metric
(e.g. kernel function), or if the decomposition process uses approximation
methods (randomized SVD, etc.).
It checks for three things:
- that there are no significant imaginary parts in eigenvalues (more than
1e-5 times the maximum real part). If this check fails, it raises a
``ValueError``. Otherwise all non-significant imaginary parts that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
- that eigenvalues are not all negative. If this check fails, it raises a
``ValueError``
- that there are no significant negative eigenvalues with absolute value
more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest
positive eigenvalue in double (simple) precision. If this check fails,
it raises a ``ValueError``. Otherwise all negative eigenvalues that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
Finally, all the positive eigenvalues that are too small (with a value
smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to
zero. This operation is traced with a ``PositiveSpectrumWarning`` when
``enable_warnings=True``.
Parameters
----------
lambdas : array-like of shape (n_eigenvalues,)
Array of eigenvalues to check / fix.
enable_warnings : bool, default=False
When this is set to ``True``, a ``PositiveSpectrumWarning`` will be
raised when there are imaginary parts, negative eigenvalues, or
extremely small non-zero eigenvalues. Otherwise no warning will be
raised. In both cases, imaginary parts, negative eigenvalues, and
extremely small non-zero eigenvalues will be set to zero.
Returns
-------
lambdas_fixed : ndarray of shape (n_eigenvalues,)
A fixed validated copy of the array of eigenvalues.
Examples
--------
>>> from sklearn.utils.validation import _check_psd_eigenvalues
>>> _check_psd_eigenvalues([1, 2]) # nominal case
array([1, 2])
>>> _check_psd_eigenvalues([5, 5j]) # significant imag part
Traceback (most recent call last):
...
ValueError: There are significant imaginary parts in eigenvalues (1
of the maximum real part). Either the matrix is not PSD, or there was
an issue while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part
array([5., 0.])
>>> _check_psd_eigenvalues([-5, -1]) # all negative
Traceback (most recent call last):
...
ValueError: All eigenvalues are negative (maximum is -1). Either the
matrix is not PSD, or there was an issue while computing the
eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -1]) # significant negative
Traceback (most recent call last):
...
ValueError: There are significant negative eigenvalues (0.2 of the
maximum positive). Either the matrix is not PSD, or there was an issue
while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative
array([5., 0.])
>>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small)
array([5., 0.])
"""
lambdas = np.array(lambdas)
is_double_precision = lambdas.dtype == np.float64
# note: the minimum value available is
# - single-precision: np.finfo('float32').eps = 1.2e-07
# - double-precision: np.finfo('float64').eps = 2.2e-16
# the various thresholds used for validation
# we may wish to change the value according to precision.
significant_imag_ratio = 1e-5
significant_neg_ratio = 1e-5 if is_double_precision else 5e-3
significant_neg_value = 1e-10 if is_double_precision else 1e-6
small_pos_ratio = 1e-12 if is_double_precision else 2e-7
# Check that there are no significant imaginary parts
if not np.isreal(lambdas).all():
max_imag_abs = np.abs(np.imag(lambdas)).max()
max_real_abs = np.abs(np.real(lambdas)).max()
if max_imag_abs > significant_imag_ratio * max_real_abs:
raise ValueError(
"There are significant imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not PSD, or "
"there was an issue while computing the eigendecomposition "
"of the matrix." % (max_imag_abs / max_real_abs)
)
# warn about imaginary parts being removed
if enable_warnings:
warnings.warn(
"There are imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not"
" PSD, or there was an issue while computing the "
"eigendecomposition of the matrix. Only the real "
"parts will be kept." % (max_imag_abs / max_real_abs),
PositiveSpectrumWarning,
)
# Remove all imaginary parts (even if zero)
lambdas = np.real(lambdas)
# Check that there are no significant negative eigenvalues
max_eig = lambdas.max()
if max_eig < 0:
raise ValueError(
"All eigenvalues are negative (maximum is %g). "
"Either the matrix is not PSD, or there was an "
"issue while computing the eigendecomposition of "
"the matrix." % max_eig
)
else:
min_eig = lambdas.min()
if (
min_eig < -significant_neg_ratio * max_eig
and min_eig < -significant_neg_value
):
raise ValueError(
"There are significant negative eigenvalues (%g"
" of the maximum positive). Either the matrix is "
"not PSD, or there was an issue while computing "
"the eigendecomposition of the matrix." % (-min_eig / max_eig)
)
elif min_eig < 0:
# Remove all negative values and warn about it
if enable_warnings:
warnings.warn(
"There are negative eigenvalues (%g of the "
"maximum positive). Either the matrix is not "
"PSD, or there was an issue while computing the"
" eigendecomposition of the matrix. Negative "
"eigenvalues will be replaced with 0." % (-min_eig / max_eig),
PositiveSpectrumWarning,
)
lambdas[lambdas < 0] = 0
# Check for conditioning (small positive non-zeros)
too_small_lambdas = (0 < lambdas) & (lambdas < small_pos_ratio * max_eig)
if too_small_lambdas.any():
if enable_warnings:
warnings.warn(
"Badly conditioned PSD matrix spectrum: the largest "
"eigenvalue is more than %g times the smallest. "
"Small eigenvalues will be replaced with 0."
"" % (1 / small_pos_ratio),
PositiveSpectrumWarning,
)
lambdas[too_small_lambdas] = 0
return lambdas
def _check_sample_weight(
sample_weight, X, dtype=None, copy=False, only_non_negative=False
):
"""Validate sample weights.
Note that passing sample_weight=None will output an array of ones.
Therefore, in some cases, you may want to protect the call with:
if sample_weight is not None:
sample_weight = _check_sample_weight(...)
Parameters
----------
sample_weight : {ndarray, Number or None}, shape (n_samples,)
Input sample weights.
X : {ndarray, list, sparse matrix}
Input data.
only_non_negative : bool, default=False,
Whether or not the weights are expected to be non-negative.
.. versionadded:: 1.0
dtype : dtype, default=None
dtype of the validated `sample_weight`.
If None, and the input `sample_weight` is an array, the dtype of the
input is preserved; otherwise an array with the default numpy dtype
        is allocated. If `dtype` is not one of `float32`, `float64`,
`None`, the output will be of dtype `float64`.
copy : bool, default=False
If True, a copy of sample_weight will be created.
Returns
-------
sample_weight : ndarray of shape (n_samples,)
Validated sample weight. It is guaranteed to be "C" contiguous.
"""
n_samples = _num_samples(X)
if dtype is not None and dtype not in [np.float32, np.float64]:
dtype = np.float64
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=dtype)
elif isinstance(sample_weight, numbers.Number):
sample_weight = np.full(n_samples, sample_weight, dtype=dtype)
else:
if dtype is None:
dtype = [np.float64, np.float32]
sample_weight = check_array(
sample_weight,
accept_sparse=False,
ensure_2d=False,
dtype=dtype,
order="C",
copy=copy,
)
if sample_weight.ndim != 1:
raise ValueError("Sample weights must be 1D array or scalar")
if sample_weight.shape != (n_samples,):
raise ValueError(
"sample_weight.shape == {}, expected {}!".format(
sample_weight.shape, (n_samples,)
)
)
if only_non_negative:
check_non_negative(sample_weight, "`sample_weight`")
return sample_weight
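# Illustrative usage sketch (not part of the original module): None becomes an
# all-ones weight vector, a scalar is broadcast, and arrays are validated
# against the number of samples in X.
# >>> import numpy as np
# >>> X = np.ones((4, 2))
# >>> _check_sample_weight(None, X)
# array([1., 1., 1., 1.])
# >>> _check_sample_weight(2.0, X)
# array([2., 2., 2., 2.])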
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9):
"""Check allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-7
Relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
"""
if sp.issparse(x) and sp.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
return (
np.array_equal(x.indices, y.indices)
and np.array_equal(x.indptr, y.indptr)
and np.allclose(x.data, y.data, rtol=rtol, atol=atol)
)
elif not sp.issparse(x) and not sp.issparse(y):
return np.allclose(x, y, rtol=rtol, atol=atol)
raise ValueError(
"Can only compare two sparse matrices, not a sparse matrix and an array"
)
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as `X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support indexing.
"""
from . import _safe_indexing
fit_params_validated = {}
for param_key, param_value in fit_params.items():
if not _is_arraylike(param_value) or _num_samples(param_value) != _num_samples(
X
):
# Non-indexable pass-through (for now for backward-compatibility).
# https://github.com/scikit-learn/scikit-learn/issues/15805
fit_params_validated[param_key] = param_value
else:
# Any other fit_params should support indexing
# (e.g. for cross-validation).
fit_params_validated[param_key] = _make_indexable(param_value)
fit_params_validated[param_key] = _safe_indexing(
fit_params_validated[param_key], indices
)
return fit_params_validated
def _get_feature_names(X):
"""Get feature names from X.
Support for other array containers should place its implementation here.
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
Array container to extract feature names.
- pandas dataframe : The columns will be considered to be feature
names. If the dataframe contains non-string feature names, `None` is
returned.
- All other array containers will return `None`.
Returns
-------
names: ndarray or None
Feature names of `X`. Unrecognized array containers will return `None`.
"""
feature_names = None
# extract feature names for support array containers
if hasattr(X, "columns"):
feature_names = np.asarray(X.columns, dtype=object)
if feature_names is None or len(feature_names) == 0:
return
types = sorted(t.__qualname__ for t in set(type(v) for v in feature_names))
# Warn when types are mixed.
# ints and strings do not warn
if len(types) > 1 or not (types[0].startswith("int") or types[0] == "str"):
# TODO: Convert to an error in 1.2
warnings.warn(
"Feature names only support names that are all strings. "
f"Got feature names with dtypes: {types}. An error will be raised "
"in 1.2.",
FutureWarning,
)
return
# Only feature names of all strings are supported
if types[0] == "str":
return feature_names
def _check_feature_names_in(estimator, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
          then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_in : ndarray of str
Feature names in.
"""
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# Sylvain Marie
# License: BSD 3 clause
from functools import wraps
import warnings
import numbers
import operator
import numpy as np
import scipy.sparse as sp
from inspect import signature, isclass, Parameter
# mypy error: Module 'numpy.core.numeric' has no attribute 'ComplexWarning'
from numpy.core.numeric import ComplexWarning # type: ignore
import joblib
from contextlib import suppress
from .fixes import _object_dtype_isnan, parse_version
from .. import get_config as _get_config
from ..exceptions import PositiveSpectrumWarning
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
def _deprecate_positional_args(func=None, *, version="1.1 (renaming of 0.26)"):
"""Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
func : callable, default=None
Function to check arguments on.
    version : str, default="1.1 (renaming of 0.26)"
The version when positional arguments will result in error.
"""
def _inner_deprecate_positional_args(f):
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args <= 0:
return f(*args, **kwargs)
# extra_args > 0
args_msg = [
"{}={}".format(name, arg)
for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
]
args_msg = ", ".join(args_msg)
warnings.warn(
f"Pass {args_msg} as keyword args. From version "
f"{version} passing these as positional arguments "
"will result in an error",
FutureWarning,
)
kwargs.update(zip(sig.parameters, args))
return f(**kwargs)
return inner_f
if func is not None:
return _inner_deprecate_positional_args(func)
return _inner_deprecate_positional_args
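# Illustrative sketch: applying the decorator above to a function with
# keyword-only arguments.  `_example_clip` is a hypothetical helper, not part
# of the module's API.
@_deprecate_positional_args
def _example_clip(X, *, lower=0.0, upper=1.0):
    """Clip X to [lower, upper]; passing the bounds positionally warns."""
    return np.clip(X, lower, upper)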
def _assert_all_finite(X, allow_nan=False, msg_dtype=None):
"""Like assert_all_finite, but only for ndarray."""
# validation is also imported in extmath
from .extmath import _safe_accumulator_op
if _get_config()["assume_finite"]:
return
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method. The sum is also calculated
# safely to reduce dtype induced overflows.
is_float = X.dtype.kind in "fc"
if is_float and (np.isfinite(_safe_accumulator_op(np.sum, X))):
pass
elif is_float:
msg_err = "Input contains {} or a value too large for {!r}."
if (
allow_nan
and np.isinf(X).any()
or not allow_nan
and not np.isfinite(X).all()
):
type_err = "infinity" if allow_nan else "NaN, infinity"
raise ValueError(
msg_err.format(
type_err, msg_dtype if msg_dtype is not None else X.dtype
)
)
# for object dtype data, we only check for NaNs (GH-13254)
elif X.dtype == np.dtype("object") and not allow_nan:
if _object_dtype_isnan(X).any():
raise ValueError("Input contains NaN")
def assert_all_finite(X, *, allow_nan=False):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
allow_nan : bool, default=False
"""
_assert_all_finite(X.data if sp.issparse(X) else X, allow_nan)
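# Illustrative sketch: finite data passes silently, while NaNs are tolerated
# only when allow_nan=True.  `_example_assert_all_finite` is hypothetical.
def _example_assert_all_finite():
    assert_all_finite(np.array([1.0, 2.0, 3.0]))                # no error
    assert_all_finite(np.array([1.0, np.nan]), allow_nan=True)  # NaN allowed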
def as_float_array(X, *, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats.
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, default=True
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
Returns
-------
XT : {ndarray, sparse matrix}
An array of type float.
"""
if isinstance(X, np.matrix) or (
not isinstance(X, np.ndarray) and not sp.issparse(X)
):
return check_array(
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=False,
)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy("F" if X.flags["F_CONTIGUOUS"] else "C") if copy else X
else:
if X.dtype.kind in "uib" and X.dtype.itemsize <= 4:
return_dtype = np.float32
else:
return_dtype = np.float64
return X.astype(return_dtype)
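# Illustrative sketch: small integer dtypes are promoted to float32, while
# float input is returned (optionally copied) as-is.  The helper below is
# hypothetical.
def _example_as_float_array():
    X_int = np.arange(6, dtype=np.int32).reshape(3, 2)
    X_float = as_float_array(X_int)     # itemsize <= 4, so float32 is chosen
    assert X_float.dtype == np.float32
    return X_float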
def _is_arraylike(x):
"""Returns whether the input is array-like."""
return hasattr(x, "__len__") or hasattr(x, "shape") or hasattr(x, "__array__")
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = f"Unable to find the number of features from X of type {type_name}"
if not hasattr(X, "__len__") and not hasattr(X, "shape"):
if not hasattr(X, "__array__"):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, "shape"):
if not hasattr(X.shape, "__len__") or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings or dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += f" where the samples are of type {type(first_sample).__qualname__}"
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
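# Illustrative sketch: the number of features is read from the first sample of
# a list of lists or from `shape[1]` of a 2D array.  Hypothetical helper.
def _example_num_features():
    assert _num_features([[0, 1, 2], [3, 4, 5]]) == 3    # list of lists
    assert _num_features(np.zeros((10, 4))) == 4         # 2D ndarray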
def _num_samples(x):
"""Return number of samples in array-like x."""
message = "Expected sequence or array-like, got %s" % type(x)
if hasattr(x, "fit") and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if not hasattr(x, "__len__") and not hasattr(x, "shape"):
if hasattr(x, "__array__"):
x = np.asarray(x)
else:
raise TypeError(message)
if hasattr(x, "shape") and x.shape is not None:
if len(x.shape) == 0:
raise TypeError(
"Singleton array %r cannot be considered a valid collection." % x
)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x.shape[0], numbers.Integral):
return x.shape[0]
try:
return len(x)
except TypeError as type_error:
raise TypeError(message) from type_error
def check_memory(memory):
"""Check that ``memory`` is joblib.Memory-like.
joblib.Memory-like means that ``memory`` can be converted into a
joblib.Memory instance (typically a str denoting the ``location``)
or has the same interface (has a ``cache`` method).
Parameters
----------
memory : None, str or object with the joblib.Memory interface
Returns
-------
memory : object with the joblib.Memory interface
Raises
------
ValueError
If ``memory`` is not joblib.Memory-like.
"""
if memory is None or isinstance(memory, str):
if parse_version(joblib.__version__) < parse_version("0.12"):
memory = joblib.Memory(cachedir=memory, verbose=0)
else:
memory = joblib.Memory(location=memory, verbose=0)
elif not hasattr(memory, "cache"):
raise ValueError(
"'memory' should be None, a string or have the same"
" interface as joblib.Memory."
" Got memory='{}' instead.".format(memory)
)
return memory
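# Illustrative sketch: a string location is wrapped in a joblib.Memory, while
# an existing Memory instance passes through unchanged.  The cache path below
# is hypothetical.
def _example_check_memory(location="/tmp/sklearn_cache"):
    memory = check_memory(location)
    assert check_memory(memory) is memory
    return memory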
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
uniques = np.unique(lengths)
if len(uniques) > 1:
raise ValueError(
"Found input variables with inconsistent numbers of samples: %r"
% [int(l) for l in lengths]
)
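# Illustrative sketch: matching first dimensions pass silently, mismatched ones
# raise ValueError.  Hypothetical helper.
def _example_check_consistent_length():
    check_consistent_length(np.zeros((5, 2)), np.zeros(5), list(range(5)))
    try:
        check_consistent_length(np.zeros(5), np.zeros(4))
    except ValueError:
        pass  # inconsistent numbers of samples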
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterable to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, ndarray, sparse matrix} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable)
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : {lists, dataframes, ndarrays, sparse matrices}
List of objects to ensure sliceability.
Returns
-------
result : list of {ndarray, sparse matrix, dataframe} or None
Returns a list containing indexable arrays (i.e. NumPy array,
sparse matrix, or dataframe) or `None`.
"""
result = [_make_indexable(X) for X in iterables]
check_consistent_length(*result)
return result
def _ensure_sparse_format(
spmatrix, accept_sparse, dtype, copy, force_all_finite, accept_large_sparse
):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : sparse matrix
Input to validate and convert.
accept_sparse : str, bool or list/tuple of str
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
not in the allowed format, it will be converted to the first listed
format. True allows the input to be any format. False means
that a sparse matrix input will raise an error.
dtype : str, type or None
Data type of result. If None, the dtype of the input is preserved.
copy : bool
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan'
Whether to raise an error on np.inf, np.nan, pd.NA in X. The
possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
Returns
-------
spmatrix_converted : sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# Indices dtype validation
_check_large_sparse(spmatrix, accept_large_sparse)
if accept_sparse is False:
raise TypeError(
"A sparse matrix was passed, but dense "
"data is required. Use X.toarray() to "
"convert to a dense numpy array."
)
elif isinstance(accept_sparse, (list, tuple)):
if len(accept_sparse) == 0:
raise ValueError(
"When providing 'accept_sparse' "
"as a tuple or list, it must contain at "
"least one string value."
)
# ensure correct sparse format
if spmatrix.format not in accept_sparse:
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
elif accept_sparse is not True:
# any other type
raise ValueError(
"Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided "
"'accept_sparse={}'.".format(accept_sparse)
)
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn(
"Can't check %s sparse matrix for nan or inf." % spmatrix.format,
stacklevel=2,
)
else:
_assert_all_finite(spmatrix.data, allow_nan=force_all_finite == "allow-nan")
return spmatrix
def _ensure_no_complex_data(array):
if (
hasattr(array, "dtype")
and array.dtype is not None
and hasattr(array.dtype, "kind")
and array.dtype.kind == "c"
):
raise ValueError("Complex data not supported\n{}\n".format(array))
def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
array_converted : object
The converted and validated array.
"""
if isinstance(array, np.matrix):
warnings.warn(
"np.matrix usage is deprecated in 1.0 and will raise a TypeError "
"in 1.2. Please convert to a numpy array with np.asarray. For "
"more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html", # noqa
FutureWarning,
)
# store reference to original array to check if copy is needed when
# function returns
array_orig = array
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, "kind"):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
# check if the object contains several dtypes (typically a pandas
# DataFrame), and store them. If not, store None.
dtypes_orig = None
has_pd_integer_array = False
if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
# throw warning if columns are sparse. If all columns are sparse, then
# array.sparse exists and sparsity will be preserved (later).
with suppress(ImportError):
from pandas.api.types import is_sparse
if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
warnings.warn(
"pandas.DataFrame with sparse columns found."
"It will be converted to a dense numpy array."
)
dtypes_orig = list(array.dtypes)
# pandas boolean dtype __array__ interface coerces bools to objects
for i, dtype_iter in enumerate(dtypes_orig):
if dtype_iter.kind == "b":
dtypes_orig[i] = np.dtype(object)
elif dtype_iter.name.startswith(("Int", "UInt")):
# name looks like an Integer Extension Array, now check for
# the dtype
with suppress(ImportError):
from pandas import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
if isinstance(
dtype_iter,
(
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
),
):
has_pd_integer_array = True
if all(isinstance(dtype, np.dtype) for dtype in dtypes_orig):
dtype_orig = np.result_type(*dtypes_orig)
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if has_pd_integer_array:
        # If there are any pandas integer extension arrays, convert them via
        # astype so the nullable integer columns are materialized up front.
array = array.astype(dtype)
if force_all_finite not in (True, False, "allow-nan"):
raise ValueError(
'force_all_finite should be a bool or "allow-nan". Got {!r} instead'.format(
force_all_finite
)
)
if estimator is not None:
if isinstance(estimator, str):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
# When all dataframe columns are sparse, convert to a sparse array
if hasattr(array, "sparse") and array.ndim > 1:
# DataFrame.sparse only supports `to_coo`
array = array.sparse.to_coo()
if array.dtype == np.dtype("object"):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError(
"Pandas DataFrame with mixed sparse extension arrays "
"generated a sparse matrix with object dtype which "
"can not be converted to a scipy sparse matrix."
"Sparse extension arrays should all have the same "
"numeric type."
)
if sp.issparse(array):
_ensure_no_complex_data(array)
array = _ensure_sparse_format(
array,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
force_all_finite=force_all_finite,
accept_large_sparse=accept_large_sparse,
)
else:
# If np.array(..) gives ComplexWarning, then we convert the warning
# to an error. This is needed because specifying a non complex
# dtype to the function converts complex to real dtype,
# thereby passing the test made in the lines following the scope
# of warnings context manager.
with warnings.catch_warnings():
try:
warnings.simplefilter("error", ComplexWarning)
if dtype is not None and np.dtype(dtype).kind in "iu":
# Conversion float -> int should not contain NaN or
# inf (numpy#14412). We cannot use casting='safe' because
# then conversion float -> int would be disallowed.
array = np.asarray(array, order=order)
if array.dtype.kind == "f":
_assert_all_finite(array, allow_nan=False, msg_dtype=dtype)
array = array.astype(dtype, casting="unsafe", copy=False)
else:
array = np.asarray(array, order=order, dtype=dtype)
except ComplexWarning as complex_warning:
raise ValueError(
"Complex data not supported\n{}\n".format(array)
) from complex_warning
# It is possible that the np.array(..) gave no warning. This happens
# when no dtype conversion happened, for example dtype = None. The
# result is that np.array(..) produces an array of complex dtype
# and we need to catch and raise exception for such cases.
_ensure_no_complex_data(array)
if ensure_2d:
# If input is scalar raise error
if array.ndim == 0:
raise ValueError(
"Expected 2D array, got scalar array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# If input is 1D raise error
if array.ndim == 1:
raise ValueError(
"Expected 2D array, got 1D array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind in "OUSV":
warnings.warn(
"Arrays of bytes/strings is being converted to decimal "
"numbers if dtype='numeric'. This behavior is deprecated in "
"0.24 and will be removed in 1.1 (renaming of 0.26). Please "
"convert your data to numeric values explicitly instead.",
FutureWarning,
stacklevel=2,
)
try:
array = array.astype(np.float64)
except ValueError as e:
raise ValueError(
"Unable to convert array of bytes/strings "
"into decimal numbers with dtype='numeric'"
) from e
if not allow_nd and array.ndim >= 3:
raise ValueError(
"Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name)
)
if force_all_finite:
_assert_all_finite(array, allow_nan=force_all_finite == "allow-nan")
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError(
"Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, array.shape, ensure_min_samples, context)
)
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError(
"Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, array.shape, ensure_min_features, context)
)
if copy and np.may_share_memory(array, array_orig):
array = np.array(array, dtype=dtype, order=order)
return array
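# Illustrative sketch: a nested list is validated into a 2D ndarray, and 1D
# input is only accepted when ensure_2d=False.  Hypothetical helper.
def _example_check_array():
    X = check_array([[1.0, 2.0], [3.0, 4.0]])             # 2D, finite, float
    assert X.shape == (2, 2)
    x1d = check_array([1.0, 2.0, 3.0], ensure_2d=False)   # 1D allowed here
    return X, x1d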
def _check_large_sparse(X, accept_large_sparse=False):
"""Raise a ValueError if X has 64bit indices and accept_large_sparse=False"""
if not accept_large_sparse:
supported_indices = ["int32"]
if X.getformat() == "coo":
index_keys = ["col", "row"]
elif X.getformat() in ["csr", "csc", "bsr"]:
index_keys = ["indices", "indptr"]
else:
return
for key in index_keys:
indices_datatype = getattr(X, key).dtype
if indices_datatype not in supported_indices:
raise ValueError(
"Only sparse matrices with 32-bit integer"
" indices are accepted. Got %s indices." % indices_datatype
)
def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
if y is None:
raise ValueError("y cannot be None")
X = check_array(
X,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
)
y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric)
check_consistent_length(X, y)
return X, y
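# Illustrative sketch: X is validated as a 2D array and y as a 1D vector of
# consistent length.  Hypothetical helper.
def _example_check_X_y():
    X, y = check_X_y([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]], [0, 1, 0])
    assert X.shape == (3, 2) and y.shape == (3,)
    return X, y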
def _check_y(y, multi_output=False, y_numeric=False):
"""Isolated part of check_X_y dedicated to y validation"""
if multi_output:
y = check_array(
y, accept_sparse="csr", force_all_finite=True, ensure_2d=False, dtype=None
)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
_ensure_no_complex_data(y)
if y_numeric and y.dtype.kind == "O":
y = y.astype(np.float64)
return y
def column_or_1d(y, *, warn=False):
"""Ravel column or 1d numpy array, else raises an error.
Parameters
----------
y : array-like
warn : bool, default=False
To control display of warnings.
Returns
-------
y : ndarray
"""
y = np.asarray(y)
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning,
stacklevel=2,
)
return np.ravel(y)
raise ValueError(
"y should be a 1d array, got an array of shape {} instead.".format(shape)
)
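# Illustrative sketch: a column vector of shape (n_samples, 1) is ravelled to
# shape (n_samples,).  Hypothetical helper.
def _example_column_or_1d():
    y = column_or_1d(np.array([[1], [2], [3]]))
    assert y.shape == (3,)
    return y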
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : None, int or instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState instance" % seed
)
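# Illustrative sketch: an int seed yields a new RandomState, and an existing
# RandomState instance is returned unchanged.  Hypothetical helper.
def _example_check_random_state():
    rng = check_random_state(0)
    assert check_random_state(rng) is rng
    return rng.randint(10, size=3)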
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Parameters
----------
estimator : object
An estimator to inspect.
parameter : str
The searched parameter.
Returns
-------
is_parameter: bool
Whether the parameter was found to be a named parameter of the
estimator's fit method.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.utils.validation import has_fit_parameter
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, *, tol=1e-10, raise_warning=True, raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : {ndarray, sparse matrix}
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float, default=1e-10
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : bool, default=True
If True then raise a warning if conversion is required.
raise_exception : bool, default=False
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : {ndarray, sparse matrix}
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError(
"array must be 2-dimensional and square. shape = {0}".format(array.shape)
)
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ["csr", "csc", "coo"]:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn(
"Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.",
stacklevel=2,
)
if sp.issparse(array):
conversion = "to" + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
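# Illustrative sketch: an almost-symmetric matrix within `tol` is returned
# as-is; a clearly asymmetric one would be averaged with its transpose (and
# warned about by default).  Hypothetical helper.
def _example_check_symmetric():
    A = np.array([[1.0, 2.0], [2.0 + 1e-12, 1.0]])
    A_sym = check_symmetric(A)      # asymmetry below tol=1e-10, no warning
    assert np.allclose(A_sym, A_sym.T)
    return A_sym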
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
fitted attributes (ending with a trailing underscore) and otherwise
raises a NotFittedError with the given message.
If an estimator does not set any attributes with a trailing underscore, it
can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the
estimator is fitted or not.
Parameters
----------
estimator : estimator instance
estimator instance for which the check is performed.
attributes : str, list or tuple of str, default=None
Attribute name(s) given as string or a list/tuple of strings
Eg.: ``["coef_", "estimator_", ...], "coef_"``
        If `None`, `estimator` is considered fitted if there exists an
        attribute that ends with an underscore and does not start with a
        double underscore.
msg : str, default=None
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this
estimator."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default=all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
"""
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
if not hasattr(estimator, "fit"):
raise TypeError("%s is not an estimator instance." % (estimator))
if attributes is not None:
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])
elif hasattr(estimator, "__sklearn_is_fitted__"):
fitted = estimator.__sklearn_is_fitted__()
else:
fitted = [
v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")
]
if not fitted:
raise NotFittedError(msg % {"name": type(estimator).__name__})
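# Illustrative sketch: an estimator is considered fitted once fit() has set an
# attribute with a trailing underscore.  `_ExampleEstimator` is hypothetical.
class _ExampleEstimator:
    def fit(self, X=None, y=None):
        self.is_fitted_ = True
        return self
def _example_check_is_fitted():
    est = _ExampleEstimator()
    try:
        check_is_fitted(est)            # raises NotFittedError before fit
    except NotFittedError:
        pass
    check_is_fitted(est.fit())          # passes after fit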
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : {array-like, sparse matrix}
Input data.
whom : str
Who passed X to this function.
"""
# avoid X.min() on sparse matrix since it also sorts the indices
if sp.issparse(X):
if X.format in ["lil", "dok"]:
X = X.tocsr()
if X.data.size == 0:
X_min = 0
else:
X_min = X.data.min()
else:
X_min = X.min()
if X_min < 0:
raise ValueError("Negative values in data passed to %s" % whom)
def check_scalar(
x,
name,
target_type,
*,
min_val=None,
max_val=None,
include_boundaries="both",
):
"""Validate scalar parameters type and value.
Parameters
----------
x : object
The scalar parameter to validate.
name : str
The name of the parameter to be printed in error messages.
target_type : type or tuple
Acceptable data types for the parameter.
min_val : float or int, default=None
The minimum valid value the parameter can take. If None (default) it
is implied that the parameter does not have a lower bound.
    max_val : float or int, default=None
The maximum valid value the parameter can take. If None (default) it
is implied that the parameter does not have an upper bound.
include_boundaries : {"left", "right", "both", "neither"}, default="both"
Whether the interval defined by `min_val` and `max_val` should include
the boundaries. Possible choices are:
- `"left"`: only `min_val` is included in the valid interval;
- `"right"`: only `max_val` is included in the valid interval;
- `"both"`: `min_val` and `max_val` are included in the valid interval;
- `"neither"`: neither `min_val` nor `max_val` are included in the
valid interval.
Returns
-------
x : numbers.Number
The validated number.
Raises
------
TypeError
If the parameter's type does not match the desired type.
ValueError
If the parameter's value violates the given bounds.
"""
if not isinstance(x, target_type):
raise TypeError(f"{name} must be an instance of {target_type}, not {type(x)}.")
expected_include_boundaries = ("left", "right", "both", "neither")
if include_boundaries not in expected_include_boundaries:
raise ValueError(
f"Unknown value for `include_boundaries`: {repr(include_boundaries)}. "
f"Possible values are: {expected_include_boundaries}."
)
comparison_operator = (
operator.lt if include_boundaries in ("left", "both") else operator.le
)
if min_val is not None and comparison_operator(x, min_val):
raise ValueError(
f"{name} == {x}, must be"
f" {'>=' if include_boundaries in ('left', 'both') else '>'} {min_val}."
)
comparison_operator = (
operator.gt if include_boundaries in ("right", "both") else operator.ge
)
if max_val is not None and comparison_operator(x, max_val):
raise ValueError(
f"{name} == {x}, must be"
f" {'<=' if include_boundaries in ('right', 'both') else '<'} {max_val}."
)
return x
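# Illustrative sketch: validating a hypothetical `n_iter` hyperparameter as a
# positive integer.  Hypothetical helper.
def _example_check_scalar(n_iter=100):
    return check_scalar(n_iter, "n_iter", target_type=numbers.Integral, min_val=1)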
def _check_psd_eigenvalues(lambdas, enable_warnings=False):
"""Check the eigenvalues of a positive semidefinite (PSD) matrix.
Checks the provided array of PSD matrix eigenvalues for numerical or
conditioning issues and returns a fixed validated version. This method
should typically be used if the PSD matrix is user-provided (e.g. a
Gram matrix) or computed using a user-provided dissimilarity metric
(e.g. kernel function), or if the decomposition process uses approximation
methods (randomized SVD, etc.).
It checks for three things:
- that there are no significant imaginary parts in eigenvalues (more than
1e-5 times the maximum real part). If this check fails, it raises a
``ValueError``. Otherwise all non-significant imaginary parts that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
- that eigenvalues are not all negative. If this check fails, it raises a
``ValueError``
- that there are no significant negative eigenvalues with absolute value
more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest
      positive eigenvalue in double (single) precision. If this check fails,
it raises a ``ValueError``. Otherwise all negative eigenvalues that may
remain are set to zero. This operation is traced with a
``PositiveSpectrumWarning`` when ``enable_warnings=True``.
Finally, all the positive eigenvalues that are too small (with a value
smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to
zero. This operation is traced with a ``PositiveSpectrumWarning`` when
``enable_warnings=True``.
Parameters
----------
lambdas : array-like of shape (n_eigenvalues,)
Array of eigenvalues to check / fix.
enable_warnings : bool, default=False
When this is set to ``True``, a ``PositiveSpectrumWarning`` will be
raised when there are imaginary parts, negative eigenvalues, or
extremely small non-zero eigenvalues. Otherwise no warning will be
raised. In both cases, imaginary parts, negative eigenvalues, and
extremely small non-zero eigenvalues will be set to zero.
Returns
-------
lambdas_fixed : ndarray of shape (n_eigenvalues,)
A fixed validated copy of the array of eigenvalues.
Examples
--------
>>> from sklearn.utils.validation import _check_psd_eigenvalues
>>> _check_psd_eigenvalues([1, 2]) # nominal case
array([1, 2])
>>> _check_psd_eigenvalues([5, 5j]) # significant imag part
Traceback (most recent call last):
...
ValueError: There are significant imaginary parts in eigenvalues (1
of the maximum real part). Either the matrix is not PSD, or there was
an issue while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part
array([5., 0.])
>>> _check_psd_eigenvalues([-5, -1]) # all negative
Traceback (most recent call last):
...
ValueError: All eigenvalues are negative (maximum is -1). Either the
matrix is not PSD, or there was an issue while computing the
eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -1]) # significant negative
Traceback (most recent call last):
...
ValueError: There are significant negative eigenvalues (0.2 of the
maximum positive). Either the matrix is not PSD, or there was an issue
while computing the eigendecomposition of the matrix.
>>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative
array([5., 0.])
>>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small)
array([5., 0.])
"""
lambdas = np.array(lambdas)
is_double_precision = lambdas.dtype == np.float64
# note: the minimum value available is
# - single-precision: np.finfo('float32').eps = 1.2e-07
# - double-precision: np.finfo('float64').eps = 2.2e-16
# the various thresholds used for validation
# we may wish to change the value according to precision.
significant_imag_ratio = 1e-5
significant_neg_ratio = 1e-5 if is_double_precision else 5e-3
significant_neg_value = 1e-10 if is_double_precision else 1e-6
small_pos_ratio = 1e-12 if is_double_precision else 2e-7
# Check that there are no significant imaginary parts
if not np.isreal(lambdas).all():
max_imag_abs = np.abs(np.imag(lambdas)).max()
max_real_abs = np.abs(np.real(lambdas)).max()
if max_imag_abs > significant_imag_ratio * max_real_abs:
raise ValueError(
"There are significant imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not PSD, or "
"there was an issue while computing the eigendecomposition "
"of the matrix." % (max_imag_abs / max_real_abs)
)
# warn about imaginary parts being removed
if enable_warnings:
warnings.warn(
"There are imaginary parts in eigenvalues (%g "
"of the maximum real part). Either the matrix is not"
" PSD, or there was an issue while computing the "
"eigendecomposition of the matrix. Only the real "
"parts will be kept." % (max_imag_abs / max_real_abs),
PositiveSpectrumWarning,
)
# Remove all imaginary parts (even if zero)
lambdas = np.real(lambdas)
# Check that there are no significant negative eigenvalues
max_eig = lambdas.max()
if max_eig < 0:
raise ValueError(
"All eigenvalues are negative (maximum is %g). "
"Either the matrix is not PSD, or there was an "
"issue while computing the eigendecomposition of "
"the matrix." % max_eig
)
else:
min_eig = lambdas.min()
if (
min_eig < -significant_neg_ratio * max_eig
and min_eig < -significant_neg_value
):
raise ValueError(
"There are significant negative eigenvalues (%g"
" of the maximum positive). Either the matrix is "
"not PSD, or there was an issue while computing "
"the eigendecomposition of the matrix." % (-min_eig / max_eig)
)
elif min_eig < 0:
# Remove all negative values and warn about it
if enable_warnings:
warnings.warn(
"There are negative eigenvalues (%g of the "
"maximum positive). Either the matrix is not "
"PSD, or there was an issue while computing the"
" eigendecomposition of the matrix. Negative "
"eigenvalues will be replaced with 0." % (-min_eig / max_eig),
PositiveSpectrumWarning,
)
lambdas[lambdas < 0] = 0
# Check for conditioning (small positive non-zeros)
too_small_lambdas = (0 < lambdas) & (lambdas < small_pos_ratio * max_eig)
if too_small_lambdas.any():
if enable_warnings:
warnings.warn(
"Badly conditioned PSD matrix spectrum: the largest "
"eigenvalue is more than %g times the smallest. "
"Small eigenvalues will be replaced with 0."
"" % (1 / small_pos_ratio),
PositiveSpectrumWarning,
)
lambdas[too_small_lambdas] = 0
return lambdas
def _check_sample_weight(
sample_weight, X, dtype=None, copy=False, only_non_negative=False
):
"""Validate sample weights.
Note that passing sample_weight=None will output an array of ones.
Therefore, in some cases, you may want to protect the call with:
if sample_weight is not None:
sample_weight = _check_sample_weight(...)
Parameters
----------
sample_weight : {ndarray, Number or None}, shape (n_samples,)
Input sample weights.
X : {ndarray, list, sparse matrix}
Input data.
only_non_negative : bool, default=False,
Whether or not the weights are expected to be non-negative.
.. versionadded:: 1.0
dtype : dtype, default=None
dtype of the validated `sample_weight`.
If None, and the input `sample_weight` is an array, the dtype of the
input is preserved; otherwise an array with the default numpy dtype
        is allocated. If `dtype` is not one of `float32`, `float64`,
`None`, the output will be of dtype `float64`.
copy : bool, default=False
If True, a copy of sample_weight will be created.
Returns
-------
sample_weight : ndarray of shape (n_samples,)
Validated sample weight. It is guaranteed to be "C" contiguous.
"""
n_samples = _num_samples(X)
if dtype is not None and dtype not in [np.float32, np.float64]:
dtype = np.float64
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=dtype)
elif isinstance(sample_weight, numbers.Number):
sample_weight = np.full(n_samples, sample_weight, dtype=dtype)
else:
if dtype is None:
dtype = [np.float64, np.float32]
sample_weight = check_array(
sample_weight,
accept_sparse=False,
ensure_2d=False,
dtype=dtype,
order="C",
copy=copy,
)
if sample_weight.ndim != 1:
raise ValueError("Sample weights must be 1D array or scalar")
if sample_weight.shape != (n_samples,):
raise ValueError(
"sample_weight.shape == {}, expected {}!".format(
sample_weight.shape, (n_samples,)
)
)
if only_non_negative:
check_non_negative(sample_weight, "`sample_weight`")
return sample_weight
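# Illustrative sketch: None becomes an array of ones, a scalar is broadcast to
# all samples, and an explicit array is validated against n_samples.
# Hypothetical helper.
def _example_check_sample_weight():
    X = np.zeros((4, 2))
    assert np.array_equal(_check_sample_weight(None, X), np.ones(4))
    assert np.array_equal(_check_sample_weight(2.0, X), np.full(4, 2.0))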
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9):
"""Check allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-7
Relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
"""
if sp.issparse(x) and sp.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
return (
np.array_equal(x.indices, y.indices)
and np.array_equal(x.indptr, y.indptr)
and np.allclose(x.data, y.data, rtol=rtol, atol=atol)
)
elif not sp.issparse(x) and not sp.issparse(y):
return np.allclose(x, y, rtol=rtol, atol=atol)
raise ValueError(
"Can only compare two sparse matrices, not a sparse matrix and an array"
)
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as `X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support indexing.
"""
from . import _safe_indexing
fit_params_validated = {}
for param_key, param_value in fit_params.items():
if not _is_arraylike(param_value) or _num_samples(param_value) != _num_samples(
X
):
# Non-indexable pass-through (for now for backward-compatibility).
# https://github.com/scikit-learn/scikit-learn/issues/15805
fit_params_validated[param_key] = param_value
else:
# Any other fit_params should support indexing
# (e.g. for cross-validation).
fit_params_validated[param_key] = _make_indexable(param_value)
fit_params_validated[param_key] = _safe_indexing(
fit_params_validated[param_key], indices
)
return fit_params_validated
def _get_feature_names(X):
"""Get feature names from X.
Support for other array containers should place its implementation here.
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
Array container to extract feature names.
- pandas dataframe : The columns will be considered to be feature
names. If the dataframe contains non-string feature names, `None` is
returned.
- All other array containers will return `None`.
Returns
-------
names: ndarray or None
Feature names of `X`. Unrecognized array containers will return `None`.
"""
feature_names = None
# extract feature names for support array containers
if hasattr(X, "columns"):
feature_names = np.asarray(X.columns, dtype=object)
if feature_names is None or len(feature_names) == 0:
return
types = sorted(t.__qualname__ for t in set(type(v) for v in feature_names))
# Warn when types are mixed.
# ints and strings do not warn
if len(types) > 1 or not (types[0].startswith("int") or types[0] == "str"):
# TODO: Convert to an error in 1.2
warnings.warn(
"Feature names only support names that are all strings. "
f"Got feature names with dtypes: {types}. An error will be raised "
"in 1.2.",
FutureWarning,
)
return
# Only feature names of all strings are supported
if types[0] == "str":
return feature_names
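# Illustrative sketch: plain ndarrays carry no column names, so None is
# returned; only all-string dataframe columns yield feature names.
# Hypothetical helper.
def _example_get_feature_names():
    assert _get_feature_names(np.zeros((2, 3))) is None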
def _check_feature_names_in(estimator, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
          then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_in : ndarray of str
Feature names in.
"""
feature_names_in_ = getattr(estimator, "feature_names_in_", None)
n_features_in_ = getattr(estimator, "n_features_in_", None)
if input_features is not None:
input_features = np.asarray(input_features, dtype=object)
if feature_names_in_ is not None and not np.array_equal(
feature_names_in_, input_features
):
raise ValueError("input_features is not equal to feature_names_in_")
if n_features_in_ is not None and len(input_features) != n_features_in_:
raise ValueError(
"input_features should have length equal to number of "
f"features ({n_features_in_}), got {len(input_features)}"
)
return input_features
if feature_names_in_ is not None:
return feature_names_in_
# Generates feature names if `n_features_in_` is defined
if n_features_in_ is None:
raise ValueError("Unable to generate feature names without n_features_in_")
return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
|
# -*- coding: utf-8 -*-
"""
Entrance of the subscription module.
"""
import json
import logging
import os
from datetime import datetime
from src.utils_v1.did.eladid import ffi, lib
from src import hive_setting
from src.utils_v1.constants import APP_INSTANCE_DID, DID_INFO_NONCE_EXPIRED
from src.utils_v1.did.entity import Entity
from src.utils_v1.did_info import create_nonce, get_did_info_by_app_instance_did, add_did_nonce_to_db, \
update_did_info_by_app_instance_did, get_did_info_by_nonce, update_token_of_did_info
from src.utils.http_client import HttpClient
from src.utils.http_exception import InvalidParameterException, BadRequestException
from src.utils.http_response import hive_restful_response
from src.utils.consts import URL_DID_SIGN_IN, URL_DID_BACKUP_AUTH
from src.utils.singleton import Singleton
class Auth(Entity, metaclass=Singleton):
def __init__(self):
self.storepass = hive_setting.DID_STOREPASS
Entity.__init__(self, "hive.auth", mnemonic=hive_setting.DID_MNEMONIC, passphrase=hive_setting.DID_PASSPHRASE)
self.http = HttpClient()
@hive_restful_response
    def sign_in(self, doc):
        """Handle the DID sign-in request: extract the app instance DID from
        the posted DID document, save a fresh nonce for it in the database,
        and return a signed challenge JWT."""
app_instance_did = self.__get_app_instance_did(doc)
return {
"challenge": self.__create_challenge(app_instance_did, *self.__save_nonce_to_db(app_instance_did))
}
def __get_app_instance_did(self, app_instance_doc):
doc_str = json.dumps(app_instance_doc)
app_instance_doc = lib.DIDDocument_FromJson(doc_str.encode())
if not app_instance_doc or lib.DIDDocument_IsValid(app_instance_doc) != 1:
raise BadRequestException(msg='The did document is invalid in getting app instance did.')
app_instance_did = lib.DIDDocument_GetSubject(app_instance_doc)
if not app_instance_did:
raise BadRequestException(msg='Can not get did from document in getting app instance did.')
spec_did_str = ffi.string(lib.DID_GetMethodSpecificId(app_instance_did)).decode()
try:
with open(hive_setting.DID_DATA_LOCAL_DIDS + os.sep + spec_did_str, "w") as f:
f.write(doc_str)
except Exception as e:
logging.getLogger("HiveAuth").error(f"Exception in sign_in:{str(e)} in getting app instance did")
return "did:" + ffi.string(lib.DID_GetMethod(app_instance_did)).decode() + ":" + spec_did_str
def __save_nonce_to_db(self, app_instance_did):
nonce, expire_time = create_nonce(), int(datetime.now().timestamp()) + hive_setting.AUTH_CHALLENGE_EXPIRED
try:
if not get_did_info_by_app_instance_did(app_instance_did):
add_did_nonce_to_db(app_instance_did, nonce, expire_time)
else:
update_did_info_by_app_instance_did(app_instance_did, nonce, expire_time)
except Exception as e:
logging.getLogger("HiveAuth").error(f"Exception in __save_nonce_to_db: {e}")
raise BadRequestException(msg='Failed to generate nonce.')
return nonce, expire_time
def __create_challenge(self, app_instance_did, nonce, expire_time):
"""
Create challenge for sign in response.
"""
builder = lib.DIDDocument_GetJwtBuilder(self.doc) # service instance doc
if not builder:
raise BadRequestException(msg=f'Can not get challenge builder: {self.get_error_message()}.')
lib.JWTBuilder_SetHeader(builder, "type".encode(), "JWT".encode())
lib.JWTBuilder_SetHeader(builder, "version".encode(), "1.0".encode())
lib.JWTBuilder_SetSubject(builder, "DIDAuthChallenge".encode())
lib.JWTBuilder_SetAudience(builder, app_instance_did.encode())
lib.JWTBuilder_SetClaim(builder, "nonce".encode(), nonce.encode())
lib.JWTBuilder_SetExpiration(builder, expire_time)
lib.JWTBuilder_Sign(builder, ffi.NULL, self.storepass)
token = lib.JWTBuilder_Compact(builder)
lib.JWTBuilder_Destroy(builder)
if not token:
raise BadRequestException(msg="Failed to create challenge token.")
return ffi.string(token).decode()
@hive_restful_response
    def auth(self, challenge_response):
        """Verify the signed challenge response (a JWT carrying a verifiable
        presentation), then create, persist, and return an access token for
        the app instance DID."""
credential_info = self.__get_auth_info_from_challenge_response(challenge_response, ['appDid', ])
access_token = self.__create_access_token(credential_info, "AccessToken")
try:
update_token_of_did_info(credential_info["userDid"],
credential_info["appDid"],
credential_info["id"],
credential_info["nonce"],
access_token,
credential_info["expTime"])
except Exception as e:
logging.error(f"Exception in __save_auth_info_to_db:: {e}")
raise e
return {
"token": access_token,
}
def __get_auth_info_from_challenge_response(self, challenge_response, props=None):
presentation_json, nonce, nonce_info = self.__get_values_from_challenge_response(challenge_response)
if nonce_info[DID_INFO_NONCE_EXPIRED] < int(datetime.now().timestamp()):
raise BadRequestException(msg='The nonce expired.')
credential_info = self.__get_presentation_credential_info(presentation_json, props)
if credential_info["id"] != nonce_info[APP_INSTANCE_DID]:
raise BadRequestException(msg='The app instance did of the credential does not match.')
credential_info["nonce"] = nonce
return credential_info
def __get_values_from_challenge_response(self, challenge_response):
challenge_response_cstr = lib.DefaultJWSParser_Parse(challenge_response.encode())
if not challenge_response_cstr:
raise BadRequestException(msg='Invalid challenge response.')
presentation_cstr = lib.JWT_GetClaimAsJson(challenge_response_cstr, "presentation".encode())
lib.JWT_Destroy(challenge_response_cstr)
if not presentation_cstr:
raise BadRequestException(msg='Can not get presentation cstr.')
presentation = lib.Presentation_FromJson(presentation_cstr)
if not presentation or lib.Presentation_IsValid(presentation) != 1:
raise BadRequestException(msg='The presentation is invalid.')
if lib.Presentation_GetCredentialCount(presentation) < 1:
raise BadRequestException(msg='No presentation credential exists.')
self.__validate_presentation_realm(presentation)
nonce, nonce_info = self.__get_presentation_nonce(presentation)
return json.loads(ffi.string(presentation_cstr).decode()), nonce, nonce_info
def __get_presentation_nonce(self, presentation):
nonce = lib.Presentation_GetNonce(presentation)
if not nonce:
raise BadRequestException(msg='Failed to get presentation nonce.')
nonce_str = ffi.string(nonce).decode()
if not nonce_str:
raise BadRequestException(msg='Invalid presentation nonce.')
nonce_info = get_did_info_by_nonce(nonce_str)
if not nonce_info:
raise BadRequestException(msg='Can not get presentation nonce information from database.')
return nonce_str, nonce_info
def __validate_presentation_realm(self, presentation):
realm = lib.Presentation_GetRealm(presentation)
if not realm:
raise BadRequestException(msg='Can not get presentation realm.')
realm = ffi.string(realm).decode()
if not realm or realm != self.get_did_string():
raise BadRequestException(msg='Invalid presentation realm or not match.')
def __get_presentation_credential_info(self, presentation_json, props=None):
if "verifiableCredential" not in presentation_json:
raise BadRequestException(msg='Verifiable credentials do not exist.')
vcs_json = presentation_json["verifiableCredential"]
if not isinstance(vcs_json, list):
raise BadRequestException(msg="Verifiable credentials are not the list.")
vc_json = vcs_json[0]
if not vc_json:
raise BadRequestException(msg='The credential is invalid.')
if "credentialSubject" not in vc_json or type(vc_json["credentialSubject"]) != dict\
or "issuer" not in vc_json:
raise BadRequestException('The credential subject is invalid or the issuer does not exist.')
credential_info = vc_json["credentialSubject"]
required_props = ['id', ]
if props:
required_props.extend(props)
not_exist_props = list(filter(lambda p: p not in credential_info, required_props))
if not_exist_props:
raise BadRequestException(f"The credentialSubject's prop ({not_exist_props}) does not exists.")
credential_info["expTime"] = self.__get_presentation_credential_expire_time(vcs_json)
credential_info["userDid"] = vc_json["issuer"]
return credential_info
def __get_presentation_credential_expire_time(self, vcs_json):
vc_str = json.dumps(vcs_json[0])
if not vc_str:
raise BadRequestException(msg='The presentation credential does not exist.')
vc = lib.Credential_FromJson(vc_str.encode(), ffi.NULL)
if not vc or lib.Credential_IsValid(vc) != 1:
raise BadRequestException(msg='The presentation credential is invalid.')
exp_time = lib.Credential_GetExpirationDate(vc)
if exp_time <= 0:
raise BadRequestException("The credential's expiration date does not exist.")
return min(int(datetime.now().timestamp()) + hive_setting.ACCESS_TOKEN_EXPIRED, exp_time)
def __create_access_token(self, credential_info, subject):
doc = lib.DIDStore_LoadDID(self.store, self.did)
if not doc:
raise BadRequestException('Can not load service instance document in creating access token.')
builder = lib.DIDDocument_GetJwtBuilder(doc)
if not builder:
raise BadRequestException(msg='Can not get builder from doc in creating access token.')
lib.JWTBuilder_SetHeader(builder, "typ".encode(), "JWT".encode())
lib.JWTBuilder_SetHeader(builder, "version".encode(), "1.0".encode())
lib.JWTBuilder_SetSubject(builder, subject.encode())
lib.JWTBuilder_SetAudience(builder, credential_info["id"].encode())
lib.JWTBuilder_SetExpiration(builder, credential_info["expTime"])
props = {k: credential_info[k] for k in credential_info if k not in ['id', 'expTime']}
if not lib.JWTBuilder_SetClaim(builder, "props".encode(), json.dumps(props).encode()):
lib.JWTBuilder_Destroy(builder)
raise BadRequestException(msg='Can not set claim in creating access token.')
lib.JWTBuilder_Sign(builder, ffi.NULL, self.storepass)
token = lib.JWTBuilder_Compact(builder)
lib.JWTBuilder_Destroy(builder)
if not token:
raise BadRequestException(msg='Can not build token in creating access token.')
return ffi.string(token).decode()
@hive_restful_response
def backup_auth(self, challenge_response):
""" for the vault service node """
credential_info = self.__get_auth_info_from_challenge_response(challenge_response, ["targetHost", "targetDID"])
access_token = self.__create_access_token(credential_info, "BackupToken")
return {'token': access_token}
def get_error_message(self, prompt=None):
""" helper method to get error message from did.so """
err_message = ffi.string(lib.DIDError_GetLastErrorMessage()).decode()
return err_message if not prompt else f'[{prompt}] {err_message}'
def get_backup_credential_info(self, credential):
""" for vault /backup """
from src.utils_v1.auth import get_credential_info
credential_info, err = get_credential_info(credential, ["targetHost", "targetDID"])
if credential_info is None:
raise InvalidParameterException(msg=f'Failed to get credential info: {err}')
return credential_info
def backup_client_sign_in(self, host_url, credential, subject):
"""
for vault /backup & /restore
:return challenge_response, backup_service_instance_did
"""
vc = lib.Credential_FromJson(credential.encode(), ffi.NULL)
if not vc:
raise InvalidParameterException(msg='backup_sign_in: invalid credential.')
doc_str = ffi.string(lib.DIDDocument_ToJson(lib.DIDStore_LoadDID(self.store, self.did), True)).decode()
doc = json.loads(doc_str)
body = self.http.post(host_url + URL_DID_SIGN_IN, None, {"id": doc})
if 'challenge' not in body or not body["challenge"]:
raise InvalidParameterException(msg='backup_sign_in: failed to sign in to backup node.')
jws = lib.DefaultJWSParser_Parse(body["challenge"].encode())
if not jws:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to parse challenge with error {self.get_error_message()}.')
aud = ffi.string(lib.JWT_GetAudience(jws)).decode()
if aud != self.get_did_string():
lib.JWT_Destroy(jws)
raise InvalidParameterException(msg=f'backup_sign_in: failed to get the audience of the challenge.')
nonce = ffi.string(lib.JWT_GetClaim(jws, "nonce".encode())).decode()
if nonce is None:
lib.JWT_Destroy(jws)
raise InvalidParameterException(
msg=f'backup_sign_in: failed to get the nonce of the challenge.')
issuer = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
if issuer is None:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to get the issuer of the challenge.')
vp_json = self.create_presentation(vc, nonce, issuer)
if vp_json is None:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to create presentation.')
challenge_response = self.create_vp_token(vp_json, subject, issuer, hive_setting.AUTH_CHALLENGE_EXPIRED)
if challenge_response is None:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to create the challenge response.')
return challenge_response, issuer
def backup_client_auth(self, host_url, challenge_response, backup_service_instance_did):
"""
for vault /backup & /restore
:return backup access token
"""
body = self.http.post(host_url + URL_DID_BACKUP_AUTH, None, {"challenge_response": challenge_response})
if 'token' not in body or not body["token"]:
raise InvalidParameterException(msg='backup_auth: failed to backup auth to backup node.')
jws = lib.DefaultJWSParser_Parse(body["token"].encode())
if not jws:
raise InvalidParameterException(
msg=f'backup_auth: failed to parse token with error {self.get_error_message()}.')
audience = ffi.string(lib.JWT_GetAudience(jws)).decode()
if audience != self.get_did_string():
lib.JWT_Destroy(jws)
raise InvalidParameterException(msg=f'backup_auth: failed to get the audience of the challenge.')
issuer = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
if issuer != backup_service_instance_did:
raise InvalidParameterException(msg=f'backup_auth: failed to get the issuer of the challenge.')
return body["token"]
def create_order_proof(self, user_did, doc_id, amount=0, is_receipt=False):
doc = lib.DIDStore_LoadDID(self.store, self.did)
if not doc:
raise BadRequestException('Can not load service instance document in creating order proof.')
builder = lib.DIDDocument_GetJwtBuilder(doc)
if not builder:
raise BadRequestException(msg='Can not get builder from doc in creating order proof.')
lib.JWTBuilder_SetHeader(builder, "typ".encode(), "JWT".encode())
lib.JWTBuilder_SetHeader(builder, "version".encode(), "1.0".encode())
lib.JWTBuilder_SetSubject(builder, 'ORDER_PROOF'.encode())
lib.JWTBuilder_SetAudience(builder, user_did.encode())
exp = int(datetime.utcnow().timestamp()) + 7 * 24 * 3600 if not is_receipt else -1
lib.JWTBuilder_SetExpiration(builder, exp)
props = {'order_id': doc_id}
if is_receipt:
props = {'receipt_id': doc_id, 'amount': amount}
lib.JWTBuilder_SetClaim(builder, "props".encode(), json.dumps(props).encode())
lib.JWTBuilder_Sign(builder, ffi.NULL, self.storepass)
proof = lib.JWTBuilder_Compact(builder)
lib.JWTBuilder_Destroy(builder)
if not proof:
raise BadRequestException(msg='Can not build token in creating order proof.')
return ffi.string(proof).decode()
def verify_order_proof(self, proof, user_did, order_id):
# INFO:DefaultJWSParser_Parse will validate the sign information.
jws = lib.DefaultJWSParser_Parse(proof.encode())
if not jws:
raise BadRequestException(msg=self.get_error_message('parse the proof error'))
issuer = lib.JWT_GetIssuer(jws)
if not issuer:
lib.JWT_Destroy(jws)
            raise BadRequestException(msg=self.get_error_message('the issuer of the proof error'))
if self.did_str != ffi.string(issuer).decode():
lib.JWT_Destroy(jws)
            raise BadRequestException(msg=f'the issuer of the proof not match: {ffi.string(issuer).decode()}')
audience = lib.JWT_GetAudience(jws)
if not audience:
lib.JWT_Destroy(jws)
raise BadRequestException(msg=self.get_error_message('the audience of the proof error'))
if user_did != ffi.string(audience).decode():
lib.JWT_Destroy(jws)
raise BadRequestException(msg=f'the audience of the proof not match: {ffi.string(audience).decode()}')
props = lib.JWT_GetClaim(jws, "props".encode())
if not props:
lib.JWT_Destroy(jws)
raise BadRequestException(msg=self.get_error_message('the claim of the proof error'))
props_json = json.loads(ffi.string(props).decode())
if props_json.get('order_id') != order_id:
lib.JWT_Destroy(jws)
            raise BadRequestException(msg=f'the order_id of the proof not match: {props_json.get("order_id")}')
if hive_setting.PAYMENT_CHECK_EXPIRED:
expired = lib.JWT_GetExpiration(jws)
now = int(datetime.now().timestamp())
if now > expired:
lib.JWT_Destroy(jws)
raise BadRequestException(msg=f'the proof is expired (valid for 7 days)')
lib.JWT_Destroy(jws)
|
# -*- coding: utf-8 -*-
"""
Entrance of the subscription module.
"""
import json
import logging
import os
from datetime import datetime
from src.utils_v1.did.eladid import ffi, lib
from src import hive_setting
from src.utils_v1.constants import APP_INSTANCE_DID, DID_INFO_NONCE_EXPIRED
from src.utils_v1.did.entity import Entity
from src.utils_v1.did_info import create_nonce, get_did_info_by_app_instance_did, add_did_nonce_to_db, \
update_did_info_by_app_instance_did, get_did_info_by_nonce, update_token_of_did_info
from src.utils.http_client import HttpClient
from src.utils.http_exception import InvalidParameterException, BadRequestException
from src.utils.http_response import hive_restful_response
from src.utils.consts import URL_DID_SIGN_IN, URL_DID_BACKUP_AUTH
from src.utils.singleton import Singleton
class Auth(Entity, metaclass=Singleton):
def __init__(self):
self.storepass = hive_setting.DID_STOREPASS
Entity.__init__(self, "hive.auth", mnemonic=hive_setting.DID_MNEMONIC, passphrase=hive_setting.DID_PASSPHRASE)
self.http = HttpClient()
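    # Challenge/response flow implemented by the two public endpoints below:
    #   1. sign_in(doc): the client posts its app instance DID document; a nonce is stored
    #      and a signed "DIDAuthChallenge" JWT (audience = app instance DID) is returned.
    #   2. auth(challenge_response): the client replies with a JWT whose "presentation"
    #      claim wraps a verifiable credential; after nonce/realm/credential checks an
    #      "AccessToken" JWT is issued and persisted via update_token_of_did_info().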
@hive_restful_response
def sign_in(self, doc):
app_instance_did = self.__get_app_instance_did(doc)
return {
"challenge": self.__create_challenge(app_instance_did, *self.__save_nonce_to_db(app_instance_did))
}
def __get_app_instance_did(self, app_instance_doc):
doc_str = json.dumps(app_instance_doc)
app_instance_doc = lib.DIDDocument_FromJson(doc_str.encode())
if not app_instance_doc or lib.DIDDocument_IsValid(app_instance_doc) != 1:
raise BadRequestException(msg='The did document is invalid in getting app instance did.')
app_instance_did = lib.DIDDocument_GetSubject(app_instance_doc)
if not app_instance_did:
raise BadRequestException(msg='Can not get did from document in getting app instance did.')
spec_did_str = ffi.string(lib.DID_GetMethodSpecificId(app_instance_did)).decode()
try:
with open(hive_setting.DID_DATA_LOCAL_DIDS + os.sep + spec_did_str, "w") as f:
f.write(doc_str)
except Exception as e:
logging.getLogger("HiveAuth").error(f"Exception in sign_in:{str(e)} in getting app instance did")
return "did:" + ffi.string(lib.DID_GetMethod(app_instance_did)).decode() + ":" + spec_did_str
def __save_nonce_to_db(self, app_instance_did):
nonce, expire_time = create_nonce(), int(datetime.now().timestamp()) + hive_setting.AUTH_CHALLENGE_EXPIRED
try:
if not get_did_info_by_app_instance_did(app_instance_did):
add_did_nonce_to_db(app_instance_did, nonce, expire_time)
else:
update_did_info_by_app_instance_did(app_instance_did, nonce, expire_time)
except Exception as e:
logging.getLogger("HiveAuth").error(f"Exception in __save_nonce_to_db: {e}")
raise BadRequestException(msg='Failed to generate nonce.')
return nonce, expire_time
def __create_challenge(self, app_instance_did, nonce, expire_time):
"""
Create challenge for sign in response.
"""
builder = lib.DIDDocument_GetJwtBuilder(self.doc) # service instance doc
if not builder:
raise BadRequestException(msg=f'Can not get challenge builder: {self.get_error_message()}.')
lib.JWTBuilder_SetHeader(builder, "type".encode(), "JWT".encode())
lib.JWTBuilder_SetHeader(builder, "version".encode(), "1.0".encode())
lib.JWTBuilder_SetSubject(builder, "DIDAuthChallenge".encode())
lib.JWTBuilder_SetAudience(builder, app_instance_did.encode())
lib.JWTBuilder_SetClaim(builder, "nonce".encode(), nonce.encode())
lib.JWTBuilder_SetExpiration(builder, expire_time)
lib.JWTBuilder_Sign(builder, ffi.NULL, self.storepass)
token = lib.JWTBuilder_Compact(builder)
lib.JWTBuilder_Destroy(builder)
if not token:
raise BadRequestException(msg="Failed to create challenge token.")
return ffi.string(token).decode()
@hive_restful_response
def auth(self, challenge_response):
credential_info = self.__get_auth_info_from_challenge_response(challenge_response, ['appDid', ])
access_token = self.__create_access_token(credential_info, "AccessToken")
try:
update_token_of_did_info(credential_info["userDid"],
credential_info["appDid"],
credential_info["id"],
credential_info["nonce"],
access_token,
credential_info["expTime"])
except Exception as e:
            logging.error(f"Exception in __save_auth_info_to_db: {e}")
raise e
return {
"token": access_token,
}
def __get_auth_info_from_challenge_response(self, challenge_response, props=None):
presentation_json, nonce, nonce_info = self.__get_values_from_challenge_response(challenge_response)
if nonce_info[DID_INFO_NONCE_EXPIRED] < int(datetime.now().timestamp()):
raise BadRequestException(msg='The nonce expired.')
credential_info = self.__get_presentation_credential_info(presentation_json, props)
if credential_info["id"] != nonce_info[APP_INSTANCE_DID]:
raise BadRequestException(msg='The app instance did of the credential does not match.')
credential_info["nonce"] = nonce
return credential_info
def __get_values_from_challenge_response(self, challenge_response):
challenge_response_cstr = lib.DefaultJWSParser_Parse(challenge_response.encode())
if not challenge_response_cstr:
raise BadRequestException(msg='Invalid challenge response.')
presentation_cstr = lib.JWT_GetClaimAsJson(challenge_response_cstr, "presentation".encode())
lib.JWT_Destroy(challenge_response_cstr)
if not presentation_cstr:
raise BadRequestException(msg='Can not get presentation cstr.')
presentation = lib.Presentation_FromJson(presentation_cstr)
if not presentation or lib.Presentation_IsValid(presentation) != 1:
raise BadRequestException(msg='The presentation is invalid.')
if lib.Presentation_GetCredentialCount(presentation) < 1:
raise BadRequestException(msg='No presentation credential exists.')
self.__validate_presentation_realm(presentation)
nonce, nonce_info = self.__get_presentation_nonce(presentation)
return json.loads(ffi.string(presentation_cstr).decode()), nonce, nonce_info
def __get_presentation_nonce(self, presentation):
nonce = lib.Presentation_GetNonce(presentation)
if not nonce:
raise BadRequestException(msg='Failed to get presentation nonce.')
nonce_str = ffi.string(nonce).decode()
if not nonce_str:
raise BadRequestException(msg='Invalid presentation nonce.')
nonce_info = get_did_info_by_nonce(nonce_str)
if not nonce_info:
raise BadRequestException(msg='Can not get presentation nonce information from database.')
return nonce_str, nonce_info
def __validate_presentation_realm(self, presentation):
realm = lib.Presentation_GetRealm(presentation)
if not realm:
raise BadRequestException(msg='Can not get presentation realm.')
realm = ffi.string(realm).decode()
if not realm or realm != self.get_did_string():
raise BadRequestException(msg='Invalid presentation realm or not match.')
def __get_presentation_credential_info(self, presentation_json, props=None):
if "verifiableCredential" not in presentation_json:
raise BadRequestException(msg='Verifiable credentials do not exist.')
vcs_json = presentation_json["verifiableCredential"]
if not isinstance(vcs_json, list):
raise BadRequestException(msg="Verifiable credentials are not the list.")
vc_json = vcs_json[0]
if not vc_json:
raise BadRequestException(msg='The credential is invalid.')
if "credentialSubject" not in vc_json or type(vc_json["credentialSubject"]) != dict\
or "issuer" not in vc_json:
raise BadRequestException('The credential subject is invalid or the issuer does not exist.')
credential_info = vc_json["credentialSubject"]
required_props = ['id', ]
if props:
required_props.extend(props)
not_exist_props = list(filter(lambda p: p not in credential_info, required_props))
if not_exist_props:
            raise BadRequestException(f"The credentialSubject's props ({not_exist_props}) do not exist.")
credential_info["expTime"] = self.__get_presentation_credential_expire_time(vcs_json)
credential_info["userDid"] = vc_json["issuer"]
return credential_info
def __get_presentation_credential_expire_time(self, vcs_json):
vc_str = json.dumps(vcs_json[0])
if not vc_str:
raise BadRequestException(msg='The presentation credential does not exist.')
vc = lib.Credential_FromJson(vc_str.encode(), ffi.NULL)
if not vc or lib.Credential_IsValid(vc) != 1:
raise BadRequestException(msg='The presentation credential is invalid.')
exp_time = lib.Credential_GetExpirationDate(vc)
if exp_time <= 0:
raise BadRequestException("The credential's expiration date does not exist.")
return min(int(datetime.now().timestamp()) + hive_setting.ACCESS_TOKEN_EXPIRED, exp_time)
def __create_access_token(self, credential_info, subject):
doc = lib.DIDStore_LoadDID(self.store, self.did)
if not doc:
raise BadRequestException('Can not load service instance document in creating access token.')
builder = lib.DIDDocument_GetJwtBuilder(doc)
if not builder:
raise BadRequestException(msg='Can not get builder from doc in creating access token.')
lib.JWTBuilder_SetHeader(builder, "typ".encode(), "JWT".encode())
lib.JWTBuilder_SetHeader(builder, "version".encode(), "1.0".encode())
lib.JWTBuilder_SetSubject(builder, subject.encode())
lib.JWTBuilder_SetAudience(builder, credential_info["id"].encode())
lib.JWTBuilder_SetExpiration(builder, credential_info["expTime"])
props = {k: credential_info[k] for k in credential_info if k not in ['id', 'expTime']}
if not lib.JWTBuilder_SetClaim(builder, "props".encode(), json.dumps(props).encode()):
lib.JWTBuilder_Destroy(builder)
raise BadRequestException(msg='Can not set claim in creating access token.')
lib.JWTBuilder_Sign(builder, ffi.NULL, self.storepass)
token = lib.JWTBuilder_Compact(builder)
lib.JWTBuilder_Destroy(builder)
if not token:
raise BadRequestException(msg='Can not build token in creating access token.')
return ffi.string(token).decode()
@hive_restful_response
def backup_auth(self, challenge_response):
""" for the vault service node """
credential_info = self.__get_auth_info_from_challenge_response(challenge_response, ["targetHost", "targetDID"])
access_token = self.__create_access_token(credential_info, "BackupToken")
return {'token': access_token}
def get_error_message(self, prompt=None):
""" helper method to get error message from did.so """
err_message = ffi.string(lib.DIDError_GetLastErrorMessage()).decode()
return err_message if not prompt else f'[{prompt}] {err_message}'
def get_backup_credential_info(self, credential):
""" for vault /backup """
from src.utils_v1.auth import get_credential_info
credential_info, err = get_credential_info(credential, ["targetHost", "targetDID"])
if credential_info is None:
raise InvalidParameterException(msg=f'Failed to get credential info: {err}')
return credential_info
def backup_client_sign_in(self, host_url, credential, subject):
"""
for vault /backup & /restore
:return challenge_response, backup_service_instance_did
"""
vc = lib.Credential_FromJson(credential.encode(), ffi.NULL)
if not vc:
raise InvalidParameterException(msg='backup_sign_in: invalid credential.')
doc_str = ffi.string(lib.DIDDocument_ToJson(lib.DIDStore_LoadDID(self.store, self.did), True)).decode()
doc = json.loads(doc_str)
body = self.http.post(host_url + URL_DID_SIGN_IN, None, {"id": doc})
if 'challenge' not in body or not body["challenge"]:
raise InvalidParameterException(msg='backup_sign_in: failed to sign in to backup node.')
jws = lib.DefaultJWSParser_Parse(body["challenge"].encode())
if not jws:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to parse challenge with error {self.get_error_message()}.')
aud = ffi.string(lib.JWT_GetAudience(jws)).decode()
if aud != self.get_did_string():
lib.JWT_Destroy(jws)
raise InvalidParameterException(msg=f'backup_sign_in: failed to get the audience of the challenge.')
nonce = ffi.string(lib.JWT_GetClaim(jws, "nonce".encode())).decode()
if nonce is None:
lib.JWT_Destroy(jws)
raise InvalidParameterException(
msg=f'backup_sign_in: failed to get the nonce of the challenge.')
issuer = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
if issuer is None:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to get the issuer of the challenge.')
vp_json = self.create_presentation(vc, nonce, issuer)
if vp_json is None:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to create presentation.')
challenge_response = self.create_vp_token(vp_json, subject, issuer, hive_setting.AUTH_CHALLENGE_EXPIRED)
if challenge_response is None:
raise InvalidParameterException(
msg=f'backup_sign_in: failed to create the challenge response.')
return challenge_response, issuer
def backup_client_auth(self, host_url, challenge_response, backup_service_instance_did):
"""
for vault /backup & /restore
:return backup access token
"""
body = self.http.post(host_url + URL_DID_BACKUP_AUTH, None, {"challenge_response": challenge_response})
if 'token' not in body or not body["token"]:
raise InvalidParameterException(msg='backup_auth: failed to backup auth to backup node.')
jws = lib.DefaultJWSParser_Parse(body["token"].encode())
if not jws:
raise InvalidParameterException(
msg=f'backup_auth: failed to parse token with error {self.get_error_message()}.')
audience = ffi.string(lib.JWT_GetAudience(jws)).decode()
if audience != self.get_did_string():
lib.JWT_Destroy(jws)
raise InvalidParameterException(msg=f'backup_auth: failed to get the audience of the challenge.')
issuer = ffi.string(lib.JWT_GetIssuer(jws)).decode()
lib.JWT_Destroy(jws)
if issuer != backup_service_instance_did:
raise InvalidParameterException(msg=f'backup_auth: failed to get the issuer of the challenge.')
return body["token"]
def create_order_proof(self, user_did, doc_id, amount=0, is_receipt=False):
doc = lib.DIDStore_LoadDID(self.store, self.did)
if not doc:
raise BadRequestException('Can not load service instance document in creating order proof.')
builder = lib.DIDDocument_GetJwtBuilder(doc)
if not builder:
raise BadRequestException(msg='Can not get builder from doc in creating order proof.')
lib.JWTBuilder_SetHeader(builder, "typ".encode(), "JWT".encode())
lib.JWTBuilder_SetHeader(builder, "version".encode(), "1.0".encode())
lib.JWTBuilder_SetSubject(builder, 'ORDER_PROOF'.encode())
lib.JWTBuilder_SetAudience(builder, user_did.encode())
exp = int(datetime.utcnow().timestamp()) + 7 * 24 * 3600 if not is_receipt else -1
lib.JWTBuilder_SetExpiration(builder, exp)
props = {'order_id': doc_id}
if is_receipt:
props = {'receipt_id': doc_id, 'amount': amount}
lib.JWTBuilder_SetClaim(builder, "props".encode(), json.dumps(props).encode())
lib.JWTBuilder_Sign(builder, ffi.NULL, self.storepass)
proof = lib.JWTBuilder_Compact(builder)
lib.JWTBuilder_Destroy(builder)
if not proof:
raise BadRequestException(msg='Can not build token in creating order proof.')
return ffi.string(proof).decode()
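    # The proof created above is a JWT signed by the service instance DID with
    # sub='ORDER_PROOF', aud=user_did, exp=now+7 days (or -1 for receipts) and a
    # 'props' claim of {'order_id': ...} or {'receipt_id': ..., 'amount': ...}.
    # verify_order_proof() below re-parses it and checks issuer, audience, order_id and,
    # when PAYMENT_CHECK_EXPIRED is enabled, the expiration time.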
def verify_order_proof(self, proof, user_did, order_id):
# INFO:DefaultJWSParser_Parse will validate the sign information.
jws = lib.DefaultJWSParser_Parse(proof.encode())
if not jws:
raise BadRequestException(msg=self.get_error_message('parse the proof error'))
issuer = lib.JWT_GetIssuer(jws)
if not issuer:
lib.JWT_Destroy(jws)
            raise BadRequestException(msg=self.get_error_message('the issuer of the proof error'))
if self.did_str != ffi.string(issuer).decode():
lib.JWT_Destroy(jws)
            raise BadRequestException(msg=f'the issuer of the proof not match: {ffi.string(issuer).decode()}')
audience = lib.JWT_GetAudience(jws)
if not audience:
lib.JWT_Destroy(jws)
raise BadRequestException(msg=self.get_error_message('the audience of the proof error'))
if user_did != ffi.string(audience).decode():
lib.JWT_Destroy(jws)
raise BadRequestException(msg=f'the audience of the proof not match: {ffi.string(audience).decode()}')
props = lib.JWT_GetClaim(jws, "props".encode())
if not props:
lib.JWT_Destroy(jws)
raise BadRequestException(msg=self.get_error_message('the claim of the proof error'))
props_json = json.loads(ffi.string(props).decode())
if props_json.get('order_id') != order_id:
lib.JWT_Destroy(jws)
raise BadRequestException(msg=f'the order_id of the proof not match: {props_json.get("order_id")}')
if hive_setting.PAYMENT_CHECK_EXPIRED:
expired = lib.JWT_GetExpiration(jws)
now = int(datetime.now().timestamp())
if now > expired:
lib.JWT_Destroy(jws)
raise BadRequestException(msg=f'the proof is expired (valid for 7 days)')
lib.JWT_Destroy(jws)
|
import logging
from typing import Any, Dict, List
from .utils import get_json
def fetch_markets(market_type: str) -> List[Dict[str, Any]]:
    '''Fetch all trading markets from a crypto exchange.'''
if market_type == 'spot':
return _fetch_spot_markets()
elif market_type == 'swap':
return _fetch_swap_markets()
else:
raise ValueError(f'Unknown market type: {market_type}')
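# Both helpers below assume the Bitz v2 endpoints answer with a JSON envelope that
# contains at least 'status' and 'data'; a minimal sketch of the expected shapes
# (illustrative values only, not taken from the API docs):
#   spot:  {'status': 200, 'data': {'btc_usdt': {...}, 'eth_usdt': {...}}}  -> dict values returned
#   swap:  {'status': 200, 'data': [{...}, {...}]}                          -> list returned as-is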
def _fetch_spot_markets() -> List[Dict[str, Any]]:
url = 'https://apiv2.bitz.com/V2/Market/symbolList'
resp = get_json(url)
if resp['status'] != 200:
        logging.error(f'resp.status is {resp["status"]}')
return []
else:
return list(resp['data'].values())
def _fetch_swap_markets() -> List[Dict[str, Any]]:
url = 'https://apiv2.bitz.com/V2/Market/getContractCoin'
resp = get_json(url)
if resp['status'] != 200:
        logging.error(f'resp.status is {resp["status"]}')
return []
else:
return resp['data']
|
import logging
from typing import Any, Dict, List
from .utils import get_json
def fetch_markets(market_type: str) -> List[Dict[str, Any]]:
    '''Fetch all trading markets from a crypto exchange.'''
if market_type == 'spot':
return _fetch_spot_markets()
elif market_type == 'swap':
return _fetch_swap_markets()
else:
raise ValueError(f'Unknown market type: {market_type}')
def _fetch_spot_markets() -> List[Dict[str, Any]]:
url = 'https://apiv2.bitz.com/V2/Market/symbolList'
resp = get_json(url)
if resp['status'] != 200:
logging.error(f'resp.status is {resp["status"]}')
return []
else:
return list(resp['data'].values())
def _fetch_swap_markets() -> List[Dict[str, Any]]:
url = 'https://apiv2.bitz.com/V2/Market/getContractCoin'
resp = get_json(url)
if resp['status'] != 200:
logging.error(f'resp.status is {resp["status"]}')
return []
else:
return resp['data']
|
"""
Training or finetuning an LSR model on the DocRED dataset.
"""
import argparse
import csv
import json
import logging
import os
import numpy as np
import torch
from sklearn import metrics
from torch.utils.data.dataloader import DataLoader, default_collate
from transformers import set_seed
from .config import LsrConfig
from .modeling import LsrModel, LsrModelOutput
from .preprocess import LsrPreprocessor
from .utils import h_t_idx_generator
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class LsrMetrics:
"""
For calculating metrics for LsrModel.
Computes precision, recall, f1, auc for precision vs recall, and the optimal prediction threshold (theta).
This is modified from the original.
The original additionally computes an ignore score which ignores facts seen in training set.
"""
def __init__(self, num_relations):
self.num_relations = num_relations
self.test_result = []
self.total_recall = 0
def add_batch(self, model_output: LsrModelOutput, batch_data):
predict_re = torch.sigmoid(model_output.prediction)
predict_re = predict_re.data.cpu().numpy()
for i, label in enumerate(batch_data['labels']):
self.total_recall += len(label)
vertex_set_length = batch_data['entity_num_list'][i] # the number of entities in each instance.
for j, (h_idx, t_idx) in enumerate(h_t_idx_generator(vertex_set_length)):
for r in range(1, self.num_relations):
result_tuple = (
(h_idx, t_idx, r) in label,
float(predict_re[i, j, r]),
)
self.test_result.append(result_tuple)
def compute(self, input_theta=None):
"""
Computes metrics based on data added so far.
Args:
input_theta (`optional`, `float`):
                Prediction threshold. Provide a value between 0 and 1 if you want to compute the precision and recall
                for that specific threshold. Otherwise the optimal threshold based on the f1 score will be computed for you.
"""
# Sorts in descending order by predicted value
self.test_result.sort(key=lambda x: x[1], reverse=True)
precision = []
recall = []
correct = 0
w = 0
if self.total_recall == 0:
self.total_recall = 1 # for test
for i, item in enumerate(self.test_result):
correct += item[0]
recall.append(float(correct) / (i + 1))
precision.append(float(correct) / self.total_recall)
if input_theta is not None and item[1] > input_theta:
w = i
precision = np.asarray(precision, dtype='float32')
recall = np.asarray(recall, dtype='float32')
f1_arr = (2 * precision * recall / (precision + recall + 1e-20))
auc = metrics.auc(x=precision, y=recall)
if input_theta is None:
f1 = f1_arr.max()
f1_pos = f1_arr.argmax()
best_theta = self.test_result[f1_pos][1]
return best_theta, f1, precision[f1_pos], recall[f1_pos], auc
else:
return input_theta, f1_arr[w], precision[w], recall[w], auc
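# Minimal usage sketch for LsrMetrics (mirrors how train_model() uses it below;
# variable names here are illustrative):
#   tracker = LsrMetrics(num_relations=config.num_relations)
#   for batch in dataloader:
#       tracker.add_batch(model_output=model(**batch), batch_data=batch)
#   best_theta, f1, precision, recall, auc = tracker.compute()
# A previously tuned threshold can be re-applied with tracker.compute(input_theta=best_theta).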
class DocredDataset(torch.utils.data.Dataset):
def __init__(self, json_file, preprocessor):
        with open(json_file) as f:
            self.data = json.load(f)
self.preprocessed_data = preprocessor(self.data)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
instance = {}
for key in self.preprocessed_data.keys():
instance[key] = self.preprocessed_data[key][idx]
return instance
def lsr_collate_fn(batch):
"""
Manually processes labels and uses default for the rest.
"""
labels = []
for instance in batch:
labels_instance = instance.pop("labels")
labels.append(labels_instance)
collated_data = default_collate(batch)
collated_data["labels"] = labels
return collated_data
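# Note: each instance's "labels" is a variable-length collection of (head, tail, relation)
# facts, so it cannot be stacked by default_collate; lsr_collate_fn keeps it as a
# per-instance Python list and lets default_collate handle every other (tensor) field.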
class MyAdagrad(torch.optim.Optimizer):
"""
Modification of the Adagrad optimizer that allows to specify an initial
accumulator value. This mimics the behavior of the default Adagrad implementation
in Tensorflow. The default PyTorch Adagrad uses 0 for initial accumulator value.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
        init_accu_value (float, optional): initial accumulator value.
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, \
weight_decay=weight_decay)
super(MyAdagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.ones(p.data.size()).type_as(p.data) * \
init_accu_value
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
grad = grad.add(group['weight_decay'], p.data)
clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
if p.grad.data.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = torch.Size([x for x in grad.size()])
def make_sparse(values):
constructor = type(p.grad.data)
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor()
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum']._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
def get_optimizer(name, parameters, lr, l2=0):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr, weight_decay=l2)
elif name in ['adagrad', 'myadagrad']:
# use custom adagrad to allow for init accumulator value
return MyAdagrad(parameters, lr=lr, init_accu_value=0.1, weight_decay=l2)
elif name == 'adam':
return torch.optim.Adam(parameters, lr, weight_decay=l2)
elif name == 'adamw':
return torch.optim.AdamW(parameters, lr=lr, weight_decay=l2)
elif name == 'adamax':
        return torch.optim.Adamax(parameters, lr=lr, weight_decay=l2)
elif name == 'adadelta':
return torch.optim.Adadelta(parameters, lr=lr, weight_decay=l2)
else:
raise Exception("Unsupported optimizer: {}".format(name))
def parse_args():
parser = argparse.ArgumentParser(
description="Train a relation extraction model based on latent structure refinement.")
parser.add_argument("--train_file", type=str, required=True, help="A json file containing the training data.")
parser.add_argument("--validation_file", type=str, default=None, help="A json file containing the validation data.")
parser.add_argument("--output_dir", type=str, default=None, help="Output directory path.")
parser.add_argument("--metadata_dir", type=str, required=True, help="Path to docred metadata directory.")
# Model arguments
parser.add_argument("--model_weights_path", type=str, default=None,
help="Provide a path to model weights if you want to finetune from a checkpoint.")
parser.add_argument("--model_config_path", type=str, default=None,
help="Provide a config if you want to override the defaults. "
"To use bert encoder layer, specify in config file.")
parser.add_argument("--pretrained_embeddings_path", type=str, default=None,
                        help="Provide a path to pretrained embeddings if you want to use them.")
# Training arguments
parser.add_argument('--lr', type=float, default=1e-3, help='Applies to sgd and adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.98, help='Learning rate decay rate.')
parser.add_argument('--decay_epoch', type=int, default=20, help='Decay learning rate after this epoch.')
parser.add_argument('--evaluate_epoch', type=int, default=30, help='Evaluate after this epoch.')
parser.add_argument('--optim', choices=['sgd', 'adagrad', 'adam', 'adamw', 'adamax'], default='adam',
help='Choice of optimizer.')
parser.add_argument('--weight_decay', type=float, default=0, help='L2 weight decay.')
parser.add_argument('--num_epoch', type=int, default=200, help='Number of total training epochs.')
parser.add_argument('--batch_size', type=int, default=20, help='Training batch size.')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--seed', type=int, default=0, help="A seed for reproducible training.")
parser.add_argument('--use_gpu', type=bool, default=True, help="Whether you want to use GPU for training.")
parser.add_argument('--use_wandb', type=bool, default=False, help="Whether to use wandb to monitor training.")
parser.add_argument('--wandb_run_name', type=str, default=None, help="Wandb run name.")
args = parser.parse_args()
return args
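# Example invocation (paths and the package name are placeholders, not part of this repo):
#   python -m your_package.train --train_file data/train_annotated.json \
#       --validation_file data/dev.json --metadata_dir data/docred_metadata \
#       --output_dir runs/lsr --batch_size 20 --num_epoch 200
# Only --train_file and --metadata_dir are declared required, but train_model() always
# builds a validation dataset, so --validation_file is effectively required as well.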
def train_model(args):
logger.info(f"Training arguments: {vars(args)}")
# Defaults to cpu if gpu is unavailable
device = torch.device("cuda") if args.use_gpu and torch.cuda.is_available() else torch.device("cpu")
logger.info(f"Using device: {device}")
# Make output dir, save training args, create metrics files
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'best_f1'), exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'final'), exist_ok=True)
with open(os.path.join(args.output_dir, 'training_args.json'), "w") as fp:
json.dump(vars(args), fp, sort_keys=True, indent=4)
# create metric output files
train_metrics_file = os.path.join(args.output_dir, 'train_metrics.csv')
val_metrics_file = os.path.join(args.output_dir, 'val_metrics.csv')
fieldnames = ['epoch', 'loss', 'best_theta', 'f1', 'precision', 'recall', 'auc']
with open(train_metrics_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
with open(val_metrics_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
if args.seed is not None:
set_seed(args.seed)
# Load config if provided else initialize with defaults
if args.model_config_path:
config = LsrConfig.from_json_file(json_file=args.model_config_path)
else:
config = LsrConfig()
# Load model weights if provided
if args.model_weights_path:
model = LsrModel.from_pretrained(args.model_weights_path, config=config)
else:
model = LsrModel(config)
if args.use_wandb:
import wandb
wandb.init(project="lsr", name=args.wandb_run_name)
wandb.watch(model, log="all")
# Note: this will override the provided model weights
if args.pretrained_embeddings_path is not None and not config.use_bert:
pretrained_embeddings = np.load(args.pretrained_embeddings_path)
model.load_pretrained_word_embedding(pretrained_word_embedding=pretrained_embeddings)
# Set to training device
model.to(device=device)
# Load dataset
# Set to cpu initially (for preprocessing entire dataset first)
logger.info("Preprocessing datasets...")
rel2id_path = os.path.join(args.metadata_dir, "rel2id.json")
word2id_path = os.path.join(args.metadata_dir, "word2id.json")
ner2id_path = os.path.join(args.metadata_dir, "ner2id.json")
train_preprocessor = LsrPreprocessor(rel2id_path=rel2id_path, word2id_path=word2id_path, ner2id_path=ner2id_path,
is_train=True, device=torch.device("cpu"), config=config)
val_preprocessor = LsrPreprocessor(rel2id_path=rel2id_path, word2id_path=word2id_path, ner2id_path=ner2id_path,
device=torch.device("cpu"), config=config)
train_dataset = DocredDataset(json_file=args.train_file, preprocessor=train_preprocessor)
val_dataset = DocredDataset(json_file=args.validation_file, preprocessor=val_preprocessor)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size, collate_fn=lsr_collate_fn)
val_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=args.batch_size, collate_fn=lsr_collate_fn)
# Optimizer and parameters
if config.use_bert:
other_params = [p for name, p in model.named_parameters() if
p.requires_grad and not name.startswith("bert")]
optimizer = torch.optim.Adam([
{"params": other_params, "lr": args.lr},
{"params": model.bert.parameters(), "lr": 1e-5},
], lr=args.lr)
else:
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = get_optimizer(args.optim, parameters, args.lr)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, args.lr_decay)
best_f1 = 0
for epoch in range(args.num_epoch):
        logger.info(f"Epoch: {epoch + 1}/{args.num_epoch}, lr: {optimizer.param_groups[0]['lr']}")
train_metrics = LsrMetrics(num_relations=config.num_relations)
total_epoch_train_loss = 0
model.train()
for step, batch in enumerate(train_dataloader):
for key, value in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.to(device=device)
outputs = model(**batch)
# Backpropagation
loss = outputs.loss
# TODO: Remove debug logs below
if np.isnan(loss.item()):
logger.info("Skipping backward prop.")
continue
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
# Track metrics
total_epoch_train_loss += loss.item()
train_metrics.add_batch(model_output=outputs, batch_data=batch)
# Compute metrics and log at end of epoch
best_theta, f1, precision, recall, auc = train_metrics.compute()
avg_epoch_train_loss = total_epoch_train_loss / (step + 1)
logger.info(f"Train loss: {avg_epoch_train_loss:.3f}, best theta: {best_theta:.3f}, "
f"f1: {f1:.3f}, precision: {precision:.3f}, recall: {recall:.3f}, auc: {auc:.5f}")
if args.use_wandb:
wandb.log({
"train_loss": avg_epoch_train_loss,
"train_best_theta": best_theta,
"train_f1": f1,
"train_precision": precision,
"train_recall": recall,
"train_auc": auc
}, step=epoch)
# Write train metrics
if args.output_dir is not None:
with open(train_metrics_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow({'epoch': epoch + 1, 'loss': avg_epoch_train_loss, 'best_theta': best_theta, 'f1': f1,
'precision': precision, 'recall': recall, 'auc': auc})
if epoch + 1 >= args.evaluate_epoch:
val_metrics = LsrMetrics(num_relations=config.num_relations)
total_epoch_val_loss = 0
model.eval()
for step, batch in enumerate(val_dataloader):
for key, value in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.to(device=device)
outputs = model(**batch)
# Track metrics
                total_epoch_val_loss += outputs.loss.item()
val_metrics.add_batch(model_output=outputs, batch_data=batch)
# Compute metrics and log at end of epoch
best_theta, f1, precision, recall, auc = val_metrics.compute()
avg_epoch_val_loss = total_epoch_val_loss / (step + 1)
logger.info(f"Val loss: {avg_epoch_val_loss:.3f}, best theta: {best_theta:.3f}, "
f"f1: {f1:.3f}, precision: {precision:.3f}, recall: {recall:.3f}, auc: {auc:.5f}")
if args.use_wandb:
wandb.log({
"val_loss": avg_epoch_val_loss,
"val_best_theta": best_theta,
"val_f1": f1,
"val_precision": precision,
"val_recall": recall,
"val_auc": auc
}, step=epoch)
# Write val metrics
if args.output_dir is not None:
with open(val_metrics_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(
{'epoch': epoch + 1, 'loss': avg_epoch_val_loss, 'best_theta': best_theta, 'f1': f1,
'precision': precision, 'recall': recall, 'auc': auc})
# Save best model so far
if args.output_dir is not None and f1 > best_f1:
logger.info("Best f1 so far, saving model.")
best_f1 = f1
model.save_pretrained(os.path.join(args.output_dir, 'best_f1'))
if epoch + 1 >= args.decay_epoch:
if args.optim == 'adam' and optimizer.param_groups[0]['lr'] > 1e-4:
scheduler.step()
# Save final model
if args.output_dir is not None:
logger.info("Saving final model.")
model.save_pretrained(os.path.join(args.output_dir, 'final'))
if __name__ == "__main__":
args = parse_args()
train_model(args)
|
"""
Training or finetuning an LSR model on the DocRED dataset.
"""
import argparse
import csv
import json
import logging
import os
import numpy as np
import torch
from sklearn import metrics
from torch.utils.data.dataloader import DataLoader, default_collate
from transformers import set_seed
from .config import LsrConfig
from .modeling import LsrModel, LsrModelOutput
from .preprocess import LsrPreprocessor
from .utils import h_t_idx_generator
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class LsrMetrics:
"""
For calculating metrics for LsrModel.
Computes precision, recall, f1, auc for precision vs recall, and the optimal prediction threshold (theta).
This is modified from the original.
The original additionally computes an ignore score which ignores facts seen in training set.
"""
def __init__(self, num_relations):
self.num_relations = num_relations
self.test_result = []
self.total_recall = 0
def add_batch(self, model_output: LsrModelOutput, batch_data):
predict_re = torch.sigmoid(model_output.prediction)
predict_re = predict_re.data.cpu().numpy()
for i, label in enumerate(batch_data['labels']):
self.total_recall += len(label)
vertex_set_length = batch_data['entity_num_list'][i] # the number of entities in each instance.
for j, (h_idx, t_idx) in enumerate(h_t_idx_generator(vertex_set_length)):
for r in range(1, self.num_relations):
result_tuple = (
(h_idx, t_idx, r) in label,
float(predict_re[i, j, r]),
)
self.test_result.append(result_tuple)
def compute(self, input_theta=None):
"""
Computes metrics based on data added so far.
Args:
input_theta (`optional`, `float`):
                Prediction threshold. Provide a value between 0 and 1 if you want to compute the precision and recall
                for that specific threshold. Otherwise the optimal threshold based on the f1 score will be computed for you.
"""
# Sorts in descending order by predicted value
self.test_result.sort(key=lambda x: x[1], reverse=True)
precision = []
recall = []
correct = 0
w = 0
if self.total_recall == 0:
self.total_recall = 1 # for test
for i, item in enumerate(self.test_result):
correct += item[0]
recall.append(float(correct) / (i + 1))
precision.append(float(correct) / self.total_recall)
if input_theta is not None and item[1] > input_theta:
w = i
precision = np.asarray(precision, dtype='float32')
recall = np.asarray(recall, dtype='float32')
f1_arr = (2 * precision * recall / (precision + recall + 1e-20))
auc = metrics.auc(x=precision, y=recall)
if input_theta is None:
f1 = f1_arr.max()
f1_pos = f1_arr.argmax()
best_theta = self.test_result[f1_pos][1]
return best_theta, f1, precision[f1_pos], recall[f1_pos], auc
else:
return input_theta, f1_arr[w], precision[w], recall[w], auc
class DocredDataset(torch.utils.data.Dataset):
def __init__(self, json_file, preprocessor):
        with open(json_file) as f:
            self.data = json.load(f)
self.preprocessed_data = preprocessor(self.data)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
instance = {}
for key in self.preprocessed_data.keys():
instance[key] = self.preprocessed_data[key][idx]
return instance
def lsr_collate_fn(batch):
"""
Manually processes labels and uses default for the rest.
"""
labels = []
for instance in batch:
labels_instance = instance.pop("labels")
labels.append(labels_instance)
collated_data = default_collate(batch)
collated_data["labels"] = labels
return collated_data
class MyAdagrad(torch.optim.Optimizer):
"""
Modification of the Adagrad optimizer that allows to specify an initial
accumulator value. This mimics the behavior of the default Adagrad implementation
in Tensorflow. The default PyTorch Adagrad uses 0 for initial accumulator value.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
        init_accu_value (float, optional): initial accumulator value.
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, \
weight_decay=weight_decay)
super(MyAdagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.ones(p.data.size()).type_as(p.data) * \
init_accu_value
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
grad = grad.add(group['weight_decay'], p.data)
clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
if p.grad.data.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = torch.Size([x for x in grad.size()])
def make_sparse(values):
constructor = type(p.grad.data)
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor()
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum']._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss
def get_optimizer(name, parameters, lr, l2=0):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr, weight_decay=l2)
elif name in ['adagrad', 'myadagrad']:
# use custom adagrad to allow for init accumulator value
return MyAdagrad(parameters, lr=lr, init_accu_value=0.1, weight_decay=l2)
elif name == 'adam':
return torch.optim.Adam(parameters, lr, weight_decay=l2)
elif name == 'adamw':
return torch.optim.AdamW(parameters, lr=lr, weight_decay=l2)
elif name == 'adamax':
        return torch.optim.Adamax(parameters, lr=lr, weight_decay=l2)
elif name == 'adadelta':
return torch.optim.Adadelta(parameters, lr=lr, weight_decay=l2)
else:
raise Exception("Unsupported optimizer: {}".format(name))
def parse_args():
parser = argparse.ArgumentParser(
description="Train a relation extraction model based on latent structure refinement.")
parser.add_argument("--train_file", type=str, required=True, help="A json file containing the training data.")
parser.add_argument("--validation_file", type=str, default=None, help="A json file containing the validation data.")
parser.add_argument("--output_dir", type=str, default=None, help="Output directory path.")
parser.add_argument("--metadata_dir", type=str, required=True, help="Path to docred metadata directory.")
# Model arguments
parser.add_argument("--model_weights_path", type=str, default=None,
help="Provide a path to model weights if you want to finetune from a checkpoint.")
parser.add_argument("--model_config_path", type=str, default=None,
help="Provide a config if you want to override the defaults. "
"To use bert encoder layer, specify in config file.")
parser.add_argument("--pretrained_embeddings_path", type=str, default=None,
                        help="Provide a path to pretrained embeddings if you want to use them.")
# Training arguments
parser.add_argument('--lr', type=float, default=1e-3, help='Applies to sgd and adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.98, help='Learning rate decay rate.')
parser.add_argument('--decay_epoch', type=int, default=20, help='Decay learning rate after this epoch.')
parser.add_argument('--evaluate_epoch', type=int, default=30, help='Evaluate after this epoch.')
parser.add_argument('--optim', choices=['sgd', 'adagrad', 'adam', 'adamw', 'adamax'], default='adam',
help='Choice of optimizer.')
parser.add_argument('--weight_decay', type=float, default=0, help='L2 weight decay.')
parser.add_argument('--num_epoch', type=int, default=200, help='Number of total training epochs.')
parser.add_argument('--batch_size', type=int, default=20, help='Training batch size.')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--seed', type=int, default=0, help="A seed for reproducible training.")
parser.add_argument('--use_gpu', type=bool, default=True, help="Whether you want to use GPU for training.")
parser.add_argument('--use_wandb', type=bool, default=False, help="Whether to use wandb to monitor training.")
parser.add_argument('--wandb_run_name', type=str, default=None, help="Wandb run name.")
args = parser.parse_args()
return args
def train_model(args):
logger.info(f"Training arguments: {vars(args)}")
# Defaults to cpu if gpu is unavailable
device = torch.device("cuda") if args.use_gpu and torch.cuda.is_available() else torch.device("cpu")
logger.info(f"Using device: {device}")
# Make output dir, save training args, create metrics files
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'best_f1'), exist_ok=True)
os.makedirs(os.path.join(args.output_dir, 'final'), exist_ok=True)
with open(os.path.join(args.output_dir, 'training_args.json'), "w") as fp:
json.dump(vars(args), fp, sort_keys=True, indent=4)
# create metric output files
train_metrics_file = os.path.join(args.output_dir, 'train_metrics.csv')
val_metrics_file = os.path.join(args.output_dir, 'val_metrics.csv')
fieldnames = ['epoch', 'loss', 'best_theta', 'f1', 'precision', 'recall', 'auc']
with open(train_metrics_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
with open(val_metrics_file, 'w') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
if args.seed is not None:
set_seed(args.seed)
# Load config if provided else initialize with defaults
if args.model_config_path:
config = LsrConfig.from_json_file(json_file=args.model_config_path)
else:
config = LsrConfig()
# Load model weights if provided
if args.model_weights_path:
model = LsrModel.from_pretrained(args.model_weights_path, config=config)
else:
model = LsrModel(config)
if args.use_wandb:
import wandb
wandb.init(project="lsr", name=args.wandb_run_name)
wandb.watch(model, log="all")
# Note: this will override the provided model weights
if args.pretrained_embeddings_path is not None and not config.use_bert:
pretrained_embeddings = np.load(args.pretrained_embeddings_path)
model.load_pretrained_word_embedding(pretrained_word_embedding=pretrained_embeddings)
# Set to training device
model.to(device=device)
# Load dataset
# Set to cpu initially (for preprocessing entire dataset first)
logger.info("Preprocessing datasets...")
rel2id_path = os.path.join(args.metadata_dir, "rel2id.json")
word2id_path = os.path.join(args.metadata_dir, "word2id.json")
ner2id_path = os.path.join(args.metadata_dir, "ner2id.json")
train_preprocessor = LsrPreprocessor(rel2id_path=rel2id_path, word2id_path=word2id_path, ner2id_path=ner2id_path,
is_train=True, device=torch.device("cpu"), config=config)
val_preprocessor = LsrPreprocessor(rel2id_path=rel2id_path, word2id_path=word2id_path, ner2id_path=ner2id_path,
device=torch.device("cpu"), config=config)
train_dataset = DocredDataset(json_file=args.train_file, preprocessor=train_preprocessor)
val_dataset = DocredDataset(json_file=args.validation_file, preprocessor=val_preprocessor)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size, collate_fn=lsr_collate_fn)
val_dataloader = DataLoader(val_dataset, shuffle=True, batch_size=args.batch_size, collate_fn=lsr_collate_fn)
# Optimizer and parameters
if config.use_bert:
other_params = [p for name, p in model.named_parameters() if
p.requires_grad and not name.startswith("bert")]
optimizer = torch.optim.Adam([
{"params": other_params, "lr": args.lr},
{"params": model.bert.parameters(), "lr": 1e-5},
], lr=args.lr)
else:
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer = get_optimizer(args.optim, parameters, args.lr)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, args.lr_decay)
best_f1 = 0
for epoch in range(args.num_epoch):
logger.info(f"Epoch: {epoch + 1}/{args.num_epoch}, lr: {optimizer.param_groups[0]['lr']}")
train_metrics = LsrMetrics(num_relations=config.num_relations)
total_epoch_train_loss = 0
model.train()
for step, batch in enumerate(train_dataloader):
for key, value in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.to(device=device)
outputs = model(**batch)
# Backpropagation
loss = outputs.loss
# TODO: Remove debug logs below
if np.isnan(loss.item()):
logger.info("Skipping backward prop.")
continue
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
# Track metrics
total_epoch_train_loss += loss.item()
train_metrics.add_batch(model_output=outputs, batch_data=batch)
# Compute metrics and log at end of epoch
best_theta, f1, precision, recall, auc = train_metrics.compute()
avg_epoch_train_loss = total_epoch_train_loss / (step + 1)
logger.info(f"Train loss: {avg_epoch_train_loss:.3f}, best theta: {best_theta:.3f}, "
f"f1: {f1:.3f}, precision: {precision:.3f}, recall: {recall:.3f}, auc: {auc:.5f}")
if args.use_wandb:
wandb.log({
"train_loss": avg_epoch_train_loss,
"train_best_theta": best_theta,
"train_f1": f1,
"train_precision": precision,
"train_recall": recall,
"train_auc": auc
}, step=epoch)
# Write train metrics
if args.output_dir is not None:
with open(train_metrics_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow({'epoch': epoch + 1, 'loss': avg_epoch_train_loss, 'best_theta': best_theta, 'f1': f1,
'precision': precision, 'recall': recall, 'auc': auc})
if epoch + 1 >= args.evaluate_epoch:
val_metrics = LsrMetrics(num_relations=config.num_relations)
total_epoch_val_loss = 0
model.eval()
for step, batch in enumerate(val_dataloader):
for key, value in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.to(device=device)
outputs = model(**batch)
# Track metrics
                total_epoch_val_loss += outputs.loss.item()
val_metrics.add_batch(model_output=outputs, batch_data=batch)
# Compute metrics and log at end of epoch
best_theta, f1, precision, recall, auc = val_metrics.compute()
avg_epoch_val_loss = total_epoch_val_loss / (step + 1)
logger.info(f"Val loss: {avg_epoch_val_loss:.3f}, best theta: {best_theta:.3f}, "
f"f1: {f1:.3f}, precision: {precision:.3f}, recall: {recall:.3f}, auc: {auc:.5f}")
if args.use_wandb:
wandb.log({
"val_loss": avg_epoch_val_loss,
"val_best_theta": best_theta,
"val_f1": f1,
"val_precision": precision,
"val_recall": recall,
"val_auc": auc
}, step=epoch)
# Write val metrics
if args.output_dir is not None:
with open(val_metrics_file, 'a') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(
{'epoch': epoch + 1, 'loss': avg_epoch_val_loss, 'best_theta': best_theta, 'f1': f1,
'precision': precision, 'recall': recall, 'auc': auc})
# Save best model so far
if args.output_dir is not None and f1 > best_f1:
logger.info("Best f1 so far, saving model.")
best_f1 = f1
model.save_pretrained(os.path.join(args.output_dir, 'best_f1'))
if epoch + 1 >= args.decay_epoch:
if args.optim == 'adam' and optimizer.param_groups[0]['lr'] > 1e-4:
scheduler.step()
# Save final model
if args.output_dir is not None:
logger.info("Saving final model.")
model.save_pretrained(os.path.join(args.output_dir, 'final'))
if __name__ == "__main__":
args = parse_args()
train_model(args)
|
from fastapi import APIRouter
from fastapi.param_functions import Depends
from starlette.requests import Request
from starlette.responses import RedirectResponse
from dependencies import get_user
import math
router = APIRouter()
def convert_size(size_bytes):
if size_bytes == 0:
return "0MB"
size_name = ("MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def resourcesUsed(servers):
cpu = 0
ram = 0
disk = 0
backups = 0
ports = 0
databases = 0
slots = 0
for server in servers:
cpu += server['attributes']['limits']['cpu']
ram += server['attributes']['limits']['memory']
disk += server['attributes']['limits']['disk']
backups += server['attributes']['feature_limits']['backups']
if server['attributes']['feature_limits']['allocations'] != 0:
ports += server['attributes']['feature_limits']['allocations'] - 1
databases += server['attributes']['feature_limits']['databases']
slots += 1
return {
'cpu': cpu,
'ram': ram,
'disk': disk,
'backups': backups,
'ports': ports,
'databases': databases,
'servers': slots
}
def resourcesTotal(user, plan):
cpu = user['resources']['cpu']
ram = user['resources']['ram']
disk = user['resources']['disk']
backups = user['resources']['backups']
ports = user['resources']['ports']
databases = user['resources']['database']
servers = user['resources']['servers']
cpu += plan['resources']['cpu']
ram += plan['resources']['ram']
disk += plan['resources']['disk']
backups += plan['resources']['backups']
ports += plan['resources']['ports']
databases += plan['resources']['database']
servers += plan['resources']['servers']
return {
'cpu': cpu,
'ram': ram,
'disk': disk,
'backups': backups,
'ports': ports,
'databases': databases,
'servers': servers
}
@router.get("/")
async def dashboard_get(request: Request, user = Depends(get_user)):
ptero = f"{request.app.config["pterodactyl"]["domain"]}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config["pterodactyl"]["domain"]}/"
async with request.app.session.get(f"{ptero}api/application/users/{user["pterodactyl"]["id"]}?include=servers", headers={"Authorization": f"Bearer {request.app.config["pterodactyl"]["key"]}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
for server in servers:
server['attributes']['limits']['memory'] = convert_size(server['attributes']['limits']['memory']) if server['attributes']['limits']['memory'] != 0 else "Unlimited"
server['attributes']['limits']['disk'] = convert_size(server['attributes']['limits']['disk']) if server['attributes']['limits']['disk'] != 0 else "Unlimited"
server['attributes']['limits']['cpu'] = server['attributes']['limits']['cpu'] if server['attributes']['limits']['cpu'] != 0 else "Unlimited"
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/dash.html", {"request": request, "user": user, "servers": servers, "panel": ptero, "config": config})
@router.get("/shop")
async def shop_get(request: Request, user = Depends(get_user)):
ptero = f"{request.app.config["pterodactyl"]["domain"]}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config["pterodactyl"]["domain"]}/"
async with request.app.session.get(f"{ptero}api/application/users/{user["pterodactyl"]["id"]}?include=servers", headers={"Authorization": f"Bearer {request.app.config["pterodactyl"]["key"]}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
plan = await db.plans.find_one({"_id": user['plan']})
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/shop.html", {"request": request, "user": user, "config": config, "resourcesUsed": resourcesUsed(servers), "resourcesTotal": resourcesTotal(user, plan), "config": config})
@router.get("/server/create")
async def create_get(request: Request, user = Depends(get_user)):
ptero = f"{request.app.config["pterodactyl"]["domain"]}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config["pterodactyl"]["domain"]}/"
async with request.app.session.get(f"{ptero}api/application/users/{user["pterodactyl"]["id"]}?include=servers", headers={"Authorization": f"Bearer {request.app.config["pterodactyl"]["key"]}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
resources = resourcesUsed(servers)
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
plan = await db.plans.find_one({"_id": user['plan']})
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/server/create.html", {"request": request, "user": user, "resourcesUsed": resources, "resourcesTotal": resourcesTotal(user, plan), "config": config})
@router.get("/server/edit/{server_id}")
async def create_get(request: Request, server_id, user = Depends(get_user)):
ptero = f"{request.app.config["pterodactyl"]["domain"]}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config["pterodactyl"]["domain"]}/"
async with request.app.session.get(f"{ptero}api/application/users/{user["pterodactyl"]["id"]}?include=servers", headers={"Authorization": f"Bearer {request.app.config["pterodactyl"]["key"]}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
resources = resourcesUsed(servers)
editServer = None
for server in servers:
if server['attributes']['identifier'] == server_id:
editServer = server['attributes']
if not editServer:
return RedirectResponse(url="/", status_code=302)
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
plan = await db.plans.find_one({"_id": user['plan']})
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/server/edit.html", {"request": request, "user": user, "server": editServer, "resourcesUsed": resources, "resourcesTotal": resourcesTotal(user, plan), "config": config})
@router.get("/panel")
async def panel_get(request: Request, user = Depends(get_user)):
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/panel.html", {"request": request, "user": user, "config": config})
|
from fastapi import APIRouter
from fastapi.param_functions import Depends
from starlette.requests import Request
from starlette.responses import RedirectResponse
from dependencies import get_user
import math
router = APIRouter()
def convert_size(size_bytes):
if size_bytes == 0:
return "0MB"
size_name = ("MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
def resourcesUsed(servers):
cpu = 0
ram = 0
disk = 0
backups = 0
ports = 0
databases = 0
slots = 0
for server in servers:
cpu += server['attributes']['limits']['cpu']
ram += server['attributes']['limits']['memory']
disk += server['attributes']['limits']['disk']
backups += server['attributes']['feature_limits']['backups']
if server['attributes']['feature_limits']['allocations'] != 0:
ports += server['attributes']['feature_limits']['allocations'] - 1
databases += server['attributes']['feature_limits']['databases']
slots += 1
return {
'cpu': cpu,
'ram': ram,
'disk': disk,
'backups': backups,
'ports': ports,
'databases': databases,
'servers': slots
}
def resourcesTotal(user, plan):
cpu = user['resources']['cpu']
ram = user['resources']['ram']
disk = user['resources']['disk']
backups = user['resources']['backups']
ports = user['resources']['ports']
databases = user['resources']['database']
servers = user['resources']['servers']
cpu += plan['resources']['cpu']
ram += plan['resources']['ram']
disk += plan['resources']['disk']
backups += plan['resources']['backups']
ports += plan['resources']['ports']
databases += plan['resources']['database']
servers += plan['resources']['servers']
return {
'cpu': cpu,
'ram': ram,
'disk': disk,
'backups': backups,
'ports': ports,
'databases': databases,
'servers': servers
}
@router.get("/")
async def dashboard_get(request: Request, user = Depends(get_user)):
ptero = f"{request.app.config['pterodactyl']['domain']}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config['pterodactyl']['domain']}/"
async with request.app.session.get(f"{ptero}api/application/users/{user['pterodactyl']['id']}?include=servers", headers={"Authorization": f"Bearer {request.app.config['pterodactyl']['key']}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
for server in servers:
server['attributes']['limits']['memory'] = convert_size(server['attributes']['limits']['memory']) if server['attributes']['limits']['memory'] != 0 else "Unlimited"
server['attributes']['limits']['disk'] = convert_size(server['attributes']['limits']['disk']) if server['attributes']['limits']['disk'] != 0 else "Unlimited"
server['attributes']['limits']['cpu'] = server['attributes']['limits']['cpu'] if server['attributes']['limits']['cpu'] != 0 else "Unlimited"
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/dash.html", {"request": request, "user": user, "servers": servers, "panel": ptero, "config": config})
@router.get("/shop")
async def shop_get(request: Request, user = Depends(get_user)):
ptero = f"{request.app.config['pterodactyl']['domain']}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config['pterodactyl']['domain']}/"
async with request.app.session.get(f"{ptero}api/application/users/{user['pterodactyl']['id']}?include=servers", headers={"Authorization": f"Bearer {request.app.config['pterodactyl']['key']}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
plan = await db.plans.find_one({"_id": user['plan']})
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/shop.html", {"request": request, "user": user, "config": config, "resourcesUsed": resourcesUsed(servers), "resourcesTotal": resourcesTotal(user, plan), "config": config})
@router.get("/server/create")
async def create_get(request: Request, user = Depends(get_user)):
ptero = f"{request.app.config['pterodactyl']['domain']}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config['pterodactyl']['domain']}/"
async with request.app.session.get(f"{ptero}api/application/users/{user['pterodactyl']['id']}?include=servers", headers={"Authorization": f"Bearer {request.app.config['pterodactyl']['key']}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
resources = resourcesUsed(servers)
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
plan = await db.plans.find_one({"_id": user['plan']})
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/server/create.html", {"request": request, "user": user, "resourcesUsed": resources, "resourcesTotal": resourcesTotal(user, plan), "config": config})
@router.get("/server/edit/{server_id}")
async def create_get(request: Request, server_id, user = Depends(get_user)):
ptero = f"{request.app.config['pterodactyl']['domain']}" if request.app.config['pterodactyl']['domain'].endswith('/') else f"{request.app.config['pterodactyl']['domain']}/"
async with request.app.session.get(f"{ptero}api/application/users/{user['pterodactyl']['id']}?include=servers", headers={"Authorization": f"Bearer {request.app.config['pterodactyl']['key']}"}) as response:
req = await response.json()
servers = req['attributes']['relationships']['servers']['data']
resources = resourcesUsed(servers)
editServer = None
for server in servers:
if server['attributes']['identifier'] == server_id:
editServer = server['attributes']
if not editServer:
return RedirectResponse(url="/", status_code=302)
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
plan = await db.plans.find_one({"_id": user['plan']})
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/server/edit.html", {"request": request, "user": user, "server": editServer, "resourcesUsed": resources, "resourcesTotal": resourcesTotal(user, plan), "config": config})
@router.get("/panel")
async def panel_get(request: Request, user = Depends(get_user)):
db = await request.app.database.get_db_client()
db = db[request.app.config['database']['database']]
config = await db.config.find_one({"_id": 1})
return request.app.templates.TemplateResponse("dashboard/panel.html", {"request": request, "user": user, "config": config})
|
#!/usr/bin/env python3
from env import env
from run_common import AWSCli
from run_common import print_message
from run_common import print_session
from run_terminate_codebuild_common import run_terminate_vpc_project
from run_terminate_codebuild_common import terminate_all_iam_role_and_policy
from run_terminate_codebuild_common import terminate_all_notification_rule
options, args = dict(), list()
if __name__ == "__main__":
from run_common import parse_args
options, args = parse_args()
def terminate_cron_event(aws_cli, rule_name):
print_message(f'delete events rule: {rule_name}')
cmd = ['events', 'remove-targets']
cmd += ['--rule', rule_name]
cmd += ['--ids', '1']
aws_cli.run(cmd, ignore_error=True)
cmd = ['events', 'delete-rule']
cmd += ['--name', rule_name]
aws_cli.run(cmd, ignore_error=True)
def run_terminate_default_project(name, settings):
aws_region = settings['AWS_REGION']
print_message(f'delete default project: {name}')
aws_cli = AWSCli(aws_region)
cmd = ['codebuild', 'delete-project']
cmd += ['--name', name]
aws_cli.run(cmd, ignore_error=True)
terminate_all_iam_role_and_policy(aws_cli, name, settings)
terminate_all_notification_rule(aws_cli, name, settings)
def run_terminate_github_project(name, settings):
aws_region = settings['AWS_REGION']
print_message(f'delete github project: {name}')
aws_cli = AWSCli(aws_region)
cmd = ['codebuild', 'delete-webhook']
cmd += ['--project-name', name]
aws_cli.run(cmd, ignore_error=True)
for cc in settings.get('CRON', list()):
git_branch = cc['SOURCE_VERSION']
rule_name = f'{name}CronRuleSourceBy{git_branch.title()}'
terminate_cron_event(aws_cli, rule_name)
cmd = ['codebuild', 'delete-project']
cmd += ['--name', name]
aws_cli.run(cmd, ignore_error=True)
terminate_all_iam_role_and_policy(aws_cli, name, settings)
terminate_all_notification_rule(aws_cli, name, settings)
def run_terminate_cron_project(name, settings):
aws_region = settings['AWS_REGION']
git_branch = settings['BRANCH']
print_message(f'delete cron project: {name}')
aws_cli = AWSCli(aws_region)
cmd = ['codebuild', 'delete-project']
cmd += ['--name', name]
aws_cli.run(cmd, ignore_error=True)
rule_name = f'{name}CronRuleSourceBy{git_branch.title()}'
terminate_cron_event(aws_cli, rule_name)
terminate_all_iam_role_and_policy(aws_cli, name, settings)
terminate_all_notification_rule(aws_cli, name, settings)
################################################################################
#
# start
#
################################################################################
print_session('terminate codebuild')
target_name = None
region = options.get('region')
is_target_exists = False
if len(args) > 1:
target_name = args[1]
for settings in env.get('codebuild', list()):
if target_name and settings['NAME'] != target_name:
continue
if region and settings['AWS_REGION'] != region:
continue
is_target_exists = True
if settings['TYPE'] == 'default':
run_terminate_default_project(settings['NAME'], settings)
elif settings['TYPE'] == 'cron':
run_terminate_cron_project(settings['NAME'], settings)
elif settings['TYPE'] == 'github':
run_terminate_github_project(settings['NAME'], settings)
elif settings['TYPE'] == 'vpc':
run_terminate_vpc_project(settings['NAME'], settings)
else:
        print(f'{settings["TYPE"]} is not supported')
raise Exception()
if is_target_exists is False:
mm = list()
if target_name:
mm.append(target_name)
if region:
mm.append(region)
mm = ' in '.join(mm)
print(f'codebuild: {mm} is not found in config.json')
|
#!/usr/bin/env python3
from env import env
from run_common import AWSCli
from run_common import print_message
from run_common import print_session
from run_terminate_codebuild_common import run_terminate_vpc_project
from run_terminate_codebuild_common import terminate_all_iam_role_and_policy
from run_terminate_codebuild_common import terminate_all_notification_rule
options, args = dict(), list()
if __name__ == "__main__":
from run_common import parse_args
options, args = parse_args()
def terminate_cron_event(aws_cli, rule_name):
print_message(f'delete events rule: {rule_name}')
cmd = ['events', 'remove-targets']
cmd += ['--rule', rule_name]
cmd += ['--ids', '1']
aws_cli.run(cmd, ignore_error=True)
cmd = ['events', 'delete-rule']
cmd += ['--name', rule_name]
aws_cli.run(cmd, ignore_error=True)
def run_terminate_default_project(name, settings):
aws_region = settings['AWS_REGION']
print_message(f'delete default project: {name}')
aws_cli = AWSCli(aws_region)
cmd = ['codebuild', 'delete-project']
cmd += ['--name', name]
aws_cli.run(cmd, ignore_error=True)
terminate_all_iam_role_and_policy(aws_cli, name, settings)
terminate_all_notification_rule(aws_cli, name, settings)
def run_terminate_github_project(name, settings):
aws_region = settings['AWS_REGION']
print_message(f'delete github project: {name}')
aws_cli = AWSCli(aws_region)
cmd = ['codebuild', 'delete-webhook']
cmd += ['--project-name', name]
aws_cli.run(cmd, ignore_error=True)
for cc in settings.get('CRON', list()):
git_branch = cc['SOURCE_VERSION']
rule_name = f'{name}CronRuleSourceBy{git_branch.title()}'
terminate_cron_event(aws_cli, rule_name)
cmd = ['codebuild', 'delete-project']
cmd += ['--name', name]
aws_cli.run(cmd, ignore_error=True)
terminate_all_iam_role_and_policy(aws_cli, name, settings)
terminate_all_notification_rule(aws_cli, name, settings)
def run_terminate_cron_project(name, settings):
aws_region = settings['AWS_REGION']
git_branch = settings['BRANCH']
print_message(f'delete cron project: {name}')
aws_cli = AWSCli(aws_region)
cmd = ['codebuild', 'delete-project']
cmd += ['--name', name]
aws_cli.run(cmd, ignore_error=True)
rule_name = f'{name}CronRuleSourceBy{git_branch.title()}'
terminate_cron_event(aws_cli, rule_name)
terminate_all_iam_role_and_policy(aws_cli, name, settings)
terminate_all_notification_rule(aws_cli, name, settings)
################################################################################
#
# start
#
################################################################################
print_session('terminate codebuild')
target_name = None
region = options.get('region')
is_target_exists = False
if len(args) > 1:
target_name = args[1]
for settings in env.get('codebuild', list()):
if target_name and settings['NAME'] != target_name:
continue
if region and settings['AWS_REGION'] != region:
continue
is_target_exists = True
if settings['TYPE'] == 'default':
run_terminate_default_project(settings['NAME'], settings)
elif settings['TYPE'] == 'cron':
run_terminate_cron_project(settings['NAME'], settings)
elif settings['TYPE'] == 'github':
run_terminate_github_project(settings['NAME'], settings)
elif settings['TYPE'] == 'vpc':
run_terminate_vpc_project(settings['NAME'], settings)
else:
print(f'{settings["TYPE"]} is not supported')
raise Exception()
if is_target_exists is False:
mm = list()
if target_name:
mm.append(target_name)
if region:
mm.append(region)
mm = ' in '.join(mm)
print(f'codebuild: {mm} is not found in config.json')
|
import discord
import time
import os
import psutil
import datetime
from datetime import datetime
from discord.ext import commands
from utils import default, repo
class Information(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process(os.getpid())
if not hasattr(self.bot, 'uptime'):
            self.bot.uptime = datetime.now()
@commands.command()
async def ping(self, ctx):
"""Measures ping"""
before = time.monotonic()
message = await ctx.send('Pong!')
ping = (time.monotonic() - before)
        await message.edit(content=f'Pong! | `{int(ping * 1000)} ms`')
@commands.command(aliases=['botinfo', 'botstats', 'botstatus'])
async def aboutbot(self, ctx):
""" About the bot """
ram_usage = self.process.memory_full_info().rss / 1024**2
avg_members = round(len(self.bot.users) / len(self.bot.guilds))
embed = discord.Embed(colour=ctx.me.top_role.colour)
embed.set_thumbnail(url=ctx.bot.user.avatar_url)
embed.add_field(name="Last boot", value=default.timeago(datetime.now() - self.bot.uptime), inline=True)
embed.add_field(
name=f"Developer: Ari Bowe",
# value=', '.join([str(self.bot.get_user(x)) for x in self.config.owners]),
inline=True)
embed.add_field(name="Library", value="discord.py", inline=True)
embed.add_field(name="Servers", value=f"{len(ctx.bot.guilds)} ( avg: {avg_members} users/server )", inline=True)
embed.add_field(name="Commands loaded", value=str(len([x.name for x in self.bot.commands])), inline=True)
embed.add_field(name="RAM", value=f"{ram_usage:.2f} MB", inline=True)
await ctx.send(content=f"ℹ About **{ctx.bot.user}** | **{repo.VERSION_DATA["Version"]}**", embed=embed)
@commands.command()
async def about(self, ctx, member: discord.Member = None):
"""About a member"""
if member is None:
author = ctx.message.author
embed = discord.Embed(colour=author.colour)
embed.set_thumbnail(url=author.avatar_url)
embed.add_field(name='Joined at', value=author.joined_at, inline=True)
embed.add_field(name='Nickname', value=author.nick if author.nick else 'N/A', inline=True)
embed.add_field(name='Status', value=author.status, inline=True)
embed.add_field(name='Animated Avatar', value='Yes' if author.is_avatar_animated() else 'No', inline=True)
roles = []
for role in author.roles:
roles.append(role.name)
roles.pop(0)
embed.add_field(name='Roles', value='N/A' if len(roles) == 0 else ', '.join(roles), inline=True)
await ctx.send(content='ℹ About **yourself**!', embed=embed)
else:
embed = discord.Embed(colour=member.colour)
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name='Joined at', value=member.joined_at, inline=True)
embed.add_field(name='Nickname', value=member.nick if member.nick else 'N/A', inline=True)
embed.add_field(name='Status', value=member.status, inline=True)
embed.add_field(name='Animated Avatar', value='Yes' if member.is_avatar_animated() else 'No', inline=True)
roles = []
for role in member.roles:
roles.append(role.name)
roles.pop(0)
embed.add_field(name='Roles', value='N/A' if len(roles) == 0 else ', '.join(roles), inline=True)
await ctx.send(content=f'ℹ About **{member.display_name}**', embed=embed)
def setup(bot):
bot.add_cog(Information(bot))
|
import discord
import time
import os
import psutil
import datetime
from datetime import datetime
from discord.ext import commands
from utils import default, repo
class Information(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process(os.getpid())
if not hasattr(self.bot, 'uptime'):
            self.bot.uptime = datetime.now()
@commands.command()
async def ping(self, ctx):
"""Measures ping"""
before = time.monotonic()
message = await ctx.send('Pong!')
ping = (time.monotonic() - before)
        await message.edit(content=f'Pong! | `{int(ping * 1000)} ms`')
@commands.command(aliases=['botinfo', 'botstats', 'botstatus'])
async def aboutbot(self, ctx):
""" About the bot """
ram_usage = self.process.memory_full_info().rss / 1024**2
avg_members = round(len(self.bot.users) / len(self.bot.guilds))
embed = discord.Embed(colour=ctx.me.top_role.colour)
embed.set_thumbnail(url=ctx.bot.user.avatar_url)
embed.add_field(name="Last boot", value=default.timeago(datetime.now() - self.bot.uptime), inline=True)
embed.add_field(
name=f"Developer: Ari Bowe",
# value=', '.join([str(self.bot.get_user(x)) for x in self.config.owners]),
inline=True)
embed.add_field(name="Library", value="discord.py", inline=True)
embed.add_field(name="Servers", value=f"{len(ctx.bot.guilds)} ( avg: {avg_members} users/server )", inline=True)
embed.add_field(name="Commands loaded", value=str(len([x.name for x in self.bot.commands])), inline=True)
embed.add_field(name="RAM", value=f"{ram_usage:.2f} MB", inline=True)
await ctx.send(content=f"ℹ About **{ctx.bot.user}** | **{repo.VERSION_DATA['Version']}**", embed=embed)
@commands.command()
async def about(self, ctx, member: discord.Member = None):
"""About a member"""
if member is None:
author = ctx.message.author
embed = discord.Embed(colour=author.colour)
embed.set_thumbnail(url=author.avatar_url)
embed.add_field(name='Joined at', value=author.joined_at, inline=True)
embed.add_field(name='Nickname', value=author.nick if author.nick else 'N/A', inline=True)
embed.add_field(name='Status', value=author.status, inline=True)
embed.add_field(name='Animated Avatar', value='Yes' if author.is_avatar_animated() else 'No', inline=True)
roles = []
for role in author.roles:
roles.append(role.name)
roles.pop(0)
embed.add_field(name='Roles', value='N/A' if len(roles) == 0 else ', '.join(roles), inline=True)
await ctx.send(content='ℹ About **yourself**!', embed=embed)
else:
embed = discord.Embed(colour=member.colour)
embed.set_thumbnail(url=member.avatar_url)
embed.add_field(name='Joined at', value=member.joined_at, inline=True)
embed.add_field(name='Nickname', value=member.nick if member.nick else 'N/A', inline=True)
embed.add_field(name='Status', value=member.status, inline=True)
embed.add_field(name='Animated Avatar', value='Yes' if member.is_avatar_animated() else 'No', inline=True)
roles = []
for role in member.roles:
roles.append(role.name)
roles.pop(0)
embed.add_field(name='Roles', value='N/A' if len(roles) == 0 else ', '.join(roles), inline=True)
await ctx.send(content=f'ℹ About **{member.display_name}**', embed=embed)
def setup(bot):
bot.add_cog(Information(bot))
|
import json
import os
import unittest
import warnings
import yaml
from checkov.terraform import checks
from checkov.common.checks_infra.checks_parser import NXGraphCheckParser
from checkov.common.checks_infra.registry import Registry
from checkov.common.models.enums import CheckResult
from typing import List
from pathlib import Path
from checkov.terraform.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestYamlPolicies(unittest.TestCase):
def setUp(self) -> None:
os.environ['UNIQUE_TAG'] = ''
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def test_VPCHasFlowLog(self):
self.go("VPCHasFlowLog")
def test_VPCHasRestrictedSG(self):
self.go("VPCHasRestrictedSG")
def test_APIGWLoggingLevelsDefinedProperly(self):
self.go("APIGWLoggingLevelsDefinedProperly")
def test_GuardDutyIsEnabled(self):
self.go("GuardDutyIsEnabled")
def test_SGAttachedToResource(self):
self.go("SGAttachedToResource")
def test_StorageContainerActivityLogsNotPublic(self):
self.go("StorageContainerActivityLogsNotPublic")
def test_StorageCriticalDataEncryptedCMK(self):
self.go("StorageCriticalDataEncryptedCMK")
def test_VAconfiguredToSendReports(self):
self.go("VAconfiguredToSendReports")
def test_VAconfiguredToSendReportsToAdmins(self):
self.go("VAconfiguredToSendReportsToAdmins")
def test_VAisEnabledInStorageAccount(self):
self.go("VAisEnabledInStorageAccount")
def test_VAsetPeriodicScansOnSQL(self):
self.go("VAsetPeriodicScansOnSQL")
def test_CloudFrontHasSecurityHeadersPolicy(self):
self.go("CloudFrontHasSecurityHeadersPolicy")
def test_CloudtrailHasCloudwatch(self):
self.go("CloudtrailHasCloudwatch")
def test_S3BucketHasPublicAccessBlock(self):
self.go("S3BucketHasPublicAccessBlock")
def test_AccessToPostgreSQLFromAzureServicesIsDisabled(self):
self.go("AccessToPostgreSQLFromAzureServicesIsDisabled")
def test_AzureActiveDirectoryAdminIsConfigured(self):
self.go("AzureActiveDirectoryAdminIsConfigured")
def test_DisableAccessToSqlDBInstanceForRootUsersWithoutPassword(self):
self.go("DisableAccessToSqlDBInstanceForRootUsersWithoutPassword")
def test_GCPProjectHasNoLegacyNetworks(self):
self.go("GCPProjectHasNoLegacyNetworks")
def test_AzureDataFactoriesEncryptedWithCustomerManagedKey(self):
self.go("AzureDataFactoriesEncryptedWithCustomerManagedKey")
def test_AzureUnattachedDisksAreEncrypted(self):
self.go("AzureUnattachedDisksAreEncrypted")
def test_AzureNetworkInterfacePublicIPAddressId(self):
self.go("AzureNetworkInterfacePublicIPAddressId")
def test_AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs(self):
self.go("AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs")
def test_ALBRedirectsHTTPToHTTPS(self):
self.go("ALBRedirectsHTTPToHTTPS")
def test_GCPLogBucketsConfiguredUsingLock(self):
self.go("GCPLogBucketsConfiguredUsingLock")
def test_GCPAuditLogsConfiguredForAllServicesAndUsers(self):
self.go("GCPAuditLogsConfiguredForAllServicesAndUsers")
def test_GCPKMSCryptoKeysAreNotPubliclyAccessible(self):
self.go("GCPKMSCryptoKeysAreNotPubliclyAccessible")
def test_VirtualMachinesUtilizingManagedDisks(self):
self.go("VirtualMachinesUtilizingManagedDisks")
def test_RDSClusterHasBackupPlan(self):
self.go("RDSClusterHasBackupPlan")
def test_EBSAddedBackup(self):
self.go("EBSAddedBackup")
def test_AMRClustersNotOpenToInternet(self):
self.go("AMRClustersNotOpenToInternet")
def test_AutoScallingEnabledELB(self):
self.go("AutoScallingEnabledELB")
def test_IAMGroupHasAtLeastOneUser(self):
self.go("IAMGroupHasAtLeastOneUser")
def test_IAMUserHasNoConsoleAccess(self):
self.go("IAMUserHasNoConsoleAccess")
def test_IAMUsersAreMembersAtLeastOneGroup(self):
self.go("IAMUsersAreMembersAtLeastOneGroup")
def test_DataExplorerEncryptionUsesCustomKey(self):
self.go("DataExplorerEncryptionUsesCustomKey")
def test_MSQLenablesCustomerManagedKey(self):
self.go("MSQLenablesCustomerManagedKey")
def test_PGSQLenablesCustomerManagedKey(self):
self.go("PGSQLenablesCustomerManagedKey")
def test_StorageLoggingIsEnabledForBlobService(self):
self.go("StorageLoggingIsEnabledForBlobService")
def test_StorageLoggingIsEnabledForTableService(self):
self.go("StorageLoggingIsEnabledForTableService")
def test_VMHasBackUpMachine(self):
self.go("VMHasBackUpMachine")
def test_SubnetHasACL(self):
self.go("SubnetHasACL")
def test_GKEClustersAreNotUsingDefaultServiceAccount(self):
self.go("GKEClustersAreNotUsingDefaultServiceAccount")
def test_AzureStorageAccountsUseCustomerManagedKeyForEncryption(self):
self.go("AzureStorageAccountsUseCustomerManagedKeyForEncryption")
def test_AzureMSSQLServerHasSecurityAlertPolicy(self):
self.go("AzureMSSQLServerHasSecurityAlertPolicy")
def test_AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached(self):
self.go("AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached")
def test_EncryptedEBSVolumeOnlyConnectedToEC2s(self):
self.go("EncryptedEBSVolumeOnlyConnectedToEC2s")
def test_ServiceAccountHasGCPmanagedKey(self):
self.go("ServiceAccountHasGCPmanagedKey")
def test_AutoScalingEnableOnDynamoDBTables(self):
self.go("AutoScalingEnableOnDynamoDBTables")
def test_EIPAllocatedToVPCAttachedEC2(self):
self.go("EIPAllocatedToVPCAttachedEC2")
def test_EFSAddedBackup(self):
self.go("EFSAddedBackup")
def test_EFSAddedBackupSuppress(self):
self.go("EFSAddedBackupSuppress", "EFSAddedBackup")
def test_Route53ARecordAttachedResource(self):
self.go("Route53ARecordAttachedResource")
def test_PostgresRDSHasQueryLoggingEnabled(self):
self.go("PostgresRDSHasQueryLoggingEnabled")
def test_PostgresDBHasQueryLoggingEnabled(self):
self.go("PostgresDBHasQueryLoggingEnabled")
def test_ALBProtectedByWAF(self):
self.go("ALBProtectedByWAF")
def test_APIProtectedByWAF(self):
self.go("APIProtectedByWAF")
def test_SQLServerAuditingEnabled(self):
self.go("SQLServerAuditingEnabled")
def test_WAF2HasLogs(self):
self.go("WAF2HasLogs")
def test_AppSyncProtectedByWAF(self):
self.go("AppSyncProtectedByWAF")
def test_SQLServerAuditingRetention90Days(self):
self.go("SQLServerAuditingRetention90Days")
def test_AWSSSMParameterShouldBeEncrypted(self):
self.go("AWSSSMParametershouldbeEncrypted", "AWSSSMParameterShouldBeEncrypted")
def test_registry_load(self):
registry = Registry(parser=NXGraphCheckParser(), checks_dir=str(
Path(__file__).parent.parent.parent.parent.parent / "checkov" / "terraform" / "checks" / "graph_checks"))
registry.load_checks()
self.assertGreater(len(registry.checks), 0)
def go(self, dir_name, check_name=None):
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
f"resources/{dir_name}")
assert os.path.exists(dir_path)
policy_dir_path = os.path.dirname(checks.__file__)
assert os.path.exists(policy_dir_path)
found = False
for root, d_names, f_names in os.walk(policy_dir_path):
for f_name in f_names:
check_name = dir_name if check_name is None else check_name
if f_name == f"{check_name}.yaml":
found = True
policy = load_yaml_data(f_name, root)
assert policy is not None
expected = load_yaml_data("expected.yaml", dir_path)
assert expected is not None
report = get_policy_results(dir_path, policy)
expected = load_yaml_data("expected.yaml", dir_path)
expected_to_fail = expected.get('fail', [])
expected_to_pass = expected.get('pass', [])
expected_to_skip = expected.get('skip', [])
self.assert_entities(expected_to_pass, report.passed_checks, True)
self.assert_entities(expected_to_fail, report.failed_checks, False)
self.assert_entities(expected_to_skip, report.skipped_checks, True)
assert found
def assert_entities(self, expected_entities: List[str], results: List[CheckResult], assertion: bool):
self.assertEqual(len(expected_entities), len(results),
f"mismatch in number of results in {"passed" if assertion else "failed"}, "
f"expected: {len(expected_entities)}, got: {len(results)}")
for expected_entity in expected_entities:
found = False
for check_result in results:
entity_id = check_result.resource
if entity_id == expected_entity:
found = True
break
self.assertTrue(found, f"expected to find entity {expected_entity}, {"passed" if assertion else "failed"}")
def get_policy_results(root_folder, policy):
check_id = policy['metadata']['id']
graph_runner = Runner()
report = graph_runner.run(root_folder, runner_filter=RunnerFilter(checks=[check_id]))
return report
def wrap_policy(policy):
policy['query'] = policy['definition']
del policy['definition']
def load_yaml_data(source_file_name, dir_path):
expected_path = os.path.join(dir_path, source_file_name)
if not os.path.exists(expected_path):
return None
with open(expected_path, "r") as f:
expected_data = yaml.safe_load(f)
return json.loads(json.dumps(expected_data))
|
import json
import os
import unittest
import warnings
import yaml
from checkov.terraform import checks
from checkov.common.checks_infra.checks_parser import NXGraphCheckParser
from checkov.common.checks_infra.registry import Registry
from checkov.common.models.enums import CheckResult
from typing import List
from pathlib import Path
from checkov.terraform.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestYamlPolicies(unittest.TestCase):
def setUp(self) -> None:
os.environ['UNIQUE_TAG'] = ''
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def test_VPCHasFlowLog(self):
self.go("VPCHasFlowLog")
def test_VPCHasRestrictedSG(self):
self.go("VPCHasRestrictedSG")
def test_APIGWLoggingLevelsDefinedProperly(self):
self.go("APIGWLoggingLevelsDefinedProperly")
def test_GuardDutyIsEnabled(self):
self.go("GuardDutyIsEnabled")
def test_SGAttachedToResource(self):
self.go("SGAttachedToResource")
def test_StorageContainerActivityLogsNotPublic(self):
self.go("StorageContainerActivityLogsNotPublic")
def test_StorageCriticalDataEncryptedCMK(self):
self.go("StorageCriticalDataEncryptedCMK")
def test_VAconfiguredToSendReports(self):
self.go("VAconfiguredToSendReports")
def test_VAconfiguredToSendReportsToAdmins(self):
self.go("VAconfiguredToSendReportsToAdmins")
def test_VAisEnabledInStorageAccount(self):
self.go("VAisEnabledInStorageAccount")
def test_VAsetPeriodicScansOnSQL(self):
self.go("VAsetPeriodicScansOnSQL")
def test_CloudFrontHasSecurityHeadersPolicy(self):
self.go("CloudFrontHasSecurityHeadersPolicy")
def test_CloudtrailHasCloudwatch(self):
self.go("CloudtrailHasCloudwatch")
def test_S3BucketHasPublicAccessBlock(self):
self.go("S3BucketHasPublicAccessBlock")
def test_AccessToPostgreSQLFromAzureServicesIsDisabled(self):
self.go("AccessToPostgreSQLFromAzureServicesIsDisabled")
def test_AzureActiveDirectoryAdminIsConfigured(self):
self.go("AzureActiveDirectoryAdminIsConfigured")
def test_DisableAccessToSqlDBInstanceForRootUsersWithoutPassword(self):
self.go("DisableAccessToSqlDBInstanceForRootUsersWithoutPassword")
def test_GCPProjectHasNoLegacyNetworks(self):
self.go("GCPProjectHasNoLegacyNetworks")
def test_AzureDataFactoriesEncryptedWithCustomerManagedKey(self):
self.go("AzureDataFactoriesEncryptedWithCustomerManagedKey")
def test_AzureUnattachedDisksAreEncrypted(self):
self.go("AzureUnattachedDisksAreEncrypted")
def test_AzureNetworkInterfacePublicIPAddressId(self):
self.go("AzureNetworkInterfacePublicIPAddressId")
def test_AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs(self):
self.go("AzureAntimalwareIsConfiguredWithAutoUpdatesForVMs")
def test_ALBRedirectsHTTPToHTTPS(self):
self.go("ALBRedirectsHTTPToHTTPS")
def test_GCPLogBucketsConfiguredUsingLock(self):
self.go("GCPLogBucketsConfiguredUsingLock")
def test_GCPAuditLogsConfiguredForAllServicesAndUsers(self):
self.go("GCPAuditLogsConfiguredForAllServicesAndUsers")
def test_GCPKMSCryptoKeysAreNotPubliclyAccessible(self):
self.go("GCPKMSCryptoKeysAreNotPubliclyAccessible")
def test_VirtualMachinesUtilizingManagedDisks(self):
self.go("VirtualMachinesUtilizingManagedDisks")
def test_RDSClusterHasBackupPlan(self):
self.go("RDSClusterHasBackupPlan")
def test_EBSAddedBackup(self):
self.go("EBSAddedBackup")
def test_AMRClustersNotOpenToInternet(self):
self.go("AMRClustersNotOpenToInternet")
def test_AutoScallingEnabledELB(self):
self.go("AutoScallingEnabledELB")
def test_IAMGroupHasAtLeastOneUser(self):
self.go("IAMGroupHasAtLeastOneUser")
def test_IAMUserHasNoConsoleAccess(self):
self.go("IAMUserHasNoConsoleAccess")
def test_IAMUsersAreMembersAtLeastOneGroup(self):
self.go("IAMUsersAreMembersAtLeastOneGroup")
def test_DataExplorerEncryptionUsesCustomKey(self):
self.go("DataExplorerEncryptionUsesCustomKey")
def test_MSQLenablesCustomerManagedKey(self):
self.go("MSQLenablesCustomerManagedKey")
def test_PGSQLenablesCustomerManagedKey(self):
self.go("PGSQLenablesCustomerManagedKey")
def test_StorageLoggingIsEnabledForBlobService(self):
self.go("StorageLoggingIsEnabledForBlobService")
def test_StorageLoggingIsEnabledForTableService(self):
self.go("StorageLoggingIsEnabledForTableService")
def test_VMHasBackUpMachine(self):
self.go("VMHasBackUpMachine")
def test_SubnetHasACL(self):
self.go("SubnetHasACL")
def test_GKEClustersAreNotUsingDefaultServiceAccount(self):
self.go("GKEClustersAreNotUsingDefaultServiceAccount")
def test_AzureStorageAccountsUseCustomerManagedKeyForEncryption(self):
self.go("AzureStorageAccountsUseCustomerManagedKeyForEncryption")
def test_AzureMSSQLServerHasSecurityAlertPolicy(self):
self.go("AzureMSSQLServerHasSecurityAlertPolicy")
def test_AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached(self):
self.go("AzureSynapseWorkspacesHaveNoIPFirewallRulesAttached")
def test_EncryptedEBSVolumeOnlyConnectedToEC2s(self):
self.go("EncryptedEBSVolumeOnlyConnectedToEC2s")
def test_ServiceAccountHasGCPmanagedKey(self):
self.go("ServiceAccountHasGCPmanagedKey")
def test_AutoScalingEnableOnDynamoDBTables(self):
self.go("AutoScalingEnableOnDynamoDBTables")
def test_EIPAllocatedToVPCAttachedEC2(self):
self.go("EIPAllocatedToVPCAttachedEC2")
def test_EFSAddedBackup(self):
self.go("EFSAddedBackup")
def test_EFSAddedBackupSuppress(self):
self.go("EFSAddedBackupSuppress", "EFSAddedBackup")
def test_Route53ARecordAttachedResource(self):
self.go("Route53ARecordAttachedResource")
def test_PostgresRDSHasQueryLoggingEnabled(self):
self.go("PostgresRDSHasQueryLoggingEnabled")
def test_PostgresDBHasQueryLoggingEnabled(self):
self.go("PostgresDBHasQueryLoggingEnabled")
def test_ALBProtectedByWAF(self):
self.go("ALBProtectedByWAF")
def test_APIProtectedByWAF(self):
self.go("APIProtectedByWAF")
def test_SQLServerAuditingEnabled(self):
self.go("SQLServerAuditingEnabled")
def test_WAF2HasLogs(self):
self.go("WAF2HasLogs")
def test_AppSyncProtectedByWAF(self):
self.go("AppSyncProtectedByWAF")
def test_SQLServerAuditingRetention90Days(self):
self.go("SQLServerAuditingRetention90Days")
def test_AWSSSMParameterShouldBeEncrypted(self):
self.go("AWSSSMParametershouldbeEncrypted", "AWSSSMParameterShouldBeEncrypted")
def test_registry_load(self):
registry = Registry(parser=NXGraphCheckParser(), checks_dir=str(
Path(__file__).parent.parent.parent.parent.parent / "checkov" / "terraform" / "checks" / "graph_checks"))
registry.load_checks()
self.assertGreater(len(registry.checks), 0)
def go(self, dir_name, check_name=None):
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
f"resources/{dir_name}")
assert os.path.exists(dir_path)
policy_dir_path = os.path.dirname(checks.__file__)
assert os.path.exists(policy_dir_path)
found = False
for root, d_names, f_names in os.walk(policy_dir_path):
for f_name in f_names:
check_name = dir_name if check_name is None else check_name
if f_name == f"{check_name}.yaml":
found = True
policy = load_yaml_data(f_name, root)
assert policy is not None
expected = load_yaml_data("expected.yaml", dir_path)
assert expected is not None
report = get_policy_results(dir_path, policy)
expected = load_yaml_data("expected.yaml", dir_path)
expected_to_fail = expected.get('fail', [])
expected_to_pass = expected.get('pass', [])
expected_to_skip = expected.get('skip', [])
self.assert_entities(expected_to_pass, report.passed_checks, True)
self.assert_entities(expected_to_fail, report.failed_checks, False)
self.assert_entities(expected_to_skip, report.skipped_checks, True)
assert found
def assert_entities(self, expected_entities: List[str], results: List[CheckResult], assertion: bool):
self.assertEqual(len(expected_entities), len(results),
f"mismatch in number of results in {'passed' if assertion else 'failed'}, "
f"expected: {len(expected_entities)}, got: {len(results)}")
for expected_entity in expected_entities:
found = False
for check_result in results:
entity_id = check_result.resource
if entity_id == expected_entity:
found = True
break
self.assertTrue(found, f"expected to find entity {expected_entity}, {'passed' if assertion else 'failed'}")
def get_policy_results(root_folder, policy):
check_id = policy['metadata']['id']
graph_runner = Runner()
report = graph_runner.run(root_folder, runner_filter=RunnerFilter(checks=[check_id]))
return report
def wrap_policy(policy):
policy['query'] = policy['definition']
del policy['definition']
def load_yaml_data(source_file_name, dir_path):
expected_path = os.path.join(dir_path, source_file_name)
if not os.path.exists(expected_path):
return None
with open(expected_path, "r") as f:
expected_data = yaml.safe_load(f)
return json.loads(json.dumps(expected_data))
|
# ---
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# jupyter:
# jupytext:
# formats: ipynb,md:myst,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.0
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# [](https://colab.research.google.com/github/google/jax/blob/main/docs/autodidax.ipynb)
# # Autodidax: JAX core from scratch
#
# Ever want to learn how JAX works, but the implementation seemed impenetrable?
# Well, you're in luck! By reading this tutorial, you'll learn every big idea in
# JAX's core system. You'll even get clued into our weird jargon!
#
# **This is a work-in-progress draft.** There are some important ingredients
# missing, still to come in parts 5 and 6 (and more?). There are also some
# simplifications here that we haven't yet applied to the main system, but we
# will.
# ## Part 1: Transformations as interpreters: standard evaluation, `jvp`, and `vmap`
#
# We want to transform functions that look like this:
#
# ```python
# def f(x):
# y = sin(x) * 2.
# z = - y + x
# return z
# ```
#
# Think of functions like `sin` and the arithmetic operations underlying the
# infix operators (`mul`, `add`, and `neg`) as primitive operations, meaning
# atomic units of processing rather than compositions.
#
# "Transform" means "interpret differently." Instead of standard interpretation
# where we apply primitive operations to numerical inputs to produce numerical
# outputs, we want to override primitive application and let different values
# flow through our program. For example, we might want to replace the
# application of every primitive with an application of [its JVP
# rule](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html),
# and let primal-tangent pairs flow through our program. Moreover, we want to be
# able to compose multiple transformations, leading to stacks of interpreters.
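# A tiny, standalone illustration (not the machinery we are about to build): the
# JVP rule for `sin` maps a primal-tangent pair (x, dx) to (sin(x), cos(x) * dx),
# and forward-mode autodiff pushes such pairs through every primitive.
# +
import math
def sin_jvp_sketch(x, dx):
  # standalone sketch of sin's JVP rule: returns (primal out, tangent out)
  return math.sin(x), math.cos(x) * dx
print(sin_jvp_sketch(3.0, 1.0))  # (0.1411..., -0.9899...)
# -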
# ### JAX core machinery
#
# We can implement stacks of interpreters and even have them all discharge on
# the fly as we execute the Python function to be transformed. To start, let's
# define these primitives so that we can intercept their application:
# +
from typing import NamedTuple
class Primitive(NamedTuple):
name: str
add_p = Primitive('add')
mul_p = Primitive('mul')
neg_p = Primitive("neg")
sin_p = Primitive("sin")
cos_p = Primitive("cos")
reduce_sum_p = Primitive("reduce_sum")
greater_p = Primitive("greater")
less_p = Primitive("less")
transpose_p = Primitive("transpose")
broadcast_p = Primitive("broadcast")
def add(x, y): return bind1(add_p, x, y)
def mul(x, y): return bind1(mul_p, x, y)
def neg(x): return bind1(neg_p, x)
def sin(x): return bind1(sin_p, x)
def cos(x): return bind1(cos_p, x)
def reduce_sum(x, axis=None): return bind1(reduce_sum_p, x, axis=axis)
def greater(x, y): return bind1(greater_p, x, y)
def less(x, y): return bind1(less_p, x, y)
def transpose(x, perm): return bind1(transpose_p, x, perm=perm)
def broadcast(x, shape, axes): return bind1(broadcast_p, x, shape=shape, axes=axes)
def bind1(prim, *args, **params):
out, = bind(prim, *args, **params)
return out
# -
# We'll set up array data types and infix operator methods in a moment.
#
# A `Primitive` is just an object with a name, to which we attach our
# interpretation rules (one for each transformation). The `bind` function is our
# interception point: it'll figure out which transformation rule to apply, based
# on how the arguments are boxed in tracers and what interpreters are active.
#
# The functions that user code calls, like `add` and `sin`, are just wrappers
# around calls to `bind`. These wrappers let us control how arguments are passed
# to `bind`, and in particular we follow a handy internal convention: when we
# call `bind`, we pass values representing array data as positional arguments,
# and we pass metadata like the `axis` argument to `sum_p` via keyword. This
# calling convention simplifies some core logic (since e.g. instances of the
# `Tracer` class to be defined below can only occur in positional arguments to
# `bind`). The wrappers can also provide docstrings!
#
# We represent active interpreters as a stack. The stack is just a simple
# `list`, and each element is a container with an integer level (corresponding
# to the element's height in the stack), an interpreter type (which we'll call a
# `trace_type`), and an optional field for any global data the interpreter
# needs. We call each element a `MainTrace`, though maybe "Interpreter" would be
# more descriptive.
# +
from contextlib import contextmanager
from typing import Type, List, Tuple, Sequence, Optional, Any
class MainTrace(NamedTuple):
level: int
trace_type: Type['Trace']
global_data: Optional[Any]
trace_stack: List[MainTrace] = []
dynamic_trace: Optional[MainTrace] = None # to be employed in Part 3
@contextmanager
def new_main(trace_type: Type['Trace'], global_data=None):
level = len(trace_stack)
main = MainTrace(level, trace_type, global_data)
trace_stack.append(main)
try:
yield main
finally:
trace_stack.pop()
# -
# When we're about to apply a transformation, we'll push another interpreter
# onto the stack using `new_main`. Then, as we apply primitives in the function,
# we can think of the `bind` first being interpreted by the trace at the top of
# the stack (i.e. with the highest level). If that first interpreter itself
# binds other primitives in its interpretation rule for the primitive, like how
# the JVP rule of `sin_p` might bind `cos_p` and `mul_p`, then those `bind`
# calls will be handled by the interpreter at the next level down.
#
# What goes at the bottom of the interpreter stack? At the bottom, we know all
# the transformation interpreters are finished, and we just want to do standard
# evaluation. So at the bottom we'll put an evaluation interpreter.
#
# Let's sketch out the interface for interpreters, which is based on the `Trace`
# and `Tracer` base classes. A `Tracer` represents a boxed-up value, perhaps
# carrying some extra context data used by the interpreter. A `Trace` handles
# boxing up values into `Tracers` and also handles primitive application.
class Trace:
main: MainTrace
def __init__(self, main: MainTrace) -> None:
self.main = main
def pure(self, val): assert False # must override
def lift(self, val): assert False # must override
def process_primitive(self, primitive, tracers, params):
assert False # must override
# The first two methods are about boxing up values in `Tracer`s, which are the
# objects that flow through the Python programs we transform. The last method is
# the callback we'll use to interpret primitive application.
#
# The `Trace` itself doesn't contain any data, other than a reference to its
# corresponding `MainTrace` instance. In fact, multiple instances of a `Trace`
# might be created and discarded during an application of a transformation,
# whereas only a single `MainTrace` instance is created per application of a
# transformation.
#
# As for `Tracer`s themselves, each one carries an abstract value (and forwards
# infix operators to it), and the rest is up to the transformation. (The
# relationship between `Tracer`s and `AbstractValue`s is that there's one
# `Tracer` per transformation, and at least one `AbstractValue` per base type,
# like arrays.)
# +
import numpy as np
class Tracer:
_trace: Trace
__array_priority__ = 1000
@property
def aval(self):
assert False # must override
def full_lower(self):
return self # default implementation
def __neg__(self): return self.aval._neg(self)
def __add__(self, other): return self.aval._add(self, other)
def __radd__(self, other): return self.aval._radd(self, other)
def __mul__(self, other): return self.aval._mul(self, other)
def __rmul__(self, other): return self.aval._rmul(self, other)
def __gt__(self, other): return self.aval._gt(self, other)
def __lt__(self, other): return self.aval._lt(self, other)
def __bool__(self): return self.aval._bool(self)
def __nonzero__(self): return self.aval._nonzero(self)
def __getattr__(self, name):
try:
return getattr(self.aval, name)
except AttributeError:
raise AttributeError(f"{self.__class__.__name__} has no attribute {name}")
def swap(f): return lambda x, y: f(y, x)
# +
class ShapedArray:
array_abstraction_level = 1
shape: Tuple[int]
dtype: np.dtype
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
@property
def ndim(self):
return len(self.shape)
_neg = staticmethod(neg)
_add = staticmethod(add)
_radd = staticmethod(swap(add))
_mul = staticmethod(mul)
_rmul = staticmethod(swap(mul))
_gt = staticmethod(greater)
_lt = staticmethod(less)
@staticmethod
def _bool(tracer):
raise Exception("ShapedArray can't be unambiguously converted to bool")
@staticmethod
def _nonzero(tracer):
raise Exception("ShapedArray can't be unambiguously converted to bool")
def str_short(self):
    return f'{self.dtype.name}[{",".join(str(d) for d in self.shape)}]'
def __hash__(self):
return hash((self.shape, self.dtype))
def __eq__(self, other):
return (type(self) is type(other) and
self.shape == other.shape and self.dtype == other.dtype)
def __repr__(self):
return f"ShapedArray(shape={self.shape}, dtype={self.dtype})"
class ConcreteArray(ShapedArray):
array_abstraction_level = 2
val: np.ndarray
def __init__(self, val):
self.val = val
self.shape = val.shape
self.dtype = val.dtype
@staticmethod
def _bool(tracer):
return bool(tracer.aval.val)
@staticmethod
def _nonzero(tracer):
return bool(tracer.aval.val)
def get_aval(x):
if isinstance(x, Tracer):
return x.aval
elif type(x) in jax_types:
return ConcreteArray(np.asarray(x))
else:
raise TypeError(x)
jax_types = {bool, int, float,
np.bool_, np.int32, np.int64, np.float32, np.float64, np.ndarray}
# -
# Notice that we actually have two `AbstractValue`s for arrays, representing
# different levels of abstraction. A `ShapedArray` represents the set of all
# possible arrays with a given shape and dtype. A `ConcreteArray` represents a
# singleton set consisting of a single array value.
#
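# For example (using only the definitions above), `get_aval` wraps a concrete
# Python or NumPy scalar in a `ConcreteArray`, which is still a `ShapedArray`:
# +
example_aval = get_aval(np.float32(1.0))
print(type(example_aval).__name__, example_aval.shape, example_aval.dtype)  # ConcreteArray () float32
print(isinstance(example_aval, ShapedArray))  # True
# -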
# Now that we've set up the interpreter stack, the Trace/Tracer API for
# interpreters, and abstract values, we can come back to implement `bind`:
def bind(prim, *args, **params):
top_trace = find_top_trace(args)
tracers = [full_raise(top_trace, arg) for arg in args]
outs = top_trace.process_primitive(prim, tracers, params)
return [full_lower(out) for out in outs]
# The main action is that we call `find_top_trace` to figure out which
# interpreter should handle this primitive application. We then call that top
# trace's `process_primitive` so that the trace can apply its interpretation
# rule. The calls to `full_raise` just ensure that the inputs are boxed in the
# top trace's `Tracer` instances, and the call to `full_lower` is an optional
# optimization so that we unbox values out of `Tracer`s as much as possible.
# +
import operator as op
def find_top_trace(xs) -> Trace:
top_main = max((x._trace.main for x in xs if isinstance(x, Tracer)),
default=trace_stack[0], key=op.attrgetter('level'))
if dynamic_trace and dynamic_trace.level > top_main.level:
top_main = dynamic_trace
return top_main.trace_type(top_main)
# -
# In words, ignoring the `dynamic_trace` step until Part 3, `find_top_trace`
# returns the highest-level interpreter associated with the `Tracer`s on its
# inputs, and otherwise returns the interpreter at the bottom of the stack
# (which is always an evaluation trace, at least for now). This is a deviation
# from the description above, where we always start by running the interpreter
# at the top of the stack and then work our way down, applying every interpreter
# in the stack. Instead, we're only applying an interpreter when the input
# arguments to a primitive bind are boxed in a `Tracer` corresponding to that
# interpreter. This optimization lets us skip irrelevant transformations, but
# bakes in an assumption that transformations mostly follow data dependence
# (except for the special bottom-of-the-stack interpreter, which interprets
# everything).
#
# An alternative would be to have every interpreter in the stack interpret every
# operation. That's worth exploring! JAX is designed around data dependence in
# large part because that's so natural for automatic differentiation, and JAX's
# roots are in autodiff. But it may be over-fit.
# +
def full_lower(val: Any):
if isinstance(val, Tracer):
return val.full_lower()
else:
return val
def full_raise(trace: Trace, val: Any) -> Tracer:
if not isinstance(val, Tracer):
assert type(val) in jax_types
return trace.pure(val)
level = trace.main.level
if val._trace.main is trace.main:
return val
elif val._trace.main.level < level:
return trace.lift(val)
elif val._trace.main.level > level:
raise Exception(f"Can't lift level {val._trace.main.level} to {level}.")
else: # val._trace.level == level
raise Exception(f"Different traces at same level: {val._trace}, {trace}.")
# -
# The logic in `full_raise` serves to box values into `Tracer`s for a particular
# `Trace`, calling different methods on the `Trace` based on context:
# `Trace.pure` is called on non-`Tracer` constants, and `Trace.lift` is called
# for values that are already `Tracer`s from a lower-level interpreter. These
# two methods could share the same implementation, but by distinguishing them in
# the core logic we can provide more information to the `Trace` subclass.
#
# That's it for the JAX core! Now we can start adding interpreters.
# ### Evaluation interpreter
#
# We'll start with the simplest interpreter: the evaluation interpreter that
# will sit at the bottom of the interpreter stack.
# +
class EvalTrace(Trace):
pure = lift = lambda self, x: x # no boxing in Tracers needed
def process_primitive(self, primitive, tracers, params):
return impl_rules[primitive](*tracers, **params)
trace_stack.append(MainTrace(0, EvalTrace, None)) # special bottom of the stack
# NB: in JAX, instead of a dict we attach impl rules to the Primitive instance
impl_rules = {}
impl_rules[add_p] = lambda x, y: [np.add(x, y)]
impl_rules[mul_p] = lambda x, y: [np.multiply(x, y)]
impl_rules[neg_p] = lambda x: [np.negative(x)]
impl_rules[sin_p] = lambda x: [np.sin(x)]
impl_rules[cos_p] = lambda x: [np.cos(x)]
impl_rules[reduce_sum_p] = lambda x, *, axis: [np.sum(x, axis)]
impl_rules[greater_p] = lambda x, y: [np.greater(x, y)]
impl_rules[less_p] = lambda x, y: [np.less(x, y)]
impl_rules[transpose_p] = lambda x, *, perm: [np.transpose(x, perm)]
def broadcast_impl(x, *, shape, axes):
for axis in sorted(axes):
x = np.expand_dims(x, axis)
return [np.broadcast_to(x, shape)]
impl_rules[broadcast_p] = broadcast_impl
# -
# With this interpreter, we can evaluate user functions:
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
print(f(3.0))
# -
# Woo! Like going around in a big circle. But the point of this indirection is
# that now we can add some real transformations.
# ### Forward-mode autodiff with `jvp`
#
# First, a few helper functions:
# +
def zeros_like(val):
aval = get_aval(val)
return np.zeros(aval.shape, aval.dtype)
def unzip2(pairs):
lst1, lst2 = [], []
for x1, x2 in pairs:
lst1.append(x1)
lst2.append(x2)
return lst1, lst2
map_ = map
def map(f, *xs):
return list(map_(f, *xs))
zip_ = zip
def zip(*args):
fst, *rest = args = map(list, args)
n = len(fst)
for arg in rest:
assert len(arg) == n
return list(zip_(*args))
# -
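# These helpers differ a bit from the builtins (a quick illustration, not in
# the original): `map` and `zip` are eager, `zip` checks lengths, and `unzip2`
# splits a list of pairs into a pair of lists.
print(unzip2([(1, 'a'), (2, 'b')]))  # ([1, 2], ['a', 'b'])
print(map(str, [1, 2, 3]))           # ['1', '2', '3'], a list rather than an iterator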
# The `Tracer` for forward-mode autodiff carries a primal-tangent pair. The
# `Trace` applies JVP rules.
# +
class JVPTracer(Tracer):
def __init__(self, trace, primal, tangent):
self._trace = trace
self.primal = primal
self.tangent = tangent
@property
def aval(self):
return get_aval(self.primal)
class JVPTrace(Trace):
pure = lift = lambda self, val: JVPTracer(self, val, zeros_like(val))
def process_primitive(self, primitive, tracers, params):
primals_in, tangents_in = unzip2((t.primal, t.tangent) for t in tracers)
jvp_rule = jvp_rules[primitive]
primal_outs, tangent_outs = jvp_rule(primals_in, tangents_in, **params)
return [JVPTracer(self, x, t) for x, t in zip(primal_outs, tangent_outs)]
jvp_rules = {}
# -
# Notice both `pure` and `lift` package a value into a `JVPTracer` with the
# minimal amount of context, which is a zero tangent value.
#
# Let's add some JVP rules for primitives:
# +
def add_jvp(primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
return [x + y], [x_dot + y_dot]
jvp_rules[add_p] = add_jvp
def mul_jvp(primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
return [x * y], [x_dot * y + x * y_dot]
jvp_rules[mul_p] = mul_jvp
def sin_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [sin(x)], [cos(x) * x_dot]
jvp_rules[sin_p] = sin_jvp
def cos_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [cos(x)], [-sin(x) * x_dot]
jvp_rules[cos_p] = cos_jvp
def neg_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [neg(x)], [neg(x_dot)]
jvp_rules[neg_p] = neg_jvp
def reduce_sum_jvp(primals, tangents, *, axis):
(x,), (x_dot,) = primals, tangents
return [reduce_sum(x, axis)], [reduce_sum(x_dot, axis)]
jvp_rules[reduce_sum_p] = reduce_sum_jvp
def greater_jvp(primals, tangents):
(x, y), _ = primals, tangents
out_primal = greater(x, y)
return [out_primal], [zeros_like(out_primal)]
jvp_rules[greater_p] = greater_jvp
def less_jvp(primals, tangents):
(x, y), _ = primals, tangents
out_primal = less(x, y)
return [out_primal], [zeros_like(out_primal)]
jvp_rules[less_p] = less_jvp
# -
# Finally, we add a transformation API to kick off the trace:
def jvp_v1(f, primals, tangents):
with new_main(JVPTrace) as main:
trace = JVPTrace(main)
tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)]
out = f(*tracers_in)
tracer_out = full_raise(trace, out)
primal_out, tangent_out = tracer_out.primal, tracer_out.tangent
return primal_out, tangent_out
# And with that, we can differentiate!
x = 3.0
y, sin_deriv_at_3 = jvp_v1(sin, (x,), (1.0,))
print(sin_deriv_at_3)
print(cos(3.0))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
x, xdot = 3., 1.
y, ydot = jvp_v1(f, (x,), (xdot,))
print(y)
print(ydot)
# +
def deriv(f):
return lambda x: jvp_v1(f, (x,), (1.,))[1]
print(deriv(sin)(3.))
print(deriv(deriv(sin))(3.))
print(deriv(deriv(deriv(sin)))(3.))
print(deriv(deriv(deriv(deriv(sin))))(3.))
# +
def f(x):
if x > 0.: # Python control flow
return 2. * x
else:
return x
print(deriv(f)(3.))
print(deriv(f)(-3.))
# -
# ### Pytrees and flattening user functions' inputs and outputs
#
# A limitation with `jvp_v1` is that it assumes the user function accepts arrays
# as positional arguments and produces a single array as output. What if it
# produced a list as output? Or accepted nested containers as inputs? It would
# be a pain to deal with all the possible containers in inputs and outputs at
# every layer of the stack. Instead, we can wrap the user function so that the
# wrapped version accepts arrays as inputs and returns a flat list of arrays as
# output. The wrapper just needs to unflatten its input, call the user function,
# and flatten the output.
#
# Here's how we'd like to write `jvp`, assuming the user always gives us
# functions that take arrays as inputs and produce a flat list of arrays as
# outputs:
def jvp_flat(f, primals, tangents):
with new_main(JVPTrace) as main:
trace = JVPTrace(main)
tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
primals_out, tangents_out = unzip2((t.primal, t.tangent) for t in tracers_out)
return primals_out, tangents_out
# To support user functions that have arbitrary containers in the inputs and
# outputs, here's how we'd write the user-facing `jvp` wrapper:
def jvp(f, primals, tangents):
primals_flat, in_tree = tree_flatten(primals)
tangents_flat, in_tree2 = tree_flatten(tangents)
if in_tree != in_tree2: raise TypeError
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, tangents_out_flat = jvp_flat(f, primals_flat, tangents_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
tangents_out = tree_unflatten(out_tree(), tangents_out_flat)
return primals_out, tangents_out
# Notice that we had to plumb the tree structure of the user function output
# back to the caller of `flatten_fun`. That information isn't available until we
# actually run the user function, so `flatten_fun` just returns a reference to a
# mutable cell, represented as a thunk. These side-effects are safe because we
# always run the user function exactly once. (This safe regime is the reason for
# the "linear" name in `linear_util.py`, in the sense of [linear
# types](https://en.wikipedia.org/wiki/Substructural_type_system).)
#
# All that remains is to write `tree_flatten`, `tree_unflatten`, and
# `flatten_fun`.
# + tags=["hide-input"]
def flatten_fun(f, in_tree):
store = Store()
def flat_fun(*args_flat):
pytree_args = tree_unflatten(in_tree, args_flat)
out = f(*pytree_args)
out_flat, out_tree = tree_flatten(out)
store.set_value(out_tree)
return out_flat
return flat_fun, store
class Empty: pass
empty = Empty()
class Store:
val = empty
def set_value(self, val):
assert self.val is empty
self.val = val
def __call__(self):
return self.val
# + tags=["hide-input"]
import itertools as it
from typing import Callable, Type, Hashable, Dict, Iterable, Iterator
class NodeType(NamedTuple):
name: str
to_iterable: Callable
from_iterable: Callable
def register_pytree_node(ty: Type, to_iter: Callable, from_iter: Callable
) -> None:
node_types[ty] = NodeType(str(ty), to_iter, from_iter)
node_types: Dict[Type, NodeType] = {}
register_pytree_node(tuple, lambda t: (None, t), lambda _, xs: tuple(xs))
register_pytree_node(list, lambda l: (None, l), lambda _, xs: list(xs))
register_pytree_node(dict,
lambda d: map(tuple, unzip2(sorted(d.items()))),
lambda keys, vals: dict(zip(keys, vals)))
class PyTreeDef(NamedTuple):
node_type: NodeType
node_metadata: Hashable
child_treedefs: Tuple['PyTreeDef']
class Leaf: pass
leaf = Leaf()
def tree_flatten(x: Any) -> Tuple[List[Any], PyTreeDef]:
children_iter, treedef = _tree_flatten(x)
return list(children_iter), treedef
def _tree_flatten(x: Any) -> Tuple[Iterable, PyTreeDef]:
node_type = node_types.get(type(x))
if node_type:
node_metadata, children = node_type.to_iterable(x)
children_flat, child_trees = unzip2(map(_tree_flatten, children))
flattened = it.chain.from_iterable(children_flat)
return flattened, PyTreeDef(node_type, node_metadata, tuple(child_trees))
else:
return [x], leaf
def tree_unflatten(treedef: PyTreeDef, xs: List[Any]) -> Any:
return _tree_unflatten(treedef, iter(xs))
def _tree_unflatten(treedef: PyTreeDef, xs: Iterator) -> Any:
if treedef is leaf:
return next(xs)
else:
children = (_tree_unflatten(t, xs) for t in treedef.child_treedefs)
return treedef.node_type.from_iterable(treedef.node_metadata, children)
# -
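# As a quick sanity check (not part of the original walkthrough), flattening a
# nested container and unflattening the leaves recovers the original structure:
# +
example = {'a': [1., (2., 3.)], 'b': 4.}
leaves, treedef = tree_flatten(example)
print(leaves)                                      # [1.0, 2.0, 3.0, 4.0]
print(tree_unflatten(treedef, leaves) == example)  # True
# -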
# With this pytree-handling `jvp` implementation, we can now handle arbitrary
# input and output containers. That'll come in handy with future transformations
# too!
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return {'hi': z, 'there': [x, y]}
x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -
# ### Vectorized batching with `vmap`
#
# First, a couple helper functions, one for producing mapped abstract values
# from unmapped ones (by removing an axis), and one for moving batch dimensions
# around:
# +
def mapped_aval(batch_dim, aval):
shape = list(aval.shape)
del shape[batch_dim]
return ShapedArray(tuple(shape), aval.dtype)
def move_batch_axis(axis_size, src, dst, x):
if src is not_mapped:
target_shape = list(np.shape(x))
target_shape.insert(dst, axis_size)
return broadcast(x, target_shape, [dst])
elif src == dst:
return x
else:
return moveaxis(x, src, dst)
def moveaxis(x, src: int, dst: int):
perm = [i for i in range(np.ndim(x)) if i != src]
perm.insert(dst, src)
return transpose(x, perm)
# -
# The `Tracer` for vectorized batching carries a batched value and an optional
# integer indicating which axis (if any) is the batch axis.
# +
from typing import Union
class NotMapped: pass
not_mapped = NotMapped()
BatchAxis = Union[NotMapped, int]
class BatchTracer(Tracer):
def __init__(self, trace, val, batch_dim: BatchAxis):
self._trace = trace
self.val = val
self.batch_dim = batch_dim
@property
def aval(self):
if self.batch_dim is not_mapped:
return get_aval(self.val)
else:
return mapped_aval(self.batch_dim, get_aval(self.val))
def full_lower(self):
if self.batch_dim is not_mapped:
return full_lower(self.val)
else:
return self
class BatchTrace(Trace):
pure = lift = lambda self, val: BatchTracer(self, val, not_mapped)
def process_primitive(self, primitive, tracers, params):
vals_in, bdims_in = unzip2((t.val, t.batch_dim) for t in tracers)
vmap_rule = vmap_rules[primitive]
val_outs, bdim_outs = vmap_rule(self.axis_size, vals_in, bdims_in, **params)
return [BatchTracer(self, x, bd) for x, bd in zip(val_outs, bdim_outs)]
@property
def axis_size(self):
return self.main.global_data
vmap_rules = {}
# -
# Here we've implemented the optional `Tracer.full_lower` method, which lets us
# peel off a batching tracer if it's not needed because it doesn't represent a
# batched value.
#
# For `BatchTrace`, analogous to `JVPTrace`, the methods `pure` and `lift` just
# box a value in a `BatchTracer` with the minimal amount of context, which in
# this case is a `batch_dim` taking the sentinel value `not_mapped`. Notice we
# use the `MainTrace`'s interpreter-global data field to store the batch axis
# size.
#
# Next we can define batching interpreter rules for each primitive:
# +
from functools import partial
def binop_batching_rule(op, axis_size, vals_in, dims_in):
(x, y), (x_bdim, y_bdim) = vals_in, dims_in
if x_bdim != y_bdim:
if x_bdim is not_mapped:
x = move_batch_axis(axis_size, x_bdim, y_bdim, x)
x_bdim = y_bdim
else:
y = move_batch_axis(axis_size, y_bdim, x_bdim, y)
return [op(x, y)], [x_bdim]
vmap_rules[add_p] = partial(binop_batching_rule, add)
vmap_rules[mul_p] = partial(binop_batching_rule, mul)
def vectorized_unop_batching_rule(op, axis_size, vals_in, dims_in):
(x,), (x_bdim,) = vals_in, dims_in
return [op(x)], [x_bdim]
vmap_rules[sin_p] = partial(vectorized_unop_batching_rule, sin)
vmap_rules[cos_p] = partial(vectorized_unop_batching_rule, cos)
vmap_rules[neg_p] = partial(vectorized_unop_batching_rule, neg)
def reduce_sum_batching_rule(axis_size, vals_in, dims_in, *, axis):
(x,), (x_bdim,) = vals_in, dims_in
new_axis = axis + (x_bdim <= axis)
out_bdim = x_bdim - (new_axis < x_bdim)
return [reduce_sum(x, new_axis)], [out_bdim]
vmap_rules[reduce_sum_p] = reduce_sum_batching_rule
# -
# Finally, we add a transformation API to kick off the trace:
# +
def vmap_flat(f, in_axes, *args):
axis_size, = {x.shape[ax] for x, ax in zip(args, in_axes)
if ax is not not_mapped}
with new_main(BatchTrace, axis_size) as main:
trace = BatchTrace(main)
tracers_in = [BatchTracer(trace, x, ax) if ax is not None else x
for x, ax in zip(args, in_axes)]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
vals_out, bdims_out = unzip2((t.val, t.batch_dim) for t in tracers_out)
outs_transposed = [move_batch_axis(axis_size, bdim, 0, val_out)
for val_out, bdim in zip(vals_out, bdims_out)]
return outs_transposed
def vmap(f, in_axes):
def batched_f(*args):
args_flat, in_tree = tree_flatten(args)
in_axes_flat, in_tree2 = tree_flatten(in_axes)
if in_tree != in_tree2: raise TypeError
f_flat, out_tree = flatten_fun(f, in_tree)
outs_flat = vmap_flat(f_flat, in_axes_flat, *args_flat)
return tree_unflatten(out_tree(), outs_flat)
return batched_f
# +
def add_one_to_a_scalar(scalar):
assert np.ndim(scalar) == 0
return 1 + scalar
vector_in = np.arange(3.)
vector_out = vmap(add_one_to_a_scalar, (0,))(vector_in)
print(vector_in)
print(vector_out)
# +
def jacfwd(f, x):
pushfwd = lambda v: jvp(f, (x,), (v,))[1]
vecs_in = np.eye(np.size(x)).reshape(np.shape(x) * 2)
return vmap(pushfwd, (0,))(vecs_in)
def f(x):
return sin(x)
jacfwd(f, np.arange(3.))
# -
# That's it for `jvp` and `vmap`!
# ## Part 2: Jaxprs
#
# The next transformations on the horizon are `jit` for just-in-time
# compilation and `vjp` for reverse-mode autodiff. (`grad` is just a small
# wrapper around `vjp`.) Whereas `jvp` and `vmap` only needed each `Tracer` to
# carry a little bit of extra context, for both `jit` and `vjp` we need much
# richer context: we need to represent _programs_. That is, we need jaxprs!
#
# Jaxprs are JAX's internal intermediate representation of programs. They are
# explicitly typed, functional, first-order, and in ANF form. We need a
# program representation for `jit` because the purpose of `jit` is to stage
# computation out of Python. For any computation we want to stage out, we need
# to be able to represent it as data, and build it up as we trace a Python
# function. Similarly, `vjp` needs a way to represent the computation for the
# backward pass of reverse-mode autodiff. We use the same jaxpr program
# representation for both needs.
#
# (Building a program representation is the most
# [free](https://en.wikipedia.org/wiki/Free_object) kind of
# trace-transformation, and so except for issues around handling native Python
# control flow, any transformation could be implemented by first tracing to a
# jaxpr and then interpreting the jaxpr.)
# ### Jaxpr data structures
#
# The jaxpr term syntax is roughly:
#
# ```
# jaxpr ::=
# { lambda <binder> , ... .
# let <eqn>
# ...
# in ( <atom> , ... ) }
#
# binder ::= <var>:<array_type>
# var ::= a | b | c | ...
# atom ::= <var> | <literal>
# literal ::= <int32> | <int64> | <float32> | <float64>
#
# eqn ::= <binder> , ... = <primitive> [ <params> ] <atom> , ...
# ```
#
# The syntax of types is:
#
# ```
# jaxpr_type ::= [ <array_type> , ... ] -> [ <array_type> , ... ]
# array_type ::= <dtype>[<shape>]
# dtype ::= f32 | f64 | i32 | i64
# shape ::= <int> , ...
# ```
#
# How do we represent these as Python data structures? We reuse ShapedArrays to
# represent types, and we can represent the term syntax with a few Python
# structs:
# +
from typing import Set
class Var:
aval: ShapedArray
def __init__(self, aval): self.aval = aval
class Lit:
val: Any
aval: ShapedArray
def __init__(self, val):
self.aval = aval = raise_to_shaped(get_aval(val))
self.val = np.array(val, aval.dtype)
Atom = Union[Var, Lit]
class JaxprEqn(NamedTuple):
primitive: Primitive
inputs: List[Atom]
params: Dict[str, Any]
out_binders: List[Var]
class Jaxpr(NamedTuple):
in_binders: List[Var]
eqns: List[JaxprEqn]
outs: List[Atom]
def __hash__(self): return id(self)
__eq__ = op.is_
def raise_to_shaped(aval):
return ShapedArray(aval.shape, aval.dtype)
# -
# Type-checking a jaxpr involves checking that there are no unbound variables,
# that variables are only bound once, and that for each equation the type of
# the primitive application matches the type of the output binders.
# +
class JaxprType(NamedTuple):
in_types: List[ShapedArray]
out_types: List[ShapedArray]
def __repr__(self):
in_types = ', '.join(aval.str_short() for aval in self.in_types)
out_types = ', '.join(aval.str_short() for aval in self.out_types)
return f'({in_types}) -> ({out_types})'
def typecheck_jaxpr(jaxpr: Jaxpr) -> JaxprType:
env: Set[Var] = set()
for v in jaxpr.in_binders:
if v in env: raise TypeError
env.add(v)
for eqn in jaxpr.eqns:
in_types = [typecheck_atom(env, x) for x in eqn.inputs]
out_types = abstract_eval_rules[eqn.primitive](*in_types, **eqn.params)
for out_binder, out_type in zip(eqn.out_binders, out_types):
if not out_type == out_binder.aval: raise TypeError
for out_binder in eqn.out_binders:
if out_binder in env: raise TypeError
env.add(out_binder)
in_types = [v.aval for v in jaxpr.in_binders]
out_types = [typecheck_atom(env, x) for x in jaxpr.outs]
return JaxprType(in_types, out_types)
def typecheck_atom(env: Set[Var], x: Atom) -> ShapedArray:
if isinstance(x, Var):
if x not in env: raise TypeError("unbound variable")
return x.aval
elif isinstance(x, Lit):
return raise_to_shaped(get_aval(x.val))
else:
assert False
# -
# We can apply the function represented by a jaxpr to arguments with a simple
# interpreter.
# +
def eval_jaxpr(jaxpr: Jaxpr, args: List[Any]) -> List[Any]:
env: Dict[Var, Any] = {}
def read(x: Atom) -> Any:
return env[x] if type(x) is Var else x.val
def write(v: Var, val: Any) -> None:
assert v not in env # single-assignment
env[v] = val
map(write, jaxpr.in_binders, args)
for eqn in jaxpr.eqns:
in_vals = map(read, eqn.inputs)
outs = bind(eqn.primitive, *in_vals, **eqn.params)
map(write, eqn.out_binders, outs)
return map(read, jaxpr.outs)
def jaxpr_as_fun(jaxpr: Jaxpr):
return lambda *args: eval_jaxpr(jaxpr, args)
# -
# By using `bind` in the interpreter, this interpreter itself is traceable.
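# As a tiny demonstration (using a hand-built jaxpr, not part of the original
# text), we can type-check and evaluate a one-equation jaxpr, and, because
# evaluation goes through `bind`, differentiate straight through the
# interpreter with `jvp`:
# +
x_var = Var(ShapedArray((), np.dtype('float64')))
y_var = Var(ShapedArray((), np.dtype('float64')))
sin_jaxpr = Jaxpr([x_var], [JaxprEqn(sin_p, [x_var], {}, [y_var])], [y_var])
print(typecheck_jaxpr(sin_jaxpr))                    # (float64[]) -> (float64[])
print(eval_jaxpr(sin_jaxpr, [3.0]))                  # [sin(3.0)]
print(jvp(jaxpr_as_fun(sin_jaxpr), (3.0,), (1.0,)))  # ([sin(3.0)], [cos(3.0)])
# -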
# ### Building jaxprs with tracing
#
# Now that we have jaxprs as a data structure, we need ways to produce these
# from tracing Python code. In general there are two variants of how we trace to
# a jaxpr; `jit` uses one and `vjp` uses the other. We'll start with the one
# used by `jit`, which is also used by control flow primitives like `lax.cond`,
# `lax.while_loop`, and `lax.scan`.
# +
def split_list(lst: List[Any], n: int) -> Tuple[List[Any], List[Any]]:
assert 0 <= n <= len(lst)
return lst[:n], lst[n:]
def partition_list(bs: List[bool], l: List[Any]) -> Tuple[List[Any], List[Any]]:
assert len(bs) == len(l)
lists = lst1, lst2 = [], []
for b, x in zip(bs, l):
lists[b].append(x)
return lst1, lst2
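# A quick check (not in the original): False-flagged elements go to the first
# list and True-flagged elements to the second.
print(partition_list([True, False, True], ['a', 'b', 'c']))  # (['b'], ['a', 'c'])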
# +
# NB: the analogous class in JAX is called 'DynamicJaxprTracer'
class JaxprTracer(Tracer):
__slots__ = ['aval']
aval: ShapedArray
def __init__(self, trace, aval):
self._trace = trace
self.aval = aval
# NB: the analogous class in JAX is called 'DynamicJaxprTrace'
class JaxprTrace(Trace):
def new_arg(self, aval: ShapedArray) -> JaxprTracer:
aval = raise_to_shaped(aval)
tracer = self.builder.new_tracer(self, aval)
self.builder.tracer_to_var[id(tracer)] = Var(aval)
return tracer
def get_or_make_const_tracer(self, val: Any) -> JaxprTracer:
tracer = self.builder.const_tracers.get(id(val))
if tracer is None:
tracer = self.builder.new_tracer(self, raise_to_shaped(get_aval(val)))
self.builder.add_const(tracer, val)
return tracer
pure = lift = get_or_make_const_tracer
def process_primitive(self, primitive, tracers, params):
avals_in = [t.aval for t in tracers]
avals_out = abstract_eval_rules[primitive](*avals_in, **params)
out_tracers = [self.builder.new_tracer(self, a) for a in avals_out]
inputs = [self.builder.getvar(t) for t in tracers]
outvars = [self.builder.add_var(t) for t in out_tracers]
self.builder.add_eqn(JaxprEqn(primitive, inputs, params, outvars))
return out_tracers
@property
def builder(self):
return self.main.global_data
# NB: in JAX, we instead attach abstract eval rules to Primitive instances
abstract_eval_rules = {}
# -
# Notice that we keep as interpreter-global data a builder object, which keeps
# track of variables, constants, and eqns as we build up the jaxpr.
class JaxprBuilder:
eqns: List[JaxprEqn]
tracer_to_var: Dict[int, Var]
const_tracers: Dict[int, JaxprTracer]
constvals: Dict[Var, Any]
tracers: List[JaxprTracer]
def __init__(self):
self.eqns = []
self.tracer_to_var = {}
self.const_tracers = {}
self.constvals = {}
self.tracers = []
def new_tracer(self, trace: JaxprTrace, aval: ShapedArray) -> JaxprTracer:
tracer = JaxprTracer(trace, aval)
self.tracers.append(tracer)
return tracer
def add_eqn(self, eqn: JaxprEqn) -> None:
self.eqns.append(eqn)
def add_var(self, tracer: JaxprTracer) -> Var:
assert id(tracer) not in self.tracer_to_var
var = self.tracer_to_var[id(tracer)] = Var(tracer.aval)
return var
def getvar(self, tracer: JaxprTracer) -> Var:
var = self.tracer_to_var.get(id(tracer))
assert var is not None
return var
def add_const(self, tracer: JaxprTracer, val: Any) -> Var:
var = self.add_var(tracer)
self.const_tracers[id(val)] = tracer
self.constvals[var] = val
return var
def build(self, in_tracers: List[JaxprTracer], out_tracers: List[JaxprTracer]
) -> Tuple[Jaxpr, List[Any]]:
constvars, constvals = unzip2(self.constvals.items())
t2v = lambda t: self.tracer_to_var[id(t)]
in_binders = constvars + [t2v(t) for t in in_tracers]
out_vars = [t2v(t) for t in out_tracers]
jaxpr = Jaxpr(in_binders, self.eqns, out_vars)
typecheck_jaxpr(jaxpr)
jaxpr, constvals = _inline_literals(jaxpr, constvals)
return jaxpr, constvals
def _inline_literals(jaxpr: Jaxpr, consts: List[Any]) -> Tuple[Jaxpr, List[Any]]:
const_binders, other_binders = split_list(jaxpr.in_binders, len(consts))
scalars = [type(x) in jax_types and not get_aval(x).shape for x in consts]
new_const_binders, lit_binders = partition_list(scalars, const_binders)
new_consts, lit_vals = partition_list(scalars, consts)
literals = dict(zip(lit_binders, map(Lit, lit_vals)))
new_eqns = [JaxprEqn(eqn.primitive, [literals.get(x, x) for x in eqn.inputs],
eqn.params, eqn.out_binders) for eqn in jaxpr.eqns]
new_outs = [literals.get(x, x) for x in jaxpr.outs]
new_jaxpr = Jaxpr(new_const_binders + other_binders, new_eqns, new_outs)
typecheck_jaxpr(new_jaxpr)
return new_jaxpr, new_consts
# The rules we need for `JaxprTrace.process_primitive` are essentially typing
# rules for primitive applications: given the primitive, its parameters, and
# types for the inputs, the rule must produce a type for the output, which is
# then packaged with the output `JaxprTracer`. We can use abstract evaluation
# rules for this same purpose, even though they can be more general (since
# abstract evaluation rules must accept ConcreteArray inputs, and since they
# need only return an upper bound on the set of possible outputs, they can
# produce ConcreteArray outputs as well). We'll reuse these abstract evaluation
# rules for the other jaxpr-producing trace machinery, where the potential extra
# generality is useful.
# +
def binop_abstract_eval(x: ShapedArray, y: ShapedArray) -> List[ShapedArray]:
if not isinstance(x, ShapedArray) or not isinstance(y, ShapedArray):
raise TypeError
if raise_to_shaped(x) != raise_to_shaped(y): raise TypeError
return [ShapedArray(x.shape, x.dtype)]
abstract_eval_rules[add_p] = binop_abstract_eval
abstract_eval_rules[mul_p] = binop_abstract_eval
def compare_abstract_eval(x: ShapedArray, y: ShapedArray) -> List[ShapedArray]:
if not isinstance(x, ShapedArray) or not isinstance(y, ShapedArray):
raise TypeError
if x.shape != y.shape: raise TypeError
return [ShapedArray(x.shape, np.dtype('bool'))]
abstract_eval_rules[greater_p] = compare_abstract_eval
abstract_eval_rules[less_p] = compare_abstract_eval
def vectorized_unop_abstract_eval(x: ShapedArray) -> List[ShapedArray]:
return [ShapedArray(x.shape, x.dtype)]
abstract_eval_rules[sin_p] = vectorized_unop_abstract_eval
abstract_eval_rules[cos_p] = vectorized_unop_abstract_eval
abstract_eval_rules[neg_p] = vectorized_unop_abstract_eval
def reduce_sum_abstract_eval(x: ShapedArray, *, axis: int) -> List[ShapedArray]:
new_shape = [d for i, d in enumerate(x.shape) if i != axis]
return [ShapedArray(tuple(new_shape), x.dtype)]
abstract_eval_rules[reduce_sum_p] = reduce_sum_abstract_eval
def broadcast_abstract_eval(x: ShapedArray, *, shape: Sequence[int],
axes: Sequence[int]) -> List[ShapedArray]:
return [ShapedArray(tuple(shape), x.dtype)]
abstract_eval_rules[broadcast_p] = broadcast_abstract_eval
# -
# To check our implementation of jaxprs, we can add a `make_jaxpr`
# transformation and a pretty-printer:
# +
from functools import lru_cache
@lru_cache() # ShapedArrays are hashable
def make_jaxpr_v1(f, *avals_in):
avals_in, in_tree = tree_flatten(avals_in)
f, out_tree = flatten_fun(f, in_tree)
builder = JaxprBuilder()
with new_main(JaxprTrace, builder) as main:
trace = JaxprTrace(main)
tracers_in = [trace.new_arg(aval) for aval in avals_in]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
jaxpr, consts = builder.build(tracers_in, tracers_out)
return jaxpr, consts, out_tree()
# + tags=["hide-input"]
from collections import defaultdict
import string
class PPrint:
lines: List[Tuple[int, str]]
def __init__(self, lines):
self.lines = lines
def indent(self, indent: int) -> 'PPrint':
return PPrint([(indent + orig_indent, s) for orig_indent, s in self.lines])
def __add__(self, rhs: 'PPrint') -> 'PPrint':
return PPrint(self.lines + rhs.lines)
def __rshift__(self, rhs: 'PPrint') -> 'PPrint':
if not rhs.lines: return self
if not self.lines: return rhs
indent, s = self.lines[-1]
indented_block = rhs.indent(indent + len(s))
common_line = s + ' ' * rhs.lines[0][0] + rhs.lines[0][1]
return PPrint(self.lines[:-1]
+ [(indent, common_line)]
+ indented_block.lines[1:])
def __str__(self) -> str:
return '\n'.join(' ' * indent + s for indent, s in self.lines)
def pp(s: Any) -> PPrint:
return PPrint([(0, line) for line in str(s).splitlines()])
def vcat(ps: List[PPrint]) -> PPrint:
return sum(ps, pp(''))
def pp_jaxpr(jaxpr: Jaxpr):
namegen = (''.join(s) for r in it.count(1)
for s in it.permutations(string.ascii_lowercase, r))
names = defaultdict(lambda: next(namegen))
in_binders = ', '.join(var_str(names, x) for x in jaxpr.in_binders)
eqns = vcat([pp_eqn(names, e) for e in jaxpr.eqns])
outs = ', '.join(names[v] if isinstance(v, Var) else str(v.val)
for v in jaxpr.outs)
return (pp(f'{{ lambda {in_binders} .') +
((pp('let ') >> eqns) + pp(f'in ( {outs} ) }}')).indent(2))
def var_str(names: Dict[Var, str], v: Var) -> str:
return f'{names[v]}:{v.aval.str_short()}'
def pp_eqn(names: Dict[Var, str], eqn: JaxprEqn) -> PPrint:
lhs = pp(' '.join(var_str(names, v) for v in eqn.out_binders))
rhs = (pp(eqn.primitive.name) >> pp_params(eqn.params) >>
pp(' '.join(names[x] if isinstance(x, Var) else str(x.val)
for x in eqn.inputs)))
return lhs >> pp(' = ') >> rhs
def pp_params(params: Dict[str, Any]) -> PPrint:
items = sorted(params.items())
if items:
return pp(' [ ') >> vcat([pp(f'{k}={v}') for k, v in items]) >> pp(' ] ')
else:
return pp(' ')
Jaxpr.__repr__ = lambda self: str(pp_jaxpr(self))
# -
jaxpr, consts, _ = make_jaxpr_v1(lambda x: 2. * x, raise_to_shaped(get_aval(3.)))
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
# But there's a limitation here: because of how `find_top_trace` operates by
# data dependence, `make_jaxpr_v1` can't stage out all the primitive operations
# performed by the Python callable it's given. For example:
jaxpr, consts, _ = make_jaxpr_v1(lambda: mul(2., 2.))
print(jaxpr)
# This is precisely the issue that
# [omnistaging](https://github.com/google/jax/pull/3370) fixed.
# We want to ensure that the `JaxprTrace` started by `make_jaxpr` is always
# applied, regardless of whether any inputs to `bind` are boxed in corresponding
# `JaxprTracer` instances. We can achieve this by employing the `dynamic_trace`
# global defined in Part 1:
# +
@contextmanager
def new_dynamic(main: MainTrace):
global dynamic_trace
prev_dynamic_trace, dynamic_trace = dynamic_trace, main
try:
yield
finally:
dynamic_trace = prev_dynamic_trace
@lru_cache()
def make_jaxpr(f: Callable, *avals_in: ShapedArray,
) -> Tuple[Jaxpr, List[Any], PyTreeDef]:
avals_in, in_tree = tree_flatten(avals_in)
f, out_tree = flatten_fun(f, in_tree)
builder = JaxprBuilder()
with new_main(JaxprTrace, builder) as main:
with new_dynamic(main):
trace = JaxprTrace(main)
tracers_in = [trace.new_arg(aval) for aval in avals_in]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
jaxpr, consts = builder.build(tracers_in, tracers_out)
return jaxpr, consts, out_tree()
jaxpr, consts, _ = make_jaxpr(lambda: mul(2., 2.))
print(jaxpr)
# -
# Using `dynamic_trace` this way is conceptually the same as stashing the
# current interpreter stack and starting a new one with the `JaxprTrace` at the
# bottom. That is, no interpreters lower in the stack than the `dynamic_trace`
# are applied (since `JaxprTrace.process_primitive` doesn't call `bind`), though
# if the Python callable being traced to a jaxpr itself uses transformations
# then those can be pushed onto the interpreter stack above the `JaxprTrace`.
# But temporarily stashing the interpreter stack would break up the system
# state. The `dynamic_trace` tag achieves the same goals while keeping the
# system state simpler.
# That's it for jaxprs! With jaxprs in hand, we can implement the remaining
# major JAX features.
# ## Part 3: `jit`, simplified
#
# While `jit` has a transformation-like API in that it accepts a Python callable
# as an argument, under the hood it's really a higher-order primitive rather
# than a transformation. A primitive is _higher-order_ when it's parameterized
# by a function.
# ### On-the-fly ("final style") and staged ("initial style") processing
#
# There are two options for how to handle higher-order primitives. Each requires
# a different approach to tracing and engenders different tradeoffs:
# 1. **On-the-fly processing, where `bind` takes a Python callable as an
# argument.** We defer forming a jaxpr until as late as possible, namely
# until we're running the final interpreter at the bottom of the interpreter
# stack. That way we can swap a `JaxprTrace` in at the bottom of the
# interpreter stack and thus stage out rather than execute all primitive
# operations. With this approach, transformations in the stack get applied as
# we execute the Python callable as usual. This approach can be very tricky
# to implement, but it's as general as possible because it allows
# higher-order primitives not to raise the abstraction level of their
# arguments and thus allows data-dependent Python control flow. We refer to
# this approach as using a "final-style higher-order primitive" employing the
# discharge-at-tracing-time "final-style transformations" we've used so far.
# 2. **Staged processing, where `bind` takes a jaxpr as an argument.** Before we
# call `bind`, in the primitive wrapper we can just use `make_jaxpr` to form
# a jaxpr up-front and be done with the Python callable entirely. In this
# case, `make_jaxpr` puts its `JaxprTrace` at the top of the interpreter
# stack, and no transformations lower in the stack, which might enter via
# closed-over Tracers, are applied to the Python callable as we trace it.
# (Transformations applied within the Python callable are applied as usual,
# being added to the stack above the JaxprTrace.) Instead, the
# transformations lower in the stack are later applied to the call primitive,
# and the call primitive's rules must then transform the jaxpr itself.
# Because we trace to a jaxpr up-front, this approach can't support
# data-dependent Python control flow, but it is more straightforward to
# implement. We refer to this kind of higher-order primitive as an
# "initial-style higher-order primitive", and say that its jaxpr-processing
# transformation rules are "initial-style transformation rules."
#
# The latter approach fits for `jit` because we don't need to support
# data-dependent Python control flow in the user-provided Python callable, as
# the whole purpose of `jit` is to stage computation out of Python to be
# executed by XLA. (In contrast, `custom_jvp` is a higher-order primitive in
# which we want to support data-dependent Python control flow.)
#
# Historically, we started using the "initial-style" and "final-style"
# terminology after reading the [typed tagless final
# interpreters](http://okmij.org/ftp/tagless-final/index.html) paper, and
# jokingly referring to JAX as an implementation of "untyped tagful final
# interpreters." We don't claim to carry over (or understand) any deep meaning
# behind these terms; we loosely use "initial style" to mean "build an AST and
# then transform it", and we use "final style" to mean "transform as we trace."
# But it's just imprecise yet sticky jargon.
# With the initial-style approach, here's the user-facing `jit` wrapper:
# +
def jit(f):
def f_jitted(*args):
avals_in = [raise_to_shaped(get_aval(x)) for x in args]
jaxpr, consts, out_tree = make_jaxpr(f, *avals_in)
outs = bind(xla_call_p, *consts, *args, jaxpr=jaxpr, num_consts=len(consts))
return tree_unflatten(out_tree, outs)
return f_jitted
xla_call_p = Primitive('xla_call')
# -
# With any new primitive, we need to give it transformation rules, starting with
# its evaluation rule. When we evaluate an application of the `xla_call`
# primitive, we want to stage out the computation to XLA. That involves
# translating the jaxpr to an XLA HLO program, transferring the argument values
# to the XLA device, executing the XLA program, and transferring back the
# results. We'll cache the XLA HLO compilation so that for each `jit`ted
# function it only needs to be performed once per argument shape and dtype
# signature.
#
# First, some utilities.
class IDHashable:
val: Any
def __init__(self, val):
self.val = val
def __hash__(self) -> int:
return id(self.val)
def __eq__(self, other):
return type(other) is IDHashable and id(self.val) == id(other.val)
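# For instance (a small check, not in the original), numpy arrays aren't
# hashable, so we key the compilation cache on object identity instead:
const_arr = np.ones(3)
print(IDHashable(const_arr) == IDHashable(const_arr))   # True: same object
print(IDHashable(const_arr) == IDHashable(np.ones(3)))  # False: equal values, different objects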
# Next, we'll define the evaluation rule for `xla_call`:
# +
from jax.lib import xla_bridge as xb
from jax.lib import xla_client as xc
xe = xc._xla
xops = xc._xla.ops
def xla_call_impl(*args, jaxpr: Jaxpr, num_consts: int):
consts, args = args[:num_consts], args[num_consts:]
hashable_consts = tuple(map(IDHashable, consts))
execute = xla_callable(IDHashable(jaxpr), hashable_consts)
return execute(*args)
impl_rules[xla_call_p] = xla_call_impl
@lru_cache()
def xla_callable(hashable_jaxpr: IDHashable, hashable_consts: Tuple[IDHashable]):
jaxpr: Jaxpr = hashable_jaxpr.val
typecheck_jaxpr(jaxpr)
consts = [x.val for x in hashable_consts]
in_avals = [v.aval for v in jaxpr.in_binders[len(consts):]]
c = xb.make_computation_builder('xla_call')
xla_consts = _xla_consts(c, consts)
xla_params = _xla_params(c, in_avals)
outs = jaxpr_subcomp(c, jaxpr, xla_consts + xla_params)
out = xops.Tuple(c, outs)
compiled = xb.get_backend(None).compile(c.build(out))
return partial(execute_compiled, compiled, [v.aval for v in jaxpr.outs])
def _xla_consts(c: xe.XlaBuilder, consts: List[Any]) -> List[xe.XlaOp]:
unique_consts = {id(cnst): cnst for cnst in consts}
xla_consts = {
id_: xops.ConstantLiteral(c, cnst) for id_, cnst in unique_consts.items()}
return [xla_consts[id(cnst)] for cnst in consts]
def _xla_params(c: xe.XlaBuilder, avals_in: List[ShapedArray]) -> List[xe.XlaOp]:
return [xb.parameter(c, i, _xla_shape(a)) for i, a in enumerate(avals_in)]
def _xla_shape(aval: ShapedArray) -> xe.Shape:
return xc.Shape.array_shape(xc.dtype_to_etype(aval.dtype), aval.shape)
# -
# The main action is in `xla_callable`, which compiles a jaxpr into an XLA HLO
# program using `jaxpr_subcomp`, then returns a callable which executes the
# compiled program:
# +
def jaxpr_subcomp(c: xe.XlaBuilder, jaxpr: Jaxpr, args: List[xe.XlaOp]
) -> xe.XlaOp:
env: Dict[Var, xe.XlaOp] = {}
def read(x: Atom) -> xe.XlaOp:
return env[x] if type(x) is Var else xb.constant(c, x.val, False)
def write(v: Var, val: xe.XlaOp) -> None:
env[v] = val
map(write, jaxpr.in_binders, args)
for eqn in jaxpr.eqns:
in_avals = [x.aval for x in eqn.inputs]
in_vals = map(read, eqn.inputs)
rule = xla_translations[eqn.primitive]
out_vals = rule(c, in_avals, in_vals, **eqn.params)
map(write, eqn.out_binders, out_vals)
return map(read, jaxpr.outs)
def execute_compiled(compiled, out_avals, *args):
input_bufs = [input_handlers[type(x)](x) for x in args]
out_bufs = compiled.execute(input_bufs)
return [handle_result(aval, buf) for aval, buf in zip(out_avals, out_bufs)]
default_input_handler = xb.get_backend(None).buffer_from_pyval
input_handlers = {ty: default_input_handler for ty in
[bool, int, float, np.ndarray, np.float64, np.float32]}
def handle_result(aval: ShapedArray, buf):
del aval # Unused for now.
return buf.to_py()
xla_translations = {}
# -
# Notice that `jaxpr_subcomp` has the structure of a simple interpreter. That's
# a common pattern: the way we process jaxprs is usually with an interpreter.
# And as with any interpreter, we need an interpretation rule for each
# primitive:
# +
def direct_translation(op, c, in_avals, in_vals):
del c, in_avals
return [op(*in_vals)]
xla_translations[add_p] = partial(direct_translation, xops.Add)
xla_translations[mul_p] = partial(direct_translation, xops.Mul)
xla_translations[neg_p] = partial(direct_translation, xops.Neg)
xla_translations[sin_p] = partial(direct_translation, xops.Sin)
xla_translations[cos_p] = partial(direct_translation, xops.Cos)
xla_translations[greater_p] = partial(direct_translation, xops.Gt)
xla_translations[less_p] = partial(direct_translation, xops.Lt)
def reduce_sum_translation(c, in_avals, in_vals, *, axis):
(x_aval,), (x,) = in_avals, in_vals
zero = xops.ConstantLiteral(c, np.array(0, x_aval.dtype))
subc = xb.make_computation_builder('add')
shape = _xla_shape(ShapedArray((), x_aval.dtype))
xops.Add(xops.Parameter(subc, 0, shape), xops.Parameter(subc, 1, shape))
return [xops.Reduce(c, [x], [zero], subc.build(), [axis])]
xla_translations[reduce_sum_p] = reduce_sum_translation
def broadcast_translation(c, in_avals, in_vals, *, shape, axes):
x, = in_vals
dims_complement = [i for i in range(len(shape)) if i not in axes]
return [xops.BroadcastInDim(x, shape, dims_complement)]
xla_translations[broadcast_p] = broadcast_translation
# -
# With that, we can now use `jit` to stage out, compile, and execute programs
# with XLA!
@jit
def f(x, y):
print('tracing!')
return sin(x) * cos(y)
z = f(3., 4.) # 'tracing!' prints the first time
print(z)
z = f(4., 5.) # 'tracing!' doesn't print, compilation cache hit!
print(z)
# +
@jit
def f(x):
return reduce_sum(x, axis=0)
print(f(np.array([1., 2., 3.])))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
def deriv(f):
return lambda x: jvp(f, (x,), (1.,))[1]
print( deriv(deriv(f))(3.))
print(jit(deriv(deriv(f)))(3.))
# -
# Instead of implementing `jit` to first trace to a jaxpr and then to lower the
# jaxpr to XLA HLO, it might appear that we could have skipped the jaxpr step
# and just lowered to HLO while tracing. That is, perhaps we could have instead
# implemented `jit` with a `Trace` and `Tracer` that appended to the XLA HLO
# graph incrementally on each primitive bind. That's correct for now, but won't
# be possible when we introduce compiled SPMD computations because there we must
# know the number of replicas needed before compiling the program.
# We haven't yet defined any transformation rules for `xla_call_p` other than
# its evaluation rule. That is, we can't yet do `vmap`-of-`jit` or
# `jvp`-of-`jit` or even `jit`-of-`jit`. Instead `jit` has to be at the "top
# level." Let's fix that!
# +
def xla_call_jvp_rule(primals, tangents, *, jaxpr, num_consts):
del num_consts # Unused.
new_jaxpr, new_consts = jvp_jaxpr(jaxpr)
outs = bind(xla_call_p, *new_consts, *primals, *tangents, jaxpr=new_jaxpr,
num_consts=len(new_consts))
n = len(outs) // 2
primals_out, tangents_out = outs[:n], outs[n:]
return primals_out, tangents_out
jvp_rules[xla_call_p] = xla_call_jvp_rule
@lru_cache()
def jvp_jaxpr(jaxpr: Jaxpr) -> Tuple[Jaxpr, List[Any]]:
def jvp_traceable(*primals_and_tangents):
n = len(primals_and_tangents) // 2
primals, tangents = primals_and_tangents[:n], primals_and_tangents[n:]
return jvp(jaxpr_as_fun(jaxpr), primals, tangents)
in_avals = [v.aval for v in jaxpr.in_binders]
new_jaxpr, new_consts, _ = make_jaxpr(jvp_traceable, *in_avals, *in_avals)
return new_jaxpr, new_consts
# +
def xla_call_vmap_rule(axis_size, vals_in, dims_in, *, jaxpr, num_consts):
del num_consts # Unused.
new_jaxpr, new_consts = vmap_jaxpr(jaxpr, axis_size, tuple(dims_in))
outs = bind(xla_call_p, *new_consts, *vals_in, jaxpr=new_jaxpr,
num_consts=len(new_consts))
return outs, [0] * len(outs)
vmap_rules[xla_call_p] = xla_call_vmap_rule
@lru_cache()
def vmap_jaxpr(jaxpr: Jaxpr, axis_size: int, bdims_in: Tuple[BatchAxis, ...]
) -> Tuple[Jaxpr, List[Any]]:
vmap_traceable = vmap(jaxpr_as_fun(jaxpr), tuple(bdims_in))
in_avals = [unmapped_aval(axis_size, d, v.aval)
for v, d in zip(jaxpr.in_binders, bdims_in)]
new_jaxpr, new_consts, _ = make_jaxpr(vmap_traceable, *in_avals)
return new_jaxpr, new_consts
def unmapped_aval(axis_size: int, batch_dim: BatchAxis, aval: ShapedArray
) -> ShapedArray:
if batch_dim is not_mapped:
return aval
else:
shape = list(aval.shape)
shape.insert(batch_dim, axis_size)
return ShapedArray(tuple(shape), aval.dtype)
# +
def xla_call_abstract_eval_rule(*in_types, jaxpr, num_consts):
del num_consts # Unused.
jaxpr_type = typecheck_jaxpr(jaxpr)
if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)):
raise TypeError
return jaxpr_type.out_types
abstract_eval_rules[xla_call_p] = xla_call_abstract_eval_rule
def xla_call_translation(c, in_avals, in_vals, *, jaxpr, num_consts):
del num_consts # Only used at top-level.
# Calling jaxpr_subcomp directly would inline. We generate a Call HLO instead.
subc = xb.make_computation_builder('inner xla_call')
xla_params = _xla_params(subc, in_avals)
outs = jaxpr_subcomp(subc, jaxpr, xla_params)
subc = subc.build(xops.Tuple(subc, outs))
return destructure_tuple(c, xops.Call(c, subc, in_vals))
xla_translations[xla_call_p] = xla_call_translation
def destructure_tuple(c, tup):
num_elements = len(c.get_shape(tup).tuple_shapes())
return [xops.GetTupleElement(tup, i) for i in range(num_elements)]
# +
@jit
def f(x):
print('tracing!')
y = sin(x) * 2.
z = - y + x
return z
x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -
y, ydot = jvp(f, (x,), (xdot,)) # 'tracing!' not printed
ys = vmap(f, (0,))(np.arange(3.))
print(ys)
# One piece missing is device memory persistence for arrays. That is, we've
# defined `handle_result` to transfer results back to CPU memory as NumPy
# arrays, but it's often preferable to avoid transferring results just to
# transfer them back for the next operation. We can do that by introducing a
# `DeviceArray` class, which can wrap XLA buffers and otherwise duck-type
# `numpy.ndarray`s:
# +
def handle_result(aval: ShapedArray, buf): # noqa: F811
return DeviceArray(aval, buf)
class DeviceArray:
buf: Any
aval: ShapedArray
def __init__(self, aval, buf):
self.aval = aval
self.buf = buf
dtype = property(lambda self: self.aval.dtype)
shape = property(lambda self: self.aval.shape)
ndim = property(lambda self: self.aval.ndim)
def __array__(self): return self.buf.to_py()
def __repr__(self): return repr(self.buf.to_py())
def __str__(self): return str(self.buf.to_py())
_neg = staticmethod(neg)
_add = staticmethod(add)
_radd = staticmethod(add)
_mul = staticmethod(mul)
_rmul = staticmethod(mul)
_gt = staticmethod(greater)
_lt = staticmethod(less)
input_handlers[DeviceArray] = lambda x: x.buf
jax_types.add(DeviceArray)
# +
@jit
def f(x):
y = sin(x) * 2.
z = - y + x
return z
x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -
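# A small check (not in the original): results of `jit`ted computations are now
# wrapped in `DeviceArray`s rather than copied back as numpy values.
print(type(y).__name__)  # DeviceArray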
# ## Part 4: `linearize` and `vjp` (and `grad`!)
#
# The `linearize` and `vjp` autodiff functions are built on `jvp`, but involve
# jaxprs as well. That's because both involve staging out, or delaying,
# computation.
# ### `linearize`
#
# In the case of `linearize`, we want to stage out the linear part of a `jvp`
# computation. That is, if we have `jvp : (a -> b) -> (a, T a) -> (b, T b)`,
# then we write `linearize : (a -> b) -> a -> (b, T a -o T b)`, using `T a` to
# mean "the tangent type of `a`" and using the "lollipop" `-o` rather than the
# arrow `->` to indicate a _linear_ function. We define the semantics of
# `linearize` in terms of `jvp` too:
# ```python
# y, f_lin = linearize(f, x)
# y_dot = f_lin(x_dot)
# ```
# gives the same result for `(y, y_dot)` as
# ```
# y, y_dot = jvp(f, (x,), (x_dot,))
# ```
# where the application of `f_lin` does not redo any of the linearization work.
# We'll represent the delayed linear part `f_lin : T a -o T b` as a jaxpr.
#
# Tangentially, now that we have linear arrows `-o`, we can provide a slightly
# more informative type for `jvp`:
# ```
# jvp : (a -> b) -> (UnrestrictedUse a, T a) -o (UnrestrictedUse b, T b)
# ```
# Here we're writing `UnrestrictedUse` just to indicate that we have a special
# pair where the first element can be used in an unrestricted (nonlinear) way.
# In conjunction with the linear arrow, this notation is just meant to express
# that the function `jvp f` uses its first input in a nonlinear way but its
# second input in a linear way, producing a corresponding nonlinear output
# (which can be used in a nonlinear way) paired with a linear output. This more
# refined type signature encodes the data dependencies in `jvp f`, which are
# useful for partial evaluation.
#
# To build the `f_lin` jaxpr from a JVP, we need to perform partial evaluation:
# we evaluate all the primal values as we trace, but stage the tangent
# computations into a jaxpr. This is our second way to build jaxprs. But where
# `make_jaxpr` and its underlying `JaxprTrace`/`JaxprTracer` interpreters aim
# to stage out every primitive bind, this second approach stages out only those
# primitive binds with a data dependence on tangent inputs.
#
# First, some utilities:
# +
def split_half(lst: List[Any]) -> Tuple[List[Any], List[Any]]:
assert not len(lst) % 2
return split_list(lst, len(lst) // 2)
def merge_lists(which: List[bool], l1: List[Any], l2: List[Any]) -> List[Any]:
l1, l2 = iter(l1), iter(l2)
out = [next(l2) if b else next(l1) for b in which]
assert next(l1, None) is next(l2, None) is None
return out
# -
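# For example (a quick check, not in the original), `merge_lists` interleaves
# two lists according to a boolean mask, taking from the second list where the
# mask is True:
print(merge_lists([False, True, False], ['a', 'c'], ['b']))  # ['a', 'b', 'c']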
# Next, we'll write `linearize` by combining `jvp` together with a general
# partial evaluation transformation, to be added next:
# +
def linearize_flat(f, *primals_in):
pvals_in = ([PartialVal.known(x) for x in primals_in] +
[PartialVal.unknown(vspace(get_aval(x))) for x in primals_in])
def f_jvp(*primals_tangents_in):
primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in))
return [*primals_out, *tangents_out]
jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in)
primal_pvals, _ = split_half(pvals_out)
assert all(pval.is_known for pval in primal_pvals)
primals_out = [pval.const for pval in primal_pvals]
f_lin = lambda *tangents: eval_jaxpr(jaxpr, [*consts, *tangents])
return primals_out, f_lin
def linearize(f, *primals_in):
primals_in_flat, in_tree = tree_flatten(primals_in)
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, f_lin_flat = linearize_flat(f, *primals_in_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
def f_lin(*tangents_in):
tangents_in_flat, in_tree2 = tree_flatten(tangents_in)
if in_tree != in_tree2: raise TypeError
tangents_out_flat = f_lin_flat(*tangents_in_flat)
return tree_unflatten(out_tree(), tangents_out_flat)
return primals_out, f_lin
def vspace(aval: ShapedArray) -> ShapedArray:
return raise_to_shaped(aval) # TODO handle integers?
# -
# Now we turn to the general partial evaluation transformation. The goal is to
# accept a Python callable and a list of inputs, some known and some unknown,
# and to produce (1) all the outputs which can be computed from the known
# inputs, together with (2) a jaxpr representing the part of the Python
# callable's computation which can only be performed after the remaining inputs
# are known.
#
# This transformation is tricky to summarize in a type signature. If we
# assume the input function's type signature is `(a1, a2) -> (b1, b2)`, where
# `a1` and `a2` represent the known and unknown inputs, respectively, and where
# `b1` only has a data dependency on `a1` while `b2` has some data dependency on
# `a2`, then we might write
#
# ```
# partial_eval : ((a1, a2) -> (b1, b2)) -> a1 -> exists r. (b1, r, (r, a2) -> b2)
# ```
#
# In words, given values for the inputs of type `a1`, `partial_eval` produces
# the outputs of type `b1` along with "residual" values of
# existentially-quantified type `r` representing the intermediates required to
# complete the computation in the second stage. It also produces a function of
# type `(r, a2) -> b2` which accepts the residual values as well as the
# remaining inputs and produces the remaining outputs.
#
# We like to think of partial evaluation as "unzipping" one computation into
# two. For example, consider this jaxpr:
# ```
# { lambda a:float64[] .
# let b:float64[] = sin a
# c:float64[] = neg b
# in ( c ) }
# ```
# A jaxpr for the JVP would look like:
# ```
# { lambda a:float64[] b:float64[] .
# let c:float64[] = sin a
# d:float64[] = cos a
# e:float64[] = mul d b
# f:float64[] = neg c
# g:float64[] = neg e
# in ( f, g ) }
# ```
# If we imagine applying partial evaluation to this jaxpr with the first input
# known and the second unknown, we end up 'unzipping' the JVP jaxpr into primal
# and tangent jaxprs:
# ```
# { lambda a:float64[] .
# let c:float64[] = sin a
# d:float64[] = cos a
# f:float64[] = neg c
# in ( f, d ) }
# ```
# ```
# { lambda d:float64[] b:float64[] .
# let e:float64[] = mul d b
# g:float64[] = neg e
# in ( g ) }
# ```
# This second jaxpr represents the linear computation that we want from
# `linearize`.
#
# However, unlike in this jaxpr example, we want the computation on known values
# to occur while evaluating the input Python callable. That is, rather than
# forming a jaxpr for the entire function `(a1, a2) -> (b1, b2)`, staging all
# operations out of Python first before sorting out what can be evaluated now
# and what must be delayed, we want only to form a jaxpr for those operations
# that _must_ be delayed due to a dependence on unknown inputs. In the context
# of automatic differentiation, this is the feature that ultimately enables us to
# handle functions like `grad(lambda x: x**2 if x > 0 else 0.)`. Python control
# flow works because partial evaluation keeps the primal computation in Python.
# As a consequence, our `Trace` and `Tracer` subclasses must on the fly sort out
# what can be evaluated and what must be staged out into a jaxpr.
#
# First, we start with a `PartialVal` class, which represents a value that can
# be either known or unknown:
class PartialVal(NamedTuple):
aval: ShapedArray
const: Optional[Any]
@classmethod
def known(cls, val: Any):
return PartialVal(get_aval(val), val)
@classmethod
def unknown(cls, aval: ShapedArray):
return PartialVal(aval, None)
is_known = property(lambda self: self.const is not None)
is_unknown = property(lambda self: self.const is None)
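# For example (a quick check, not in the original), a known input carries its
# value while an unknown input carries only its abstract value:
print(PartialVal.known(3.).is_known)                                        # True
print(PartialVal.unknown(ShapedArray((), np.dtype('float64'))).is_unknown)  # True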
# Partial evaluation will take a list of `PartialVal`s representing inputs, and
# return a list of `PartialVal` outputs along with a jaxpr representing the
# delayed computation:
def partial_eval_flat(f: Callable, pvals_in: List[PartialVal]
) -> Tuple[Jaxpr, List[PartialVal], List[Any]]:
with new_main(PartialEvalTrace) as main:
trace = PartialEvalTrace(main)
tracers_in = [trace.new_arg(pval) for pval in pvals_in]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
pvals_out = [t.pval for t in tracers_out]
unk_tracers_in = [t for t in tracers_in if t.pval.is_unknown]
unk_tracers_out = [t for t in tracers_out if t.pval.is_unknown]
jaxpr, consts = tracers_to_jaxpr(unk_tracers_in, unk_tracers_out)
return jaxpr, pvals_out, consts
# Next we need to implement `PartialEvalTrace` and its `PartialEvalTracer`. This
# interpreter will build a jaxpr on the fly while tracking data dependencies. To
# do so, it builds a bipartite directed acyclic graph (DAG) between
# `PartialEvalTracer` nodes, representing staged-out values, and `JaxprRecipe`
# nodes, representing formulas for how to compute some values from others. One
# kind of recipe is a `JaxprEqnRecipe`, corresponding to a `JaxprEqn`'s primitive
# application, but we also have recipe types for constants and lambda binders:
# +
from weakref import ref, ReferenceType
class LambdaBindingRecipe(NamedTuple):
pass
class ConstRecipe(NamedTuple):
val: Any
class JaxprEqnRecipe(NamedTuple):
prim: Primitive
tracers_in: List['PartialEvalTracer']
params: Dict[str, Any]
avals_out: List[ShapedArray]
tracer_refs_out: List['ReferenceType[PartialEvalTracer]']
JaxprRecipe = Union[LambdaBindingRecipe, ConstRecipe, JaxprEqnRecipe]
# -
class PartialEvalTracer(Tracer):
pval: PartialVal
recipe: Optional[JaxprRecipe]
def __init__(self, trace, pval, recipe):
self._trace = trace
self.pval = pval
self.recipe = recipe
aval = property(lambda self: self.pval.aval)
def full_lower(self):
if self.pval.is_known:
return full_lower(self.pval.const)
return self
# The `PartialEvalTrace` contains the logic for constructing the graph of
# `JaxprRecipe`s and `PartialEvalTracer`s. Each argument corresponds to a
# `LambdaBindingRecipe` leaf node, and each constant is a `ConstRecipe` leaf
# node holding a reference to the constant. All other tracers and recipes come
# from `process_primitive`, which forms tracers with `JaxprEqnRecipe`s.
#
# For most primitives, the `process_primitive` logic is straightforward: if all
# inputs are known then we can bind the primitive on the known values
# (evaluating it in Python) and avoid forming tracers corresponding to the
# output. If any input is unknown then we instead stage out into a
# `JaxprEqnRecipe` representing the primitive application. To build the tracers
# representing unknown outputs, we need avals, which get from the abstract eval
# rules. (Notice that tracers reference `JaxprEqnRecipe`s, and `JaxprEqnRecipe`s
# reference tracers; we avoid circular garbage by using weakrefs.)
#
# That `process_primitive` logic applies to most primitives, but `xla_call_p`
# requires recursive treatment. So we special-case its rule in a
# `partial_eval_rules` dict.
# +
class PartialEvalTrace(Trace):
def new_arg(self, pval: PartialVal) -> Any:
return PartialEvalTracer(self, pval, LambdaBindingRecipe())
def lift(self, val: Any) -> PartialEvalTracer:
return PartialEvalTracer(self, PartialVal.known(val), None)
pure = lift
def instantiate_const(self, tracer: PartialEvalTracer) -> PartialEvalTracer:
if tracer.pval.is_unknown:
return tracer
else:
pval = PartialVal.unknown(raise_to_shaped(tracer.aval))
return PartialEvalTracer(self, pval, ConstRecipe(tracer.pval.const))
def process_primitive(self, primitive, tracers, params):
if all(t.pval.is_known for t in tracers):
return bind(primitive, *map(full_lower, tracers), **params)
rule = partial_eval_rules.get(primitive)
if rule: return rule(self, tracers, **params)
tracers_in = [self.instantiate_const(t) for t in tracers]
avals_in = [t.aval for t in tracers_in]
avals_out = abstract_eval_rules[primitive](*avals_in, **params)
tracers_out = [PartialEvalTracer(self, PartialVal.unknown(aval), None)
for aval in avals_out]
eqn = JaxprEqnRecipe(primitive, tracers_in, params, avals_out,
map(ref, tracers_out))
for t in tracers_out: t.recipe = eqn
return tracers_out
partial_eval_rules = {}
# -
# Now that we can build graph representations of jaxprs with `PartialEvalTrace`,
# we need a mechanism to convert the graph representation to a standard jaxpr.
# The jaxpr corresponds to a topological sort of the graph.
# +
def tracers_to_jaxpr(tracers_in: List[PartialEvalTracer],
tracers_out: List[PartialEvalTracer]):
tracer_to_var = {id(t): Var(raise_to_shaped(t.aval)) for t in tracers_in}
constvar_to_val = {}
constid_to_var = {}
processed_eqns = set()
eqns = []
for t in toposort(tracers_out, tracer_parents):
if isinstance(t.recipe, LambdaBindingRecipe):
assert id(t) in set(map(id, tracers_in))
elif isinstance(t.recipe, ConstRecipe):
val = t.recipe.val
var = constid_to_var.get(id(val))
      if var is None:
        aval = raise_to_shaped(get_aval(val))
        var = constid_to_var[id(val)] = Var(aval)
        constvar_to_val[var] = val
      tracer_to_var[id(t)] = var
elif isinstance(t.recipe, JaxprEqnRecipe):
if id(t.recipe) not in processed_eqns:
eqns.append(recipe_to_eqn(tracer_to_var, t.recipe))
processed_eqns.add(id(t.recipe))
else:
raise TypeError(t.recipe)
constvars, constvals = unzip2(constvar_to_val.items())
in_binders = constvars + [tracer_to_var[id(t)] for t in tracers_in]
out_vars = [tracer_to_var[id(t)] for t in tracers_out]
jaxpr = Jaxpr(in_binders, eqns, out_vars)
typecheck_jaxpr(jaxpr)
return jaxpr, constvals
def recipe_to_eqn(tracer_to_var: Dict[int, Var], recipe: JaxprEqnRecipe
) -> JaxprEqn:
inputs = [tracer_to_var[id(t)] for t in recipe.tracers_in]
out_binders = [Var(aval) for aval in recipe.avals_out]
for t_ref, var in zip(recipe.tracer_refs_out, out_binders):
if t_ref() is not None: tracer_to_var[id(t_ref())] = var
return JaxprEqn(recipe.prim, inputs, recipe.params, out_binders)
def tracer_parents(t: PartialEvalTracer) -> List[PartialEvalTracer]:
return t.recipe.tracers_in if isinstance(t.recipe, JaxprEqnRecipe) else []
# + tags=["hide-input"]
def toposort(out_nodes: List[Any], parents: Callable[[Any], List[Any]]):
if not out_nodes: return []
out_nodes = remove_duplicates(out_nodes)
child_counts = {}
stack = list(out_nodes)
while stack:
node = stack.pop()
if id(node) in child_counts:
child_counts[id(node)] += 1
else:
child_counts[id(node)] = 1
stack.extend(parents(node))
for node in out_nodes:
child_counts[id(node)] -= 1
sorted_nodes = []
childless_nodes = [node for node in out_nodes if not child_counts[id(node)]]
while childless_nodes:
node = childless_nodes.pop()
sorted_nodes.append(node)
for parent in parents(node):
if child_counts[id(parent)] == 1:
childless_nodes.append(parent)
else:
child_counts[id(parent)] -= 1
sorted_nodes = sorted_nodes[::-1]
check_toposort(sorted_nodes, parents)
return sorted_nodes
def remove_duplicates(lst):
seen = set()
return [x for x in lst if id(x) not in seen and not seen.add(id(x))]
def check_toposort(nodes: List[Any], parents: Callable[[Any], List[Any]]):
seen = set()
for node in nodes:
assert all(id(parent) in seen for parent in parents(node))
seen.add(id(node))
# -
# Now we can linearize!
y, sin_lin = linearize(sin, 3.)
print(y, sin(3.))
print(sin_lin(1.), cos(3.))
# To handle `linearize`-of-`jit`, we still need to write a partial evaluation
# rule for `xla_call_p`. Other than tracer bookkeeping, the main task is to
# perform partial evaluation of a jaxpr, 'unzipping' it into two jaxprs.
#
# There are actually two rules to write: one for trace-time partial evaluation,
# which we'll call `xla_call_partial_eval`, and one for partial evaluation of
# jaxprs, which we'll call `xla_call_peval_eqn`.
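# As a rough sketch of what 'unzipping' means (hypothetical jaxprs, shown only
# for illustration): given a jaxpr like
#
# ```
# { lambda x:float32[] , t:float32[] .
#   let y:float32[] = sin x
#       z:float32[] = mul y t
#   in ( z ) }
# ```
#
# with `x` known and `t` unknown, partial evaluation splits it into a
# known-input jaxpr that computes `y = sin x` and returns `y` as a residual,
# and an unknown-input jaxpr that takes the residual `y` along with `t` and
# computes `z = mul y t`.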
# +
def xla_call_partial_eval(trace, tracers, *, jaxpr, num_consts):
del num_consts # Unused.
in_unknowns = [not t.pval.is_known for t in tracers]
jaxpr1, jaxpr2, out_unknowns, num_res = partial_eval_jaxpr(jaxpr, in_unknowns)
known_tracers, unknown_tracers = partition_list(in_unknowns, tracers)
known_vals = [t.pval.const for t in known_tracers]
outs1_res = bind(xla_call_p, *known_vals, jaxpr=jaxpr1, num_consts=0)
outs1, res = split_list(outs1_res, len(jaxpr1.outs) - num_res)
res_tracers = [trace.instantiate_const(full_raise(trace, x)) for x in res]
outs2 = [PartialEvalTracer(trace, PartialVal.unknown(v.aval), None)
for v in jaxpr2.outs]
eqn = JaxprEqnRecipe(xla_call_p, res_tracers + unknown_tracers,
dict(jaxpr=jaxpr2, num_consts=0),
[v.aval for v in jaxpr2.outs], map(ref, outs2))
for t in outs2: t.recipe = eqn
return merge_lists(out_unknowns, outs1, outs2)
partial_eval_rules[xla_call_p] = xla_call_partial_eval
def partial_eval_jaxpr(jaxpr: Jaxpr, in_unknowns: List[bool],
instantiate: Optional[List[bool]] = None,
) -> Tuple[Jaxpr, Jaxpr, List[bool], int]:
env: Dict[Var, bool] = {}
residuals: Set[Var] = set()
def read(v: Atom) -> bool:
return type(v) is Var and env[v]
def write(unk: bool, v: Var) -> None:
env[v] = unk
def new_res(x: Atom) -> Atom:
if type(x) is Var: residuals.add(x)
return x
eqns1, eqns2 = [], []
map(write, in_unknowns, jaxpr.in_binders)
for eqn in jaxpr.eqns:
unks_in = map(read, eqn.inputs)
rule = partial_eval_jaxpr_rules.get(eqn.primitive)
if rule:
eqn1, eqn2, unks_out, res = rule(unks_in, eqn)
eqns1.append(eqn1); eqns2.append(eqn2); residuals.update(res)
map(write, unks_out, eqn.out_binders)
elif any(unks_in):
inputs = [v if unk else new_res(v) for unk, v in zip(unks_in, eqn.inputs)]
eqns2.append(JaxprEqn(eqn.primitive, inputs, eqn.params, eqn.out_binders))
map(partial(write, True), eqn.out_binders)
else:
eqns1.append(eqn)
map(partial(write, False), eqn.out_binders)
out_unknowns = map(read, jaxpr.outs)
if instantiate is not None:
for v, uk, inst in zip(jaxpr.outs, out_unknowns, instantiate):
if inst and not uk: new_res(v)
out_unknowns = map(op.or_, out_unknowns, instantiate)
residuals, num_res = list(residuals), len(residuals)
ins1, ins2 = partition_list(in_unknowns, jaxpr.in_binders)
outs1, outs2 = partition_list(out_unknowns, jaxpr.outs)
jaxpr1 = Jaxpr(ins1, eqns1, outs1 + residuals)
jaxpr2 = Jaxpr(residuals + ins2, eqns2, outs2)
typecheck_partial_eval_jaxpr(jaxpr, in_unknowns, out_unknowns, jaxpr1, jaxpr2)
return jaxpr1, jaxpr2, out_unknowns, num_res
def typecheck_partial_eval_jaxpr(jaxpr, unks_in, unks_out, jaxpr1, jaxpr2):
jaxprty = typecheck_jaxpr(jaxpr) # (a1, a2) -> (b1, b2 )
jaxpr1ty = typecheck_jaxpr(jaxpr1) # a1 -> (b1, res)
jaxpr2ty = typecheck_jaxpr(jaxpr2) # (res, a2) -> b2
a1, a2 = partition_list(unks_in, jaxprty.in_types)
b1, b2 = partition_list(unks_out, jaxprty.out_types)
b1_, res = split_list(jaxpr1ty.out_types, len(b1))
res_, a2_ = split_list(jaxpr2ty.in_types, len(res))
b2_ = jaxpr2ty.out_types
if jaxpr1ty.in_types != a1: raise TypeError
if jaxpr2ty.out_types != b2: raise TypeError
if b1 != b1_: raise TypeError
if res != res_: raise TypeError
if a2 != a2_: raise TypeError
if b2 != b2_: raise TypeError
partial_eval_jaxpr_rules = {}
def xla_call_peval_eqn(unks_in: List[bool], eqn: JaxprEqn,
) -> Tuple[JaxprEqn, JaxprEqn, List[bool], List[Atom]]:
jaxpr = eqn.params['jaxpr']
jaxpr1, jaxpr2, unks_out, num_res = partial_eval_jaxpr(jaxpr, unks_in)
ins1, ins2 = partition_list(unks_in, eqn.inputs)
outs1, outs2 = partition_list(unks_out, eqn.out_binders)
residuals, _ = split_list(jaxpr2.in_binders, num_res)
eqn1 = JaxprEqn(xla_call_p, ins1, dict(jaxpr=jaxpr1, num_consts=0),
outs1 + residuals)
eqn2 = JaxprEqn(xla_call_p, residuals + ins2,
dict(jaxpr=jaxpr2, num_consts=0), outs2)
return eqn1, eqn2, unks_out, residuals
partial_eval_jaxpr_rules[xla_call_p] = xla_call_peval_eqn
# -
# With that, we can compose `linearize` and `jit` however we like:
# +
@jit
def f(x):
y = sin(x) * 2.
z = - y + x
return z
y, f_lin = linearize(f, 3.)
y_dot = f_lin(1.)
print(y, y_dot)
# +
@jit
def f(x):
y = sin(x) * 2.
z = g(x, y)
return z
@jit
def g(x, y):
return cos(x) + y
y, f_lin = linearize(f, 3.)
y_dot = f_lin(1.)
print(y, y_dot)
# -
# ### `vjp` and `grad`
#
# The `vjp` transformation works a lot like linearize. Its type signature is
# analogous:
#
# ```
# linearize : (a -> b) -> a -> (b, T a -o T b)
# vjp : (a -> b) -> a -> (b, T b -o T a)
# ```
#
# The only difference is that we transpose the linear part of the computation
# before returning it, so that it goes from type `T a -o T b` to type `T b -o T
# a`. That is, we'll implement `vjp` as, essentially,
#
# ```
# def vjp(f, x):
# y, f_lin = linearize(f, x)
# f_vjp = lambda y_bar: transpose(f_lin)(y_bar)
# return y, f_vjp
# ```
#
# Since we have the linear computation as a jaxpr, not just a Python callable,
# we can implement the transpose transformation as a jaxpr interpreter.
# +
def vjp_flat(f, *primals_in):
pvals_in = ([PartialVal.known(x) for x in primals_in] +
[PartialVal.unknown(vspace(get_aval(x))) for x in primals_in])
primal_pvals_in, tangent_pvals_in = split_half(pvals_in)
def f_jvp(*primals_tangents_in):
primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in))
return [*primals_out, *tangents_out]
jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in) # linearize
primal_pvals, _ = split_half(pvals_out)
assert all(pval.is_known for pval in primal_pvals)
primals_out = [pval.const for pval in primal_pvals]
transpose_inputs = consts + [UndefPrimal(p.aval) for p in tangent_pvals_in]
f_vjp = lambda *cts: eval_jaxpr_transposed(jaxpr, transpose_inputs, cts)
return primals_out, f_vjp
def vjp(f, *primals_in):
primals_in_flat, in_tree = tree_flatten(primals_in)
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, f_vjp_flat = vjp_flat(f, *primals_in_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
def f_vjp(*cotangents_out):
cotangents_out_flat, _ = tree_flatten(cotangents_out)
cotangents_in_flat = f_vjp_flat(*cotangents_out_flat)
return tree_unflatten(in_tree, cotangents_in_flat)
return primals_out, f_vjp
class UndefPrimal(NamedTuple):
aval: ShapedArray
register_pytree_node(UndefPrimal,
lambda u: (u.aval, ()),
lambda aval, _: UndefPrimal(aval))
# -
# We use `UndefPrimal` instances to indicate which arguments we want to
# transpose with respect to. These arise because, once we are explicit about
# closed-over values, we generally want to transpose functions of type
# `a -> b -o c` to functions of type `a -> c -o b`. Even more generally, the
# inputs with respect to which the function is linear could be scattered through
# the argument list. So we indicate the linear positions using `UndefPrimal`.
# We register `UndefPrimal` as a pytree node because the pytree mechanism gives
# a handy way to prune these placeholders out of argument lists.
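#
# As a small sketch (hypothetical, for illustration only): to transpose
# `lambda x, t: [x * t]` with respect to `t` alone, we mark the `t` slot with
# an `UndefPrimal` and pass the known `x` through as an ordinary value:
#
# ```python
# args = [3., UndefPrimal(ShapedArray((), np.dtype('float64')))]
# # `eval_jaxpr_transposed(jaxpr, args, cotangents)`, defined next, would then
# # return one cotangent per `UndefPrimal` slot, here just the one for `t`.
# ```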
#
# Next, we can write `eval_jaxpr_transposed`, along with transpose rules for
# all primitives which can be linear in at least one argument:
# +
# NB: the analogous function in JAX is called 'backward_pass'
def eval_jaxpr_transposed(jaxpr: Jaxpr, args: List[Any], cotangents: List[Any]
) -> List[Any]:
primal_env: Dict[Var, Any] = {}
ct_env: Dict[Var, Any] = {}
def read_primal(x: Atom) -> Any:
return primal_env.get(x, UndefPrimal(x.aval)) if type(x) is Var else x.val
def write_primal(v: Var, val: Any) -> None:
if type(val) is not UndefPrimal:
primal_env[v] = val
def read_cotangent(v: Var) -> Any:
return ct_env.pop(v, np.zeros(v.aval.shape, v.aval.dtype))
def write_cotangent(x: Atom, val: Any):
if type(x) is Var and val is not None:
ct_env[x] = add(ct_env[x], val) if x in ct_env else val
map(write_primal, jaxpr.in_binders, args)
map(write_cotangent, jaxpr.outs, cotangents)
for eqn in jaxpr.eqns[::-1]:
primals_in = map(read_primal, eqn.inputs)
cts_in = map(read_cotangent, eqn.out_binders)
rule = transpose_rules[eqn.primitive]
cts_out = rule(cts_in, *primals_in, **eqn.params)
map(write_cotangent, eqn.inputs, cts_out)
return [read_cotangent(v) for v, x in zip(jaxpr.in_binders, args)
if type(x) is UndefPrimal]
transpose_rules = {}
# +
def mul_transpose_rule(cts, x, y):
z_bar, = cts
assert (type(x) is UndefPrimal) ^ (type(y) is UndefPrimal)
return [mul(z_bar, y), None] if type(x) is UndefPrimal else [None, mul(x, z_bar)]
transpose_rules[mul_p] = mul_transpose_rule
def neg_transpose_rule(cts, x):
ybar, = cts
assert type(x) is UndefPrimal
return [neg(ybar)]
transpose_rules[neg_p] = neg_transpose_rule
def add_transpose_rule(cts, x, y):
z_bar, = cts
return [z_bar, z_bar]
transpose_rules[add_p] = add_transpose_rule
def xla_call_transpose_rule(cts, *invals, jaxpr, num_consts):
del num_consts # Unused.
undef_primals = [type(x) is UndefPrimal for x in invals]
transposed_jaxpr, new_consts = transpose_jaxpr(jaxpr, tuple(undef_primals))
residuals, _ = partition_list(undef_primals, invals)
outs = bind(xla_call_p, *new_consts, *residuals, *cts,
jaxpr=transposed_jaxpr, num_consts=len(new_consts))
outs = iter(outs)
return [next(outs) if undef else None for undef in undef_primals]
transpose_rules[xla_call_p] = xla_call_transpose_rule
@lru_cache()
def transpose_jaxpr(jaxpr: Jaxpr, undef_primals: Tuple[bool, ...]
) -> Tuple[Jaxpr, List[Any]]:
avals_in, avals_out = typecheck_jaxpr(jaxpr)
traceable = partial(eval_jaxpr_transposed, jaxpr)
args = [UndefPrimal(a) if u else a for a, u in zip(avals_in, undef_primals)]
trans_jaxpr, consts, _ = make_jaxpr(traceable, tuple(args), tuple(avals_out))
typecheck_jaxpr(trans_jaxpr)
return trans_jaxpr, consts
# -
# Now that we can linearize and transpose, we can finally write `grad`:
def grad(f):
def gradfun(x, *xs):
y, f_vjp = vjp(f, x, *xs)
if np.shape(y) != (): raise TypeError
x_bar, *_ = f_vjp(np.ones(np.shape(y), np.result_type(y)))
return x_bar
return gradfun
y, f_vjp = vjp(sin, 3.)
print(f_vjp(1.), cos(3.))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
print(grad(f)(3.))
# +
@jit
def f(x):
y = x * 2.
z = g(y)
return z
@jit
def g(x):
return cos(x) * 2.
print(grad(f)(3.))
# -
# Here's something of a compositionality stress test:
# +
# from core_test.py fun_with_nested_calls_2
def foo(x):
@jit
def bar(y):
def baz(w):
q = jit(lambda x: y)(x)
q = q + jit(lambda: y)()
q = q + jit(lambda y: w + y)(y)
q = jit(lambda w: jit(sin)(x) * y)(1.0) + q
return q
p, t = jvp(baz, (x + 1.0,), (y,))
return t + (x * p)
return bar(x)
def assert_allclose(*vals):
for v1, v2 in zip(vals[:-1], vals[1:]):
np.testing.assert_allclose(v1, v2)
ans1 = foo(3.)
ans2 = jit(foo)(3.)
ans3, _ = jvp(foo, (3.,), (5.,))
ans4, _ = jvp(jit(foo), (3.,), (5.,))
assert_allclose(ans1, ans2, ans3, ans4)
deriv1 = grad(foo)(3.)
deriv2 = grad(jit(foo))(3.)
deriv3 = jit(grad(jit(foo)))(3.)
_, deriv4 = jvp(foo, (3.,), (1.,))
_, deriv5 = jvp(jit(foo), (3.,), (1.,))
assert_allclose(deriv1, deriv2, deriv3, deriv4, deriv5)
hess1 = grad(grad(foo))(3.)
hess2 = grad(grad(jit(foo)))(3.)
hess3 = grad(jit(grad(foo)))(3.)
hess4 = jit(grad(grad(foo)))(3.)
_, hess5 = jvp(grad(foo), (3.,), (1.,))
_, hess6 = jvp(jit(grad(foo)), (3.,), (1.,))
_, hess7 = jvp(jit(grad(foo)), (3.,), (1.,))
assert_allclose(hess1, hess2, hess3, hess4, hess5, hess6, hess7)
# -
# ## Part 5: the control flow primitives `cond`
#
# Next we'll add higher-order primitives for staged-out control flow. These
# resemble `jit` from Part 3, another higher-order primitive, but differ in that
# they are parameterized by multiple callables rather than just one.
# ### Adding `cond`
#
# We introduce a `cond` primitive to represent conditional application of one
# function or another inside a jaxpr. We write the type of `cond` as
# `Bool -> (a -> b) -> (a -> b) -> a -> b`. In words, `cond` takes a boolean
# representing the predicate and two functions of equal types. Depending on the
# value of the predicate, it applies one function or the other to its final
# argument.
#
# In Python, we represent it as a function which itself takes two functions as
# arguments. As with `jit`, the first step is to call `make_jaxpr` on its
# callable arguments to turn them into jaxprs:
# +
def cond(pred, true_fn, false_fn, *operands):
avals_in = [raise_to_shaped(get_aval(x)) for x in operands]
true_jaxpr, true_consts, out_tree = make_jaxpr(true_fn, *avals_in)
false_jaxpr, false_consts, out_tree_ = make_jaxpr(false_fn, *avals_in)
if out_tree != out_tree_: raise TypeError
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
if typecheck_jaxpr(true_jaxpr) != typecheck_jaxpr(false_jaxpr):
raise TypeError
outs = bind_cond(pred, *true_consts, *false_consts, *operands,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
return tree_unflatten(out_tree, outs)
cond_p = Primitive('cond')
def _join_jaxpr_consts(jaxpr1: Jaxpr, jaxpr2: Jaxpr, n1: int, n2: int
) -> Tuple[Jaxpr, Jaxpr]:
jaxpr1_type, jaxpr2_type = typecheck_jaxpr(jaxpr1), typecheck_jaxpr(jaxpr2)
assert jaxpr1_type.in_types[n1:] == jaxpr2_type.in_types[n2:]
consts1, rest1 = split_list(jaxpr1.in_binders, n1)
consts2, rest2 = split_list(jaxpr2.in_binders, n2)
new_jaxpr1 = Jaxpr(consts1 + consts2 + rest1, jaxpr1.eqns, jaxpr1.outs)
new_jaxpr2 = Jaxpr(consts1 + consts2 + rest2, jaxpr2.eqns, jaxpr2.outs)
return new_jaxpr1, new_jaxpr2
def bind_cond(pred, *args, true_jaxpr, false_jaxpr):
assert len(args) == len(true_jaxpr.in_binders) == len(false_jaxpr.in_binders)
return bind(cond_p, pred, *args, true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
# -
# We require `true_jaxpr` and `false_jaxpr` to have the same type, but because
# they might close over different constants (and because jaxprs can only
# represent closed terms, i.e. can't have free variables and are instead
# closure-converted) we need to use the helper `_join_jaxpr_consts` to make
# consistent the input binder lists of the two jaxprs. (To be more economical we
# could try to identify pairs of constants with the same shapes, but instead we
# just concatenate the lists of constants.)
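#
# As a tiny worked sketch (hypothetical types, for illustration): if `true_fn`
# closes over a constant of type `c1` and `false_fn` over a constant of type
# `c2`, then before joining the two jaxprs have types
#
# ```
# true_jaxpr  : [c1, a] -> [b]
# false_jaxpr : [c2, a] -> [b]
# ```
#
# and after `_join_jaxpr_consts` both have type `[c1, c2, a] -> [b]`, where
# each branch simply ignores the other branch's constant binder.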
#
# Next we can turn to adding interpreter rules for `cond`. Its evaluation rule
# is simple:
def cond_impl(pred, *operands, true_jaxpr, false_jaxpr):
if pred:
return eval_jaxpr(true_jaxpr, operands)
else:
return eval_jaxpr(false_jaxpr, operands)
impl_rules[cond_p] = cond_impl
out = cond(True, lambda: 3, lambda: 4)
print(out)
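# As a further sketch (an illustrative example, not from the original):
# `cond` also threads operands through to whichever branch is selected, so the
# branch callables can take arguments:
# ```python
# out = cond(True, lambda x: x + 1., lambda x: x * 2., 3.)  # evaluates to 4.0
# ```
#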
# For its JVP and vmap rules, we only need to call the same `jvp_jaxpr` and
# `vmap_jaxpr` utilities we created for `jit`, followed by another pass of
# `_join_jaxpr_consts`:
def cond_jvp_rule(primals, tangents, *, true_jaxpr, false_jaxpr):
pred, *primals = primals
_ , *tangents = tangents
true_jaxpr , true_consts = jvp_jaxpr(true_jaxpr)
false_jaxpr, false_consts = jvp_jaxpr(false_jaxpr)
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr)
outs = bind_cond(pred, *true_consts, *false_consts, *primals, *tangents,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
primals_out, tangents_out = split_half(outs)
return primals_out, tangents_out
jvp_rules[cond_p] = cond_jvp_rule
out, out_tan = jvp(lambda x: cond(True, lambda: x * x, lambda: 0.), (1.,), (1.,))
print(out_tan)
def cond_vmap_rule(axis_size, vals_in, dims_in, *, true_jaxpr, false_jaxpr):
pred , *vals_in = vals_in
pred_dim, *dims_in = dims_in
if pred_dim is not not_mapped: raise NotImplementedError # TODO
true_jaxpr, true_consts = vmap_jaxpr(true_jaxpr, axis_size, tuple(dims_in))
false_jaxpr, false_consts = vmap_jaxpr(false_jaxpr, axis_size, tuple(dims_in))
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr)
outs = bind_cond(pred, *true_consts, *false_consts, *vals_in,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
return outs, [0] * len(outs)
vmap_rules[cond_p] = cond_vmap_rule
xs = np.array([1., 2., 3])
out = vmap(lambda x: cond(True, lambda: x + 1., lambda: 0.), (0,))(xs)
print(out)
# Notice that we're not currently supporting the case where the predicate value
# itself is batched. In mainline JAX, we handle this case by transforming the
# conditional to a [select primitive](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.select.html).
# That transformation is semantically correct so long as `true_fun` and
# `false_fun` do not involve any side-effecting primitives.
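#
# As a sketch of that select-based idea (not implemented here; the names are
# hypothetical): with a batched predicate we could evaluate both batched
# branches and combine their outputs elementwise, roughly
#
# ```python
# # `batched_true_jaxpr` and `batched_false_jaxpr` are hypothetical names for
# # the two branch jaxprs after batching:
# true_outs = eval_jaxpr(batched_true_jaxpr, vals_in)
# false_outs = eval_jaxpr(batched_false_jaxpr, vals_in)
# outs = [np.where(pred, t, f) for t, f in zip(true_outs, false_outs)]
# ```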
#
# Another thing not represented here, but present in mainline JAX, is that
# applying transformations to two jaxprs of equal type might result in jaxprs of
# different types. For example, applying the mainline JAX version of
# `vmap_jaxpr` to the identity-function jaxpr
#
# ```
# { lambda a:float32[] .
# let
# in ( a ) }
# ```
#
# would result in a jaxpr with a batched output, of type
# `[float32[10]] -> [float32[10]]` if the batch size were 10, while applying it
# to the zero-function jaxpr
#
# ```
# { lambda a:float32[] .
# let
# in ( 0. ) }
# ```
#
# would result in a jaxpr with an unbatched output, of type
# `[float32[10]] -> [float32[]]`. This is an optimization, aimed at not batching
# values unnecessarily. But it means that in `cond` we'd need an extra step of
# joining the two transformed jaxprs to have consistent output types. We don't
# need this step here because we chose `vmap_jaxpr` always to batch all outputs
# over the leading axis.
# Next we can turn to abstract evaluation and XLA lowering rules:
# +
def cond_abstract_eval(pred_type, *in_types, true_jaxpr, false_jaxpr):
if pred_type != ShapedArray((), np.dtype('bool')): raise TypeError
jaxpr_type = typecheck_jaxpr(true_jaxpr)
if jaxpr_type != typecheck_jaxpr(false_jaxpr):
raise TypeError
if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)):
raise TypeError
return jaxpr_type.out_types
abstract_eval_rules[cond_p] = cond_abstract_eval
def cond_translation(c, in_avals, in_vals, *, true_jaxpr, false_jaxpr):
del in_avals # Unused.
pred, *in_vals = in_vals
flat_vals, in_tree = tree_flatten(in_vals)
operand = xops.Tuple(c, flat_vals)
operand_shape = c.get_shape(operand)
def make_comp(name: str, jaxpr: Jaxpr) -> xe.XlaComputation:
c = xb.make_computation_builder(name)
operand = xb.parameter(c, 0, operand_shape)
operands = tree_unflatten(in_tree, destructure_tuple(c, operand))
outs = jaxpr_subcomp(c, jaxpr, operands)
return c.build(xops.Tuple(c, outs))
true_comp = make_comp('true_fn', true_jaxpr)
false_comp = make_comp('false_fn', false_jaxpr)
int_etype = xc.dtype_to_etype(np.dtype('int32'))
out = xops.Conditional(xops.ConvertElementType(pred, int_etype),
[false_comp, true_comp], [operand] * 2)
return destructure_tuple(c, out)
xla_translations[cond_p] = cond_translation
# -
out = jit(lambda: cond(False, lambda: 1, lambda: 2))()
print(out)
# Finally, to support reverse-mode automatic differentiation, we need partial
# evaluation and transposition rules. For partial evaluation, we need to
# introduce another jaxpr-munging utility, `_join_jaxpr_res`, to handle the fact
# that applying partial evaluation to `true_fun` and `false_fun` will in general
# result in distinct residuals. We use `_join_jaxpr_res` to make the output
# types of the transformed jaxprs consistent (while `_join_jaxpr_consts` dealt
# with input types).
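#
# As a small sketch of the residual-joining step (hypothetical types, for
# illustration): if partially evaluating the true branch yields one residual of
# type `float32[]` and the false branch yields one of type `float32[3]`, then
# `_join_jaxpr_res` pads each branch's outputs with zero literals standing in
# for the other branch's residuals, so both known-input jaxprs end up with the
# same output signature `[..., float32[], float32[3]]`.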
# +
def cond_partial_eval(trace, tracers, *, true_jaxpr, false_jaxpr):
pred_tracer, *tracers = tracers
assert pred_tracer.pval.is_known
pred = pred_tracer.pval.const
in_uks = [not t.pval.is_known for t in tracers]
*jaxprs, out_uks, num_res = _cond_partial_eval(true_jaxpr, false_jaxpr, in_uks)
t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2 = jaxprs
known_tracers, unknown_tracers = partition_list(in_uks, tracers)
known_vals = [t.pval.const for t in known_tracers]
outs1_res = bind_cond(pred, *known_vals,
true_jaxpr=t_jaxpr1, false_jaxpr=f_jaxpr1)
outs1, res = split_list(outs1_res, len(outs1_res) - num_res)
pred_tracer_ = trace.instantiate_const(full_raise(trace, pred_tracer))
res_tracers = [trace.instantiate_const(full_raise(trace, x)) for x in res]
outs2 = [PartialEvalTracer(trace, PartialVal.unknown(v.aval), None)
for v in t_jaxpr2.outs]
eqn = JaxprEqnRecipe(cond_p, [pred_tracer_, *res_tracers, *unknown_tracers],
dict(true_jaxpr=t_jaxpr2, false_jaxpr=f_jaxpr2),
[v.aval for v in t_jaxpr2.outs], map(ref, outs2))
for t in outs2: t.recipe = eqn
return merge_lists(out_uks, outs1, outs2)
partial_eval_rules[cond_p] = cond_partial_eval
def _cond_partial_eval(true_jaxpr: Jaxpr, false_jaxpr: Jaxpr, in_uks: List[bool]
) -> Tuple[Jaxpr, Jaxpr, Jaxpr, Jaxpr, List[bool], int]:
_, _, t_out_uks, _ = partial_eval_jaxpr(true_jaxpr , in_uks)
_, _, f_out_uks, _ = partial_eval_jaxpr(false_jaxpr, in_uks)
out_uks = map(op.or_, t_out_uks, f_out_uks)
t_jaxpr1, t_jaxpr2, _, t_nres = partial_eval_jaxpr(true_jaxpr , in_uks, out_uks)
f_jaxpr1, f_jaxpr2, _, f_nres = partial_eval_jaxpr(false_jaxpr, in_uks, out_uks)
t_jaxpr1, f_jaxpr1 = _join_jaxpr_res(t_jaxpr1, f_jaxpr1, t_nres, f_nres)
t_jaxpr2, f_jaxpr2 = _join_jaxpr_consts(t_jaxpr2, f_jaxpr2, t_nres, f_nres)
assert typecheck_jaxpr(t_jaxpr1) == typecheck_jaxpr(f_jaxpr1)
assert typecheck_jaxpr(t_jaxpr2) == typecheck_jaxpr(f_jaxpr2)
num_res = t_nres + f_nres
return t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2, out_uks, num_res
def _join_jaxpr_res(jaxpr1: Jaxpr, jaxpr2: Jaxpr, n1: int, n2: int
) -> Tuple[Jaxpr, Jaxpr]:
jaxpr1_type, jaxpr2_type = typecheck_jaxpr(jaxpr1), typecheck_jaxpr(jaxpr2)
out_types1, _ = split_list(jaxpr1_type.out_types, len(jaxpr1.outs) - n1)
out_types2, _ = split_list(jaxpr2_type.out_types, len(jaxpr2.outs) - n2)
assert out_types1 == out_types2
outs1, res1 = split_list(jaxpr1.outs, len(jaxpr1.outs) - n1)
outs2, res2 = split_list(jaxpr2.outs, len(jaxpr2.outs) - n2)
zeros_like1 = [Lit(np.zeros(v.aval.shape, v.aval.dtype)) for v in res1]
zeros_like2 = [Lit(np.zeros(v.aval.shape, v.aval.dtype)) for v in res2]
new_jaxpr1 = Jaxpr(jaxpr1.in_binders, jaxpr1.eqns, outs1 + res1 + zeros_like2)
new_jaxpr2 = Jaxpr(jaxpr2.in_binders, jaxpr2.eqns, outs2 + zeros_like1 + res2)
return new_jaxpr1, new_jaxpr2
# -
_, f_lin = linearize(lambda x: cond(True, lambda: x, lambda: 0.), 1.)
out = f_lin(3.14)
print(out)
def cond_peval_eqn(unks_in: List[bool], eqn: JaxprEqn,
) -> Tuple[JaxprEqn, JaxprEqn, List[bool], List[Atom]]:
pred_unk, *unks_in = unks_in
assert not pred_unk
true_jaxpr, false_jaxpr = eqn.params['true_jaxpr'], eqn.params['false_jaxpr']
*jaxprs, unks_out, num_res = _cond_partial_eval(true_jaxpr, false_jaxpr, unks_in)
t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2 = jaxprs
ins1, ins2 = partition_list(unks_in, eqn.inputs[1:])
outs1, outs2 = partition_list(unks_out, eqn.out_binders)
residuals, _ = split_list(t_jaxpr2.in_binders, num_res)
eqn1 = JaxprEqn(cond_p, [eqn.inputs[0], *ins1],
dict(true_jaxpr=t_jaxpr1, false_jaxpr=f_jaxpr1),
outs1 + residuals)
eqn2 = JaxprEqn(cond_p, [eqn.inputs[0], *residuals, *ins2],
dict(true_jaxpr=t_jaxpr2, false_jaxpr=f_jaxpr2),
outs2)
return eqn1, eqn2, unks_out, [eqn.inputs[0], *residuals]
partial_eval_jaxpr_rules[cond_p] = cond_peval_eqn
_, f_lin = linearize(jit(lambda x: cond(True, lambda: x, lambda: 0.)), 1.)
out = f_lin(3.14)
print(out)
# Transposition is a fairly straightforward application of `transpose_jaxpr`:
def cond_transpose_rule(cts, pred, *invals, true_jaxpr, false_jaxpr):
undef_primals = tuple([type(x) is UndefPrimal for x in invals])
true_jaxpr, true_consts = transpose_jaxpr(true_jaxpr, undef_primals)
false_jaxpr, false_consts = transpose_jaxpr(false_jaxpr, undef_primals)
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
res = [x for x in invals if type(x) is not UndefPrimal]
outs = bind_cond(pred, *true_consts, *false_consts, *res, *cts,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
outs = iter(outs)
return [None] + [next(outs) if type(x) is UndefPrimal else None for x in invals]
transpose_rules[cond_p] = cond_transpose_rule
out = grad(lambda x: cond(True, lambda: x * x, lambda: 0.))(1.)
print(out)
# ---
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# jupyter:
# jupytext:
# formats: ipynb,md:myst,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.0
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# [](https://colab.research.google.com/github/google/jax/blob/main/docs/autodidax.ipynb)
# # Autodidax: JAX core from scratch
#
# Ever want to learn how JAX works, but the implementation seemed impenetrable?
# Well, you're in luck! By reading this tutorial, you'll learn every big idea in
# JAX's core system. You'll even get clued into our weird jargon!
#
# **This is a work-in-progress draft.** There are some important ingredients
# missing, still to come in parts 5 and 6 (and more?). There are also some
# simplifications here that we haven't yet applied to the main system, but we
# will.
# ## Part 1: Transformations as interpreters: standard evaluation, `jvp`, and `vmap`
#
# We want to transform functions that look like this:
#
# ```python
# def f(x):
# y = sin(x) * 2.
# z = - y + x
# return z
# ```
#
# Think of functions like `sin` and the arithmetic operations underlying the
# infix operators (`mul`, `add`, and `neg`) as primitive operations, meaning
# atomic units of processing rather than compositions.
#
# "Transform" means "interpret differently." Instead of standard interpretation
# where we apply primitive operations to numerical inputs to produce numerical
# outputs, we want to override primitive application and let different values
# flow through our program. For example, we might want to replace the
# application of every primitive with an application of [its JVP
# rule](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html),
# and let primal-tangent pairs flow through our program. Moreover, we want to be
# able to compose multiple transformations, leading to stacks of interpreters.
# ### JAX core machinery
#
# We can implement stacks of interpreters and even have them all discharge on
# the fly as we execute the Python function to be transformed. To start, let's
# define these primitives so that we can intercept their application:
# +
from typing import NamedTuple
class Primitive(NamedTuple):
name: str
add_p = Primitive('add')
mul_p = Primitive('mul')
neg_p = Primitive('neg')
sin_p = Primitive('sin')
cos_p = Primitive('cos')
reduce_sum_p = Primitive('reduce_sum')
greater_p = Primitive('greater')
less_p = Primitive('less')
transpose_p = Primitive('transpose')
broadcast_p = Primitive('broadcast')
def add(x, y): return bind1(add_p, x, y)
def mul(x, y): return bind1(mul_p, x, y)
def neg(x): return bind1(neg_p, x)
def sin(x): return bind1(sin_p, x)
def cos(x): return bind1(cos_p, x)
def reduce_sum(x, axis=None): return bind1(reduce_sum_p, x, axis=axis)
def greater(x, y): return bind1(greater_p, x, y)
def less(x, y): return bind1(less_p, x, y)
def transpose(x, perm): return bind1(transpose_p, x, perm=perm)
def broadcast(x, shape, axes): return bind1(broadcast_p, x, shape=shape, axes=axes)
def bind1(prim, *args, **params):
out, = bind(prim, *args, **params)
return out
# -
# We'll set up array data types and infix operator methods in a moment.
#
# A `Primitive` is just an object with a name, to which we attach our
# interpretation rules (one for each transformation). The `bind` function is our
# interception point: it'll figure out which transformation rule to apply, based
# on how the arguments are boxed in tracers and what interpreters are active.
#
# The functions that user code calls, like `add` and `sin`, are just wrappers
# around calls to `bind`. These wrappers let us control how arguments are passed
# to `bind`, and in particular we follow a handy internal convention: when we
# call `bind`, we pass values representing array data as positional arguments,
# and we pass metadata like the `axis` argument to `reduce_sum_p` via keyword. This
# calling convention simplifies some core logic (since e.g. instances of the
# `Tracer` class to be defined below can only occur in positional arguments to
# `bind`). The wrappers can also provide docstrings!
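#
# For instance (illustrative only), compare a data argument with a metadata
# parameter in the wrappers above:
#
# ```python
# add(x, y)              # array data -> positional arguments to `bind`
# reduce_sum(x, axis=0)  # metadata   -> keyword parameter to `bind`
# ```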
#
# We represent active interpreters as a stack. The stack is just a simple
# `list`, and each element is a container with an integer level (corresponding
# to the element's height in the stack), an interpreter type (which we'll call a
# `trace_type`), and an optional field for any global data the interpreter
# needs. We call each element a `MainTrace`, though maybe "Interpreter" would be
# more descriptive.
# +
from contextlib import contextmanager
from typing import Type, List, Tuple, Sequence, Optional, Any
class MainTrace(NamedTuple):
level: int
trace_type: Type['Trace']
global_data: Optional[Any]
trace_stack: List[MainTrace] = []
dynamic_trace: Optional[MainTrace] = None # to be employed in Part 3
@contextmanager
def new_main(trace_type: Type['Trace'], global_data=None):
level = len(trace_stack)
main = MainTrace(level, trace_type, global_data)
trace_stack.append(main)
try:
yield main
finally:
trace_stack.pop()
# -
# When we're about to apply a transformation, we'll push another interpreter
# onto the stack using `new_main`. Then, as we apply primitives in the function,
# we can think of the `bind` first being interpreted by the trace at the top of
# the stack (i.e. with the highest level). If that first interpreter itself
# binds other primitives in its interpretation rule for the primitive, like how
# the JVP rule of `sin_p` might bind `cos_p` and `mul_p`, then those `bind`
# calls will be handled by the interpreter at the next level down.
#
# What goes at the bottom of the interpreter stack? At the bottom, we know all
# the transformation interpreters are finished, and we just want to do standard
# evaluation. So at the bottom we'll put an evaluation interpreter.
#
# Let's sketch out the interface for interpreters, which is based on the `Trace`
# and `Tracer` base classes. A `Tracer` represents a boxed-up value, perhaps
# carrying some extra context data used by the interpreter. A `Trace` handles
# boxing up values into `Tracers` and also handles primitive application.
class Trace:
main: MainTrace
def __init__(self, main: MainTrace) -> None:
self.main = main
def pure(self, val): assert False # must override
def lift(self, val): assert False # must override
def process_primitive(self, primitive, tracers, params):
assert False # must override
# The first two methods are about boxing up values in `Tracer`s, which are the
# objects that flow through the Python programs we transform. The last method is
# the callback we'll use to interpret primitive application.
#
# The `Trace` itself doesn't contain any data, other than a reference to its
# corresponding `MainTrace` instance. In fact, multiple instances of a `Trace`
# might be created and discarded during an application of a transformation,
# whereas only a single `MainTrace` instance is created per application of a
# transformation.
#
# As for `Tracer`s themselves, each one carries an abstract value (and forwards
# infix operators to it), and the rest is up to the transformation. (The
# relationship between `Tracer`s and `AbstractValue`s is that there's one
# `Tracer` per transformation, and at least one `AbstractValue` per base type,
# like arrays.)
# +
import numpy as np
class Tracer:
_trace: Trace
__array_priority__ = 1000
@property
def aval(self):
assert False # must override
def full_lower(self):
return self # default implementation
def __neg__(self): return self.aval._neg(self)
def __add__(self, other): return self.aval._add(self, other)
def __radd__(self, other): return self.aval._radd(self, other)
def __mul__(self, other): return self.aval._mul(self, other)
def __rmul__(self, other): return self.aval._rmul(self, other)
def __gt__(self, other): return self.aval._gt(self, other)
def __lt__(self, other): return self.aval._lt(self, other)
def __bool__(self): return self.aval._bool(self)
def __nonzero__(self): return self.aval._nonzero(self)
def __getattr__(self, name):
try:
return getattr(self.aval, name)
except AttributeError:
raise AttributeError(f"{self.__class__.__name__} has no attribute {name}")
def swap(f): return lambda x, y: f(y, x)
# +
class ShapedArray:
array_abstraction_level = 1
shape: Tuple[int]
dtype: np.dtype
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
@property
def ndim(self):
return len(self.shape)
_neg = staticmethod(neg)
_add = staticmethod(add)
_radd = staticmethod(swap(add))
_mul = staticmethod(mul)
_rmul = staticmethod(swap(mul))
_gt = staticmethod(greater)
_lt = staticmethod(less)
@staticmethod
def _bool(tracer):
raise Exception("ShapedArray can't be unambiguously converted to bool")
@staticmethod
def _nonzero(tracer):
raise Exception("ShapedArray can't be unambiguously converted to bool")
def str_short(self):
return f'{self.dtype.name}[{",".join(str(d) for d in self.shape)}]'
def __hash__(self):
return hash((self.shape, self.dtype))
def __eq__(self, other):
return (type(self) is type(other) and
self.shape == other.shape and self.dtype == other.dtype)
def __repr__(self):
return f"ShapedArray(shape={self.shape}, dtype={self.dtype})"
class ConcreteArray(ShapedArray):
array_abstraction_level = 2
val: np.ndarray
def __init__(self, val):
self.val = val
self.shape = val.shape
self.dtype = val.dtype
@staticmethod
def _bool(tracer):
return bool(tracer.aval.val)
@staticmethod
def _nonzero(tracer):
return bool(tracer.aval.val)
def get_aval(x):
if isinstance(x, Tracer):
return x.aval
elif type(x) in jax_types:
return ConcreteArray(np.asarray(x))
else:
raise TypeError(x)
jax_types = {bool, int, float,
np.bool_, np.int32, np.int64, np.float32, np.float64, np.ndarray}
# -
# Notice that we actually have two `AbstractValue`s for arrays, representing
# different levels of abstraction. A `ShapedArray` represents the set of all
# possible arrays with a given shape and dtype. A `ConcreteArray` represents a
# singleton set consisting of a single array value.
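#
# For a concrete contrast (an illustrative sketch):
#
# ```python
# get_aval(np.array([1., 2., 3.]))         # ConcreteArray: knows the values
# ShapedArray((3,), np.dtype('float64'))   # the set of all float64[3] arrays
# ```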
#
# Now that we've set up the interpreter stack, the Trace/Tracer API for
# interpreters, and abstract values, we can come back to implement `bind`:
def bind(prim, *args, **params):
top_trace = find_top_trace(args)
tracers = [full_raise(top_trace, arg) for arg in args]
outs = top_trace.process_primitive(prim, tracers, params)
return [full_lower(out) for out in outs]
# The main action is that we call `find_top_trace` to figure out which
# interpreter should handle this primitive application. We then call that top
# trace's `process_primitive` so that the trace can apply its interpretation
# rule. The calls to `full_raise` just ensure that the inputs are boxed in the
# top trace's `Tracer` instances, and the call to `full_lower` is an optional
# optimization so that we unbox values out of `Tracer`s as much as possible.
# +
import operator as op
def find_top_trace(xs) -> Trace:
top_main = max((x._trace.main for x in xs if isinstance(x, Tracer)),
default=trace_stack[0], key=op.attrgetter('level'))
if dynamic_trace and dynamic_trace.level > top_main.level:
top_main = dynamic_trace
return top_main.trace_type(top_main)
# -
# In words, ignoring the `dynamic_trace` step until Part 3, `find_top_trace`
# returns the highest-level interpreter associated with the `Tracer`s on its
# inputs, and otherwise returns the interpreter at the bottom of the stack
# (which is always an evaluation trace, at least for now). This is a deviation
# from the description above, where we always start by running the interpreter
# at the top of the stack and then work our way down, applying every interpreter
# in the stack. Instead, we're only applying an interpreter when the input
# arguments to a primitive bind are boxed in a `Tracer` corresponding to that
# interpreter. This optimization lets us skip irrelevant transformations, but
# bakes in an assumption that transformations mostly follow data dependence
# (except for the special bottom-of-the-stack interpreter, which interprets
# everything).
#
# An alternative would be to have every interpreter in the stack interpret every
# operation. That's worth exploring! JAX is designed around data dependence in
# large part because that's so natural for automatic differentiation, and JAX's
# roots are in autodiff. But it may be over-fit.
# +
def full_lower(val: Any):
if isinstance(val, Tracer):
return val.full_lower()
else:
return val
def full_raise(trace: Trace, val: Any) -> Tracer:
if not isinstance(val, Tracer):
assert type(val) in jax_types
return trace.pure(val)
level = trace.main.level
if val._trace.main is trace.main:
return val
elif val._trace.main.level < level:
return trace.lift(val)
elif val._trace.main.level > level:
raise Exception(f"Can't lift level {val._trace.main.level} to {level}.")
else: # val._trace.level == level
raise Exception(f"Different traces at same level: {val._trace}, {trace}.")
# -
# The logic in `full_raise` serves to box values into `Tracer`s for a particular
# `Trace`, calling different methods on the `Trace` based on context:
# `Trace.pure` is called on non-`Tracer` constants, and `Trace.lift` is called
# for values that are already `Tracer`s from a lower-level interpreter. These
# two methods could share the same implementation, but by distinguishing them in
# the core logic we can provide more information to the `Trace` subclass.
#
# That's it for the JAX core! Now we can start adding interpreters.
# ### Evaluation interpreter
#
# We'll start with the simplest interpreter: the evaluation interpreter that
# will sit at the bottom of the interpreter stack.
# +
class EvalTrace(Trace):
pure = lift = lambda self, x: x # no boxing in Tracers needed
def process_primitive(self, primitive, tracers, params):
return impl_rules[primitive](*tracers, **params)
trace_stack.append(MainTrace(0, EvalTrace, None)) # special bottom of the stack
# NB: in JAX, instead of a dict we attach impl rules to the Primitive instance
impl_rules = {}
impl_rules[add_p] = lambda x, y: [np.add(x, y)]
impl_rules[mul_p] = lambda x, y: [np.multiply(x, y)]
impl_rules[neg_p] = lambda x: [np.negative(x)]
impl_rules[sin_p] = lambda x: [np.sin(x)]
impl_rules[cos_p] = lambda x: [np.cos(x)]
impl_rules[reduce_sum_p] = lambda x, *, axis: [np.sum(x, axis)]
impl_rules[greater_p] = lambda x, y: [np.greater(x, y)]
impl_rules[less_p] = lambda x, y: [np.less(x, y)]
impl_rules[transpose_p] = lambda x, *, perm: [np.transpose(x, perm)]
def broadcast_impl(x, *, shape, axes):
for axis in sorted(axes):
x = np.expand_dims(x, axis)
return [np.broadcast_to(x, shape)]
impl_rules[broadcast_p] = broadcast_impl
# -
# With this interpreter, we can evaluate user functions:
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
print(f(3.0))
# -
# Woo! Like going around in a big circle. But the point of this indirection is
# that now we can add some real transformations.
# ### Forward-mode autodiff with `jvp`
#
# First, a few helper functions:
# +
def zeros_like(val):
aval = get_aval(val)
return np.zeros(aval.shape, aval.dtype)
def unzip2(pairs):
lst1, lst2 = [], []
for x1, x2 in pairs:
lst1.append(x1)
lst2.append(x2)
return lst1, lst2
map_ = map
def map(f, *xs):
return list(map_(f, *xs))
zip_ = zip
def zip(*args):
fst, *rest = args = map(list, args)
n = len(fst)
for arg in rest:
assert len(arg) == n
return list(zip_(*args))
# -
# The `Tracer` for forward-mode autodiff carries a primal-tangent pair. The
# `Trace` applies JVP rules.
# +
class JVPTracer(Tracer):
def __init__(self, trace, primal, tangent):
self._trace = trace
self.primal = primal
self.tangent = tangent
@property
def aval(self):
return get_aval(self.primal)
class JVPTrace(Trace):
pure = lift = lambda self, val: JVPTracer(self, val, zeros_like(val))
def process_primitive(self, primitive, tracers, params):
primals_in, tangents_in = unzip2((t.primal, t.tangent) for t in tracers)
jvp_rule = jvp_rules[primitive]
primal_outs, tangent_outs = jvp_rule(primals_in, tangents_in, **params)
return [JVPTracer(self, x, t) for x, t in zip(primal_outs, tangent_outs)]
jvp_rules = {}
# -
# Notice both `pure` and `lift` package a value into a `JVPTracer` with the
# minimal amount of context, which is a zero tangent value.
#
# Let's add some JVP rules for primitives:
# +
def add_jvp(primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
return [x + y], [x_dot + y_dot]
jvp_rules[add_p] = add_jvp
def mul_jvp(primals, tangents):
(x, y), (x_dot, y_dot) = primals, tangents
return [x * y], [x_dot * y + x * y_dot]
jvp_rules[mul_p] = mul_jvp
def sin_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [sin(x)], [cos(x) * x_dot]
jvp_rules[sin_p] = sin_jvp
def cos_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [cos(x)], [-sin(x) * x_dot]
jvp_rules[cos_p] = cos_jvp
def neg_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
return [neg(x)], [neg(x_dot)]
jvp_rules[neg_p] = neg_jvp
def reduce_sum_jvp(primals, tangents, *, axis):
(x,), (x_dot,) = primals, tangents
return [reduce_sum(x, axis)], [reduce_sum(x_dot, axis)]
jvp_rules[reduce_sum_p] = reduce_sum_jvp
def greater_jvp(primals, tangents):
(x, y), _ = primals, tangents
out_primal = greater(x, y)
return [out_primal], [zeros_like(out_primal)]
jvp_rules[greater_p] = greater_jvp
def less_jvp(primals, tangents):
(x, y), _ = primals, tangents
out_primal = less(x, y)
return [out_primal], [zeros_like(out_primal)]
jvp_rules[less_p] = less_jvp
# -
# Finally, we add a transformation API to kick off the trace:
def jvp_v1(f, primals, tangents):
with new_main(JVPTrace) as main:
trace = JVPTrace(main)
tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)]
out = f(*tracers_in)
tracer_out = full_raise(trace, out)
primal_out, tangent_out = tracer_out.primal, tracer_out.tangent
return primal_out, tangent_out
# And with that, we can differentiate!
x = 3.0
y, sin_deriv_at_3 = jvp_v1(sin, (x,), (1.0,))
print(sin_deriv_at_3)
print(cos(3.0))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
x, xdot = 3., 1.
y, ydot = jvp_v1(f, (x,), (xdot,))
print(y)
print(ydot)
# +
def deriv(f):
return lambda x: jvp_v1(f, (x,), (1.,))[1]
print(deriv(sin)(3.))
print(deriv(deriv(sin))(3.))
print(deriv(deriv(deriv(sin)))(3.))
print(deriv(deriv(deriv(deriv(sin))))(3.))
# +
def f(x):
if x > 0.: # Python control flow
return 2. * x
else:
return x
print(deriv(f)(3.))
print(deriv(f)(-3.))
# -
# ## Pytrees and flattening user functions' inputs and outputs
# A limitation with `jvp_v1` is that it assumes the user function accepts arrays
# as positional arguments and produces a single array as output. What if it
# produced a list as output? Or accepted nested containers as inputs? It would
# be a pain to deal with all the possible containers in inputs and outputs at
# every layer of the stack. Instead, we can wrap the user function so that the
# wrapped version accepts arrays as inputs and returns a flat list of arrays as
# output. The wrapper just needs to unflatten its input, call the user function,
# and flatten the output.
#
# Here's how we'd like to write `jvp`, assuming the user always gives us
# functions that take arrays as inputs and produce a flat list of arrays as
# outputs:
def jvp_flat(f, primals, tangents):
with new_main(JVPTrace) as main:
trace = JVPTrace(main)
tracers_in = [JVPTracer(trace, x, t) for x, t in zip(primals, tangents)]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
primals_out, tangents_out = unzip2((t.primal, t.tangent) for t in tracers_out)
return primals_out, tangents_out
# To support user functions that have arbitrary containers in the inputs and
# outputs, here's how we'd write the user-facing `jvp` wrapper:
def jvp(f, primals, tangents):
primals_flat, in_tree = tree_flatten(primals)
tangents_flat, in_tree2 = tree_flatten(tangents)
if in_tree != in_tree2: raise TypeError
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, tangents_out_flat = jvp_flat(f, primals_flat, tangents_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
tangents_out = tree_unflatten(out_tree(), tangents_out_flat)
return primals_out, tangents_out
# Notice that we had to plumb the tree structure of the user function output
# back to the caller of `flatten_fun`. That information isn't available until we
# actually run the user function, so `flatten_fun` just returns a reference to a
# mutable cell, represented as a thunk. These side-effects are safe because we
# always run the user function exactly once. (This safe regime is the reason for
# the "linear" name in `linear_util.py`, in the sense of [linear
# types](https://en.wikipedia.org/wiki/Substructural_type_system).)
#
# All that remains is to write `tree_flatten`, `tree_unflatten`, and
# `flatten_fun`.
# + tags=["hide-input"]
def flatten_fun(f, in_tree):
store = Store()
def flat_fun(*args_flat):
pytree_args = tree_unflatten(in_tree, args_flat)
out = f(*pytree_args)
out_flat, out_tree = tree_flatten(out)
store.set_value(out_tree)
return out_flat
return flat_fun, store
class Empty: pass
empty = Empty()
class Store:
val = empty
def set_value(self, val):
assert self.val is empty
self.val = val
def __call__(self):
return self.val
# + tags=["hide-input"]
import itertools as it
from typing import Callable, Type, Hashable, Dict, Iterable, Iterator
class NodeType(NamedTuple):
name: str
to_iterable: Callable
from_iterable: Callable
def register_pytree_node(ty: Type, to_iter: Callable, from_iter: Callable
) -> None:
node_types[ty] = NodeType(str(ty), to_iter, from_iter)
node_types: Dict[Type, NodeType] = {}
register_pytree_node(tuple, lambda t: (None, t), lambda _, xs: tuple(xs))
register_pytree_node(list, lambda l: (None, l), lambda _, xs: list(xs))
register_pytree_node(dict,
lambda d: map(tuple, unzip2(sorted(d.items()))),
lambda keys, vals: dict(zip(keys, vals)))
class PyTreeDef(NamedTuple):
node_type: NodeType
node_metadata: Hashable
child_treedefs: Tuple['PyTreeDef']
class Leaf: pass
leaf = Leaf()
def tree_flatten(x: Any) -> Tuple[List[Any], PyTreeDef]:
children_iter, treedef = _tree_flatten(x)
return list(children_iter), treedef
def _tree_flatten(x: Any) -> Tuple[Iterable, PyTreeDef]:
node_type = node_types.get(type(x))
if node_type:
node_metadata, children = node_type.to_iterable(x)
children_flat, child_trees = unzip2(map(_tree_flatten, children))
flattened = it.chain.from_iterable(children_flat)
return flattened, PyTreeDef(node_type, node_metadata, tuple(child_trees))
else:
return [x], leaf
def tree_unflatten(treedef: PyTreeDef, xs: List[Any]) -> Any:
return _tree_unflatten(treedef, iter(xs))
def _tree_unflatten(treedef: PyTreeDef, xs: Iterator) -> Any:
if treedef is leaf:
return next(xs)
else:
children = (_tree_unflatten(t, xs) for t in treedef.child_treedefs)
return treedef.node_type.from_iterable(treedef.node_metadata, children)
# -
# With this pytree-handling `jvp` implementation, we can now handle arbitrary
# input and output containers. That'll come in handy with future transformations
# too!
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return {'hi': z, 'there': [x, y]}
x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -
# ### Vectorized batching with `vmap`
#
# First, a couple helper functions, one for producing mapped abstract values
# from unmapped ones (by removing an axis), and one for moving batch dimensions
# around:
# +
def mapped_aval(batch_dim, aval):
shape = list(aval.shape)
del shape[batch_dim]
return ShapedArray(tuple(shape), aval.dtype)
def move_batch_axis(axis_size, src, dst, x):
if src is not_mapped:
target_shape = list(np.shape(x))
target_shape.insert(dst, axis_size)
return broadcast(x, target_shape, [dst])
elif src == dst:
return x
else:
return moveaxis(x, src, dst)
def moveaxis(x, src: int, dst: int):
perm = [i for i in range(np.ndim(x)) if i != src]
perm.insert(dst, src)
return transpose(x, perm)
# -
# The `Tracer` for vectorized batching carries a batched value and an optional
# integer indicating which axis (if any) is the batch axis.
# +
from typing import Union
class NotMapped: pass
not_mapped = NotMapped()
BatchAxis = Union[NotMapped, int]
class BatchTracer(Tracer):
def __init__(self, trace, val, batch_dim: BatchAxis):
self._trace = trace
self.val = val
self.batch_dim = batch_dim
@property
def aval(self):
if self.batch_dim is not_mapped:
return get_aval(self.val)
else:
return mapped_aval(self.batch_dim, get_aval(self.val))
def full_lower(self):
if self.batch_dim is not_mapped:
return full_lower(self.val)
else:
return self
class BatchTrace(Trace):
pure = lift = lambda self, val: BatchTracer(self, val, not_mapped)
def process_primitive(self, primitive, tracers, params):
vals_in, bdims_in = unzip2((t.val, t.batch_dim) for t in tracers)
vmap_rule = vmap_rules[primitive]
val_outs, bdim_outs = vmap_rule(self.axis_size, vals_in, bdims_in, **params)
return [BatchTracer(self, x, bd) for x, bd in zip(val_outs, bdim_outs)]
@property
def axis_size(self):
return self.main.global_data
vmap_rules = {}
# -
# Here we've implemented the optional `Tracer.full_lower` method, which lets us
# peel off a batching tracer if it's not needed because it doesn't represent a
# batched value.
#
# For `BatchTrace`, analogous to `JVPTrace`, the methods `pure` and `lift` just
# box a value in a `BatchTracer` with the minimal amount of context, which in
# this case is a `batch_dim` taking the sentinel value `not_mapped`. Notice we
# use the `MainTrace`'s interpreter-global data field to store the batch axis
# size.
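#
# As a quick sketch (hypothetical shapes, for illustration): a `BatchTracer`
# holding a value of shape `(10, 3)` with `batch_dim=0`, under an interpreter
# whose `axis_size` is 10, reports `aval == ShapedArray((3,), ...)`; that is,
# it presents itself to the traced code as an unbatched length-3 vector.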
#
# Next we can define batching interpreter rules for each primitive:
# +
from functools import partial
def binop_batching_rule(op, axis_size, vals_in, dims_in):
(x, y), (x_bdim, y_bdim) = vals_in, dims_in
if x_bdim != y_bdim:
if x_bdim is not_mapped:
x = move_batch_axis(axis_size, x_bdim, y_bdim, x)
x_bdim = y_bdim
else:
y = move_batch_axis(axis_size, y_bdim, x_bdim, y)
return [op(x, y)], [x_bdim]
vmap_rules[add_p] = partial(binop_batching_rule, add)
vmap_rules[mul_p] = partial(binop_batching_rule, mul)
def vectorized_unop_batching_rule(op, axis_size, vals_in, dims_in):
(x,), (x_bdim,) = vals_in, dims_in
return [op(x)], [x_bdim]
vmap_rules[sin_p] = partial(vectorized_unop_batching_rule, sin)
vmap_rules[cos_p] = partial(vectorized_unop_batching_rule, cos)
vmap_rules[neg_p] = partial(vectorized_unop_batching_rule, neg)
def reduce_sum_batching_rule(axis_size, vals_in, dims_in, *, axis):
(x,), (x_bdim,) = vals_in, dims_in
new_axis = axis + (x_bdim <= axis)
out_bdim = x_bdim - (new_axis < x_bdim)
return [reduce_sum(x, new_axis)], [out_bdim]
vmap_rules[reduce_sum_p] = reduce_sum_batching_rule
# -
# Finally, we add a transformation API to kick off the trace:
# +
def vmap_flat(f, in_axes, *args):
axis_size, = {x.shape[ax] for x, ax in zip(args, in_axes)
if ax is not not_mapped}
with new_main(BatchTrace, axis_size) as main:
trace = BatchTrace(main)
tracers_in = [BatchTracer(trace, x, ax) if ax is not None else x
for x, ax in zip(args, in_axes)]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
vals_out, bdims_out = unzip2((t.val, t.batch_dim) for t in tracers_out)
outs_transposed = [move_batch_axis(axis_size, bdim, 0, val_out)
for val_out, bdim in zip(vals_out, bdims_out)]
return outs_transposed
def vmap(f, in_axes):
def batched_f(*args):
args_flat, in_tree = tree_flatten(args)
in_axes_flat, in_tree2 = tree_flatten(in_axes)
if in_tree != in_tree2: raise TypeError
f_flat, out_tree = flatten_fun(f, in_tree)
outs_flat = vmap_flat(f_flat, in_axes_flat, *args_flat)
return tree_unflatten(out_tree(), outs_flat)
return batched_f
# +
def add_one_to_a_scalar(scalar):
assert np.ndim(scalar) == 0
return 1 + scalar
vector_in = np.arange(3.)
vector_out = vmap(add_one_to_a_scalar, (0,))(vector_in)
print(vector_in)
print(vector_out)
# +
def jacfwd(f, x):
pushfwd = lambda v: jvp(f, (x,), (v,))[1]
vecs_in = np.eye(np.size(x)).reshape(np.shape(x) * 2)
return vmap(pushfwd, (0,))(vecs_in)
def f(x):
return sin(x)
jacfwd(f, np.arange(3.))
# -
# That's it for `jvp` and `vmap`!
# ## Part 2: Jaxprs
#
# The next transformations on the horizon are `jit` for just-in-time
# compilation and `vjp` for reverse-mode autodiff. (`grad` is just a small
# wrapper around `vjp`.) Whereas `jvp` and `vmap` only needed each `Tracer` to
# carry a little bit of extra context, for both `jit` and `vjp` we need much
# richer context: we need to represent _programs_. That is, we need jaxprs!
#
# Jaxprs are JAX's internal intermediate representation of programs. They are
# explicitly typed, functional, first-order, and in ANF form. We need a
# program representation for `jit` because the purpose of `jit` is to stage
# computation out of Python. For any computation we want to stage out, we need
# to be able to represent it as data, and build it up as we trace a Python
# function. Similarly, `vjp` needs a way to represent the computation for the
# backward pass of reverse-mode autodiff. We use the same jaxpr program
# representation for both needs.
#
# (Building a program representation is the most
# [free](https://en.wikipedia.org/wiki/Free_object) kind of
# trace-transformation, and so except for issues around handling native Python
# control flow, any transformation could be implemented by first tracing to a
# jaxpr and then interpreting the jaxpr.)
# ### Jaxpr data structures
#
# The jaxpr term syntax is roughly:
#
# ```
# jaxpr ::=
# { lambda <binder> , ... .
# let <eqn>
# ...
# in ( <atom> , ... ) }
#
# binder ::= <var>:<array_type>
# var ::= a | b | c | ...
# atom ::= <var> | <literal>
# literal ::= <int32> | <int64> | <float32> | <float64>
#
# eqn ::= <binder> , ... = <primitive> [ <params> ] <atom> , ...
# ```
#
# The syntax of types is:
#
# ```
# jaxpr_type ::= [ <array_type> , ... ] -> [ <array_type> , ... ]
# array_type ::= <dtype>[<shape>]
# dtype ::= f32 | f64 | i32 | i64
# shape ::= <int> , ...
# ```
#
# How do we represent these as Python data structures? We reuse ShapedArrays to
# represent types, and we can represent the term syntax with a few Python
# structs:
# +
from typing import Set
class Var:
aval: ShapedArray
def __init__(self, aval): self.aval = aval
class Lit:
val: Any
aval: ShapedArray
def __init__(self, val):
self.aval = aval = raise_to_shaped(get_aval(val))
self.val = np.array(val, aval.dtype)
Atom = Union[Var, Lit]
class JaxprEqn(NamedTuple):
primitive: Primitive
inputs: List[Atom]
params: Dict[str, Any]
out_binders: List[Var]
class Jaxpr(NamedTuple):
in_binders: List[Var]
eqns: List[JaxprEqn]
outs: List[Atom]
def __hash__(self): return id(self)
__eq__ = op.is_
def raise_to_shaped(aval):
return ShapedArray(aval.shape, aval.dtype)
# -
# Type-checking a jaxpr involves checking that there are no unbound variables,
# that variables are only bound once, and that for each equation the type of
# the primitive application matches the type of the output binders.
# +
class JaxprType(NamedTuple):
in_types: List[ShapedArray]
out_types: List[ShapedArray]
def __repr__(self):
in_types = ', '.join(aval.str_short() for aval in self.in_types)
out_types = ', '.join(aval.str_short() for aval in self.out_types)
return f'({in_types}) -> ({out_types})'
def typecheck_jaxpr(jaxpr: Jaxpr) -> JaxprType:
env: Set[Var] = set()
for v in jaxpr.in_binders:
if v in env: raise TypeError
env.add(v)
for eqn in jaxpr.eqns:
in_types = [typecheck_atom(env, x) for x in eqn.inputs]
out_types = abstract_eval_rules[eqn.primitive](*in_types, **eqn.params)
for out_binder, out_type in zip(eqn.out_binders, out_types):
if not out_type == out_binder.aval: raise TypeError
for out_binder in eqn.out_binders:
if out_binder in env: raise TypeError
env.add(out_binder)
in_types = [v.aval for v in jaxpr.in_binders]
out_types = [typecheck_atom(env, x) for x in jaxpr.outs]
return JaxprType(in_types, out_types)
def typecheck_atom(env: Set[Var], x: Atom) -> ShapedArray:
if isinstance(x, Var):
if x not in env: raise TypeError("unbound variable")
return x.aval
elif isinstance(x, Lit):
return raise_to_shaped(get_aval(x.val))
else:
assert False
# -
# We can apply the function represented by a jaxpr to arguments with a simple
# interpreter.
# +
def eval_jaxpr(jaxpr: Jaxpr, args: List[Any]) -> List[Any]:
env: Dict[Var, Any] = {}
def read(x: Atom) -> Any:
return env[x] if type(x) is Var else x.val
def write(v: Var, val: Any) -> None:
assert v not in env # single-assignment
env[v] = val
map(write, jaxpr.in_binders, args)
for eqn in jaxpr.eqns:
in_vals = map(read, eqn.inputs)
outs = bind(eqn.primitive, *in_vals, **eqn.params)
map(write, eqn.out_binders, outs)
return map(read, jaxpr.outs)
def jaxpr_as_fun(jaxpr: Jaxpr):
return lambda *args: eval_jaxpr(jaxpr, args)
# -
# Because it applies each primitive with `bind`, this interpreter is itself
# traceable: running it under other interpreters transforms the jaxpr's
# computation.
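# As an illustrative check (assuming `sin_p` and its impl rule from Part 1), we
# can build the jaxpr `{ lambda a:float64[] . let b:float64[] = sin a in ( b ) }`
# by hand and run it through the interpreter:
# +
a = Var(ShapedArray((), np.dtype('float64')))
b = Var(ShapedArray((), np.dtype('float64')))
tiny_jaxpr = Jaxpr([a], [JaxprEqn(sin_p, [a], {}, [b])], [b])
print(eval_jaxpr(tiny_jaxpr, [3.]))  # [sin(3.0)]
# -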
# ### Building jaxprs with tracing
#
# Now that we have jaxprs as a data structure, we need ways to produce these
# from tracing Python code. In general there are two variants of how we trace to
# a jaxpr; `jit` uses one and `vjp` uses the other. We'll start with the one
# used by `jit`, which is also used by control flow primitives like `lax.cond`,
# `lax.while_loop`, and `lax.scan`.
# +
def split_list(lst: List[Any], n: int) -> Tuple[List[Any], List[Any]]:
assert 0 <= n <= len(lst)
return lst[:n], lst[n:]
def partition_list(bs: List[bool], l: List[Any]) -> Tuple[List[Any], List[Any]]:
assert len(bs) == len(l)
lists = lst1, lst2 = [], []
for b, x in zip(bs, l):
lists[b].append(x)
return lst1, lst2
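# Quick illustrative checks of these helpers: split_list cuts a list at an
# index, and partition_list routes elements into two lists by a boolean mask
# (True selects the second list).
assert split_list([1, 2, 3, 4], 1) == ([1], [2, 3, 4])
assert partition_list([True, False, True], [1, 2, 3]) == ([2], [1, 3])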
# +
# NB: the analogous class in JAX is called 'DynamicJaxprTracer'
class JaxprTracer(Tracer):
__slots__ = ['aval']
aval: ShapedArray
def __init__(self, trace, aval):
self._trace = trace
self.aval = aval
# NB: the analogous class in JAX is called 'DynamicJaxprTrace'
class JaxprTrace(Trace):
def new_arg(self, aval: ShapedArray) -> JaxprTracer:
aval = raise_to_shaped(aval)
tracer = self.builder.new_tracer(self, aval)
self.builder.tracer_to_var[id(tracer)] = Var(aval)
return tracer
def get_or_make_const_tracer(self, val: Any) -> JaxprTracer:
tracer = self.builder.const_tracers.get(id(val))
if tracer is None:
tracer = self.builder.new_tracer(self, raise_to_shaped(get_aval(val)))
self.builder.add_const(tracer, val)
return tracer
pure = lift = get_or_make_const_tracer
def process_primitive(self, primitive, tracers, params):
avals_in = [t.aval for t in tracers]
avals_out = abstract_eval_rules[primitive](*avals_in, **params)
out_tracers = [self.builder.new_tracer(self, a) for a in avals_out]
inputs = [self.builder.getvar(t) for t in tracers]
outvars = [self.builder.add_var(t) for t in out_tracers]
self.builder.add_eqn(JaxprEqn(primitive, inputs, params, outvars))
return out_tracers
@property
def builder(self):
return self.main.global_data
# NB: in JAX, we instead attach abstract eval rules to Primitive instances
abstract_eval_rules = {}
# -
# Notice that we keep as interpreter-global data a builder object, which keeps
# track of variables, constants, and eqns as we build up the jaxpr.
class JaxprBuilder:
eqns: List[JaxprEqn]
tracer_to_var: Dict[int, Var]
const_tracers: Dict[int, JaxprTracer]
constvals: Dict[Var, Any]
tracers: List[JaxprTracer]
def __init__(self):
self.eqns = []
self.tracer_to_var = {}
self.const_tracers = {}
self.constvals = {}
self.tracers = []
def new_tracer(self, trace: JaxprTrace, aval: ShapedArray) -> JaxprTracer:
tracer = JaxprTracer(trace, aval)
self.tracers.append(tracer)
return tracer
def add_eqn(self, eqn: JaxprEqn) -> None:
self.eqns.append(eqn)
def add_var(self, tracer: JaxprTracer) -> Var:
assert id(tracer) not in self.tracer_to_var
var = self.tracer_to_var[id(tracer)] = Var(tracer.aval)
return var
def getvar(self, tracer: JaxprTracer) -> Var:
var = self.tracer_to_var.get(id(tracer))
assert var is not None
return var
def add_const(self, tracer: JaxprTracer, val: Any) -> Var:
var = self.add_var(tracer)
self.const_tracers[id(val)] = tracer
self.constvals[var] = val
return var
def build(self, in_tracers: List[JaxprTracer], out_tracers: List[JaxprTracer]
) -> Tuple[Jaxpr, List[Any]]:
constvars, constvals = unzip2(self.constvals.items())
t2v = lambda t: self.tracer_to_var[id(t)]
in_binders = constvars + [t2v(t) for t in in_tracers]
out_vars = [t2v(t) for t in out_tracers]
jaxpr = Jaxpr(in_binders, self.eqns, out_vars)
typecheck_jaxpr(jaxpr)
jaxpr, constvals = _inline_literals(jaxpr, constvals)
return jaxpr, constvals
def _inline_literals(jaxpr: Jaxpr, consts: List[Any]) -> Tuple[Jaxpr, List[Any]]:
const_binders, other_binders = split_list(jaxpr.in_binders, len(consts))
scalars = [type(x) in jax_types and not get_aval(x).shape for x in consts]
new_const_binders, lit_binders = partition_list(scalars, const_binders)
new_consts, lit_vals = partition_list(scalars, consts)
literals = dict(zip(lit_binders, map(Lit, lit_vals)))
new_eqns = [JaxprEqn(eqn.primitive, [literals.get(x, x) for x in eqn.inputs],
eqn.params, eqn.out_binders) for eqn in jaxpr.eqns]
new_outs = [literals.get(x, x) for x in jaxpr.outs]
new_jaxpr = Jaxpr(new_const_binders + other_binders, new_eqns, new_outs)
typecheck_jaxpr(new_jaxpr)
return new_jaxpr, new_consts
# The rules we need for `JaxprTrace.process_primitive` are essentially typing
# rules for primitive applications: given the primitive, its parameters, and
# types for the inputs, the rule must produce a type for the output, which is
# then packaged with the output `JaxprTracer`. We can use abstract evaluation
# rules for this same purpose, even though they can be more general (since
# abstract evaluation rules must accept ConcreteArray inputs, and since they
# need only return an upper bound on the set of possible outputs, they can
# produce ConcreteArray outputs as well). We'll reuse these abstract evaluation
# rules for the other jaxpr-producing trace machinery, where the potential extra
# generality is useful.
# +
def binop_abstract_eval(x: ShapedArray, y: ShapedArray) -> List[ShapedArray]:
if not isinstance(x, ShapedArray) or not isinstance(y, ShapedArray):
raise TypeError
if raise_to_shaped(x) != raise_to_shaped(y): raise TypeError
return [ShapedArray(x.shape, x.dtype)]
abstract_eval_rules[add_p] = binop_abstract_eval
abstract_eval_rules[mul_p] = binop_abstract_eval
def compare_abstract_eval(x: ShapedArray, y: ShapedArray) -> List[ShapedArray]:
if not isinstance(x, ShapedArray) or not isinstance(y, ShapedArray):
raise TypeError
if x.shape != y.shape: raise TypeError
return [ShapedArray(x.shape, np.dtype('bool'))]
abstract_eval_rules[greater_p] = compare_abstract_eval
abstract_eval_rules[less_p] = compare_abstract_eval
def vectorized_unop_abstract_eval(x: ShapedArray) -> List[ShapedArray]:
return [ShapedArray(x.shape, x.dtype)]
abstract_eval_rules[sin_p] = vectorized_unop_abstract_eval
abstract_eval_rules[cos_p] = vectorized_unop_abstract_eval
abstract_eval_rules[neg_p] = vectorized_unop_abstract_eval
def reduce_sum_abstract_eval(x: ShapedArray, *, axis: int) -> List[ShapedArray]:
new_shape = [d for i, d in enumerate(x.shape) if i != axis]
return [ShapedArray(tuple(new_shape), x.dtype)]
abstract_eval_rules[reduce_sum_p] = reduce_sum_abstract_eval
def broadcast_abstract_eval(x: ShapedArray, *, shape: Sequence[int],
axes: Sequence[int]) -> List[ShapedArray]:
return [ShapedArray(tuple(shape), x.dtype)]
abstract_eval_rules[broadcast_p] = broadcast_abstract_eval
# -
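# As a small illustrative check using the rules above: applying an abstract eval
# rule to `ShapedArray`s produces an output *type* without touching any concrete
# values.
# +
out_aval, = binop_abstract_eval(ShapedArray((3,), np.dtype('float64')),
                                ShapedArray((3,), np.dtype('float64')))
print(out_aval.str_short())  # float64[3]
# -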
# To check our implementation of jaxprs, we can add a `make_jaxpr`
# transformation and a pretty-printer:
# +
from functools import lru_cache
@lru_cache() # ShapedArrays are hashable
def make_jaxpr_v1(f, *avals_in):
avals_in, in_tree = tree_flatten(avals_in)
f, out_tree = flatten_fun(f, in_tree)
builder = JaxprBuilder()
with new_main(JaxprTrace, builder) as main:
trace = JaxprTrace(main)
tracers_in = [trace.new_arg(aval) for aval in avals_in]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
jaxpr, consts = builder.build(tracers_in, tracers_out)
return jaxpr, consts, out_tree()
# + tags=["hide-input"]
from collections import defaultdict
import string
class PPrint:
lines: List[Tuple[int, str]]
def __init__(self, lines):
self.lines = lines
def indent(self, indent: int) -> 'PPrint':
return PPrint([(indent + orig_indent, s) for orig_indent, s in self.lines])
def __add__(self, rhs: 'PPrint') -> 'PPrint':
return PPrint(self.lines + rhs.lines)
def __rshift__(self, rhs: 'PPrint') -> 'PPrint':
if not rhs.lines: return self
if not self.lines: return rhs
indent, s = self.lines[-1]
indented_block = rhs.indent(indent + len(s))
common_line = s + ' ' * rhs.lines[0][0] + rhs.lines[0][1]
return PPrint(self.lines[:-1]
+ [(indent, common_line)]
+ indented_block.lines[1:])
def __str__(self) -> str:
return '\n'.join(' ' * indent + s for indent, s in self.lines)
def pp(s: Any) -> PPrint:
return PPrint([(0, line) for line in str(s).splitlines()])
def vcat(ps: List[PPrint]) -> PPrint:
return sum(ps, pp(''))
def pp_jaxpr(jaxpr: Jaxpr):
namegen = (''.join(s) for r in it.count(1)
for s in it.permutations(string.ascii_lowercase, r))
names = defaultdict(lambda: next(namegen))
in_binders = ', '.join(var_str(names, x) for x in jaxpr.in_binders)
eqns = vcat([pp_eqn(names, e) for e in jaxpr.eqns])
outs = ', '.join(names[v] if isinstance(v, Var) else str(v.val)
for v in jaxpr.outs)
return (pp(f'{{ lambda {in_binders} .') +
((pp('let ') >> eqns) + pp(f'in ( {outs} ) }}')).indent(2))
def var_str(names: Dict[Var, str], v: Var) -> str:
return f'{names[v]}:{v.aval.str_short()}'
def pp_eqn(names: Dict[Var, str], eqn: JaxprEqn) -> PPrint:
lhs = pp(' '.join(var_str(names, v) for v in eqn.out_binders))
rhs = (pp(eqn.primitive.name) >> pp_params(eqn.params) >>
pp(' '.join(names[x] if isinstance(x, Var) else str(x.val)
for x in eqn.inputs)))
return lhs >> pp(' = ') >> rhs
def pp_params(params: Dict[str, Any]) -> PPrint:
items = sorted(params.items())
if items:
return pp(' [ ') >> vcat([pp(f'{k}={v}') for k, v in items]) >> pp(' ] ')
else:
return pp(' ')
Jaxpr.__repr__ = lambda self: str(pp_jaxpr(self))
# -
jaxpr, consts, _ = make_jaxpr_v1(lambda x: 2. * x, raise_to_shaped(get_aval(3.)))
print(jaxpr)
print(typecheck_jaxpr(jaxpr))
# But there's a limitation here: because of how `find_top_trace` operates by
# data dependence, `make_jaxpr_v1` can't stage out all the primitive operations
# performed by the Python callable it's given. For example:
jaxpr, consts, _ = make_jaxpr_v1(lambda: mul(2., 2.))
print(jaxpr)
# This is precisely the issue that
# [omnistaging](https://github.com/google/jax/pull/3370) fixed.
# We want to ensure that the `JaxprTrace` started by `make_jaxpr` is always
# applied, regardless of whether any inputs to `bind` are boxed in corresponding
# `JaxprTracer` instances. We can achieve this by employing the `dynamic_trace`
# global defined in Part 1:
# +
@contextmanager
def new_dynamic(main: MainTrace):
global dynamic_trace
prev_dynamic_trace, dynamic_trace = dynamic_trace, main
try:
yield
finally:
dynamic_trace = prev_dynamic_trace
@lru_cache()
def make_jaxpr(f: Callable, *avals_in: ShapedArray,
) -> Tuple[Jaxpr, List[Any], PyTreeDef]:
avals_in, in_tree = tree_flatten(avals_in)
f, out_tree = flatten_fun(f, in_tree)
builder = JaxprBuilder()
with new_main(JaxprTrace, builder) as main:
with new_dynamic(main):
trace = JaxprTrace(main)
tracers_in = [trace.new_arg(aval) for aval in avals_in]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
jaxpr, consts = builder.build(tracers_in, tracers_out)
return jaxpr, consts, out_tree()
jaxpr, consts, _ = make_jaxpr(lambda: mul(2., 2.))
print(jaxpr)
# -
# Using `dynamic_trace` this way is conceptually the same as stashing the
# current interpreter stack and starting a new one with the `JaxprTrace` at the
# bottom. That is, no interpreters lower in the stack than the `dynamic_trace`
# are applied (since `JaxprTrace.process_primitive` doesn't call `bind`), though
# if the Python callable being traced to a jaxpr itself uses transformations
# then those can be pushed onto the interpreter stack above the `JaxprTrace`.
# But temporarily stashing the interpreter stack would break up the system
# state. The `dynamic_trace` tag achieves the same goals while keeping the
# system state simpler.
# That's it for jaxprs! With jaxprs in hand, we can implement the remaining
# major JAX features.
# ## Part 3: `jit`, simplified
#
# While `jit` has a transformation-like API in that it accepts a Python callable
# as an argument, under the hood it's really a higher-order primitive rather
# than a transformation. A primitive is _higher-order_ when it's parameterized
# by a function.
# ### On-the-fly ("final style") and staged ("initial style") processing
#
# There are two options for how to handle higher-order primitives. Each requires
# a different approach to tracing and engenders different tradeoffs:
# 1. **On-the-fly processing, where `bind` takes a Python callable as an
# argument.** We defer forming a jaxpr until as late as possible, namely
# until we're running the final interpreter at the bottom of the interpreter
# stack. That way we can swap a `JaxprTrace` in at the bottom of the
# interpreter stack and thus stage out rather than execute all primitive
# operations. With this approach, transformations in the stack get applied as
# we execute the Python callable as usual. This approach can be very tricky
# to implement, but it's as general as possible because it allows
# higher-order primitives not to raise the abstraction level of their
# arguments and thus allows data-dependent Python control flow. We refer to
# this approach as using a "final-style higher-order primitive" employing the
# discharge-at-tracing-time "final-style transformations" we've used so far.
# 2. **Staged processing, where `bind` takes a jaxpr as an argument.** Before we
# call `bind`, in the primitive wrapper we can just use `make_jaxpr` to form
# a jaxpr up-front and be done with the Python callable entirely. In this
# case, `make_jaxpr` puts its `JaxprTrace` at the top of the interpreter
# stack, and no transformations lower in the stack, which might enter via
# closed-over Tracers, are applied to the Python callable as we trace it.
# (Transformations applied within the Python callable are applied as usual,
# being added to the stack above the JaxprTrace.) Instead, the
# transformations lower in the stack are later applied to the call primitive,
# and the call primitive's rules must then transform the jaxpr itself.
# Because we trace to a jaxpr up-front, this approach can't support
# data-dependent Python control flow, but it is more straightforward to
# implement. We refer to this kind of higher-order primitive as an
# "initial-style higher-order primitive", and say that its jaxpr-processing
# transformation rules are "initial-style transformation rules."
#
# The latter approach fits for `jit` because we don't need to support
# data-dependent Python control flow in the user-provided Python callable, as
# the whole purpose of `jit` is to stage computation out of Python to be
# executed by XLA. (In contrast, `custom_jvp` is a higher-order primitive in
# which we want to support data-dependent Python control flow.)
#
# Historically, we started using the "initial-style" and "final-style"
# terminology after reading the [typed tagless final
# interpreters](http://okmij.org/ftp/tagless-final/index.html) paper, and
# jokingly referring to JAX as an implementation of "untyped tagful final
# interpreters." We don't claim to carry over (or understand) any deep meaning
# behind these terms; we loosely use "initial style" to mean "build an AST and
# then transform it", and we use "final style" to mean "transform as we trace."
# But it's just imprecise yet sticky jargon.
# With the initial-style approach, here's the user-facing `jit` wrapper:
# +
def jit(f):
def f_jitted(*args):
avals_in = [raise_to_shaped(get_aval(x)) for x in args]
jaxpr, consts, out_tree = make_jaxpr(f, *avals_in)
outs = bind(xla_call_p, *consts, *args, jaxpr=jaxpr, num_consts=len(consts))
return tree_unflatten(out_tree, outs)
return f_jitted
xla_call_p = Primitive('xla_call')
# -
# With any new primitive, we need to give it transformation rules, starting with
# its evaluation rule. When we evaluate an application of the `xla_call`
# primitive, we want to stage out the computation to XLA. That involves
# translating the jaxpr to an XLA HLO program, transferring the argument values
# to the XLA device, executing the XLA program, and transferring back the
# results. We'll cache the XLA HLO compilation so that for each `jit`ted
# function it only needs to be performed once per argument shape and dtype
# signature.
#
# First, some utilities.
class IDHashable:
val: Any
def __init__(self, val):
self.val = val
def __hash__(self) -> int:
return id(self.val)
def __eq__(self, other):
return type(other) is IDHashable and id(self.val) == id(other.val)
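# For example (illustrative check): two IDHashables compare equal only when they
# wrap the very same object, so the compilation cache below is keyed on object
# identity rather than on value equality.
consts_example = np.ones(3)
print(IDHashable(consts_example) == IDHashable(consts_example))  # True
print(IDHashable(consts_example) == IDHashable(np.ones(3)))      # False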
# Next, we'll define the evaluation rule for `xla_call`:
# +
from jax.lib import xla_bridge as xb
from jax.lib import xla_client as xc
xe = xc._xla
xops = xc._xla.ops
def xla_call_impl(*args, jaxpr: Jaxpr, num_consts: int):
consts, args = args[:num_consts], args[num_consts:]
hashable_consts = tuple(map(IDHashable, consts))
execute = xla_callable(IDHashable(jaxpr), hashable_consts)
return execute(*args)
impl_rules[xla_call_p] = xla_call_impl
@lru_cache()
def xla_callable(hashable_jaxpr: IDHashable, hashable_consts: Tuple[IDHashable]):
jaxpr: Jaxpr = hashable_jaxpr.val
typecheck_jaxpr(jaxpr)
consts = [x.val for x in hashable_consts]
in_avals = [v.aval for v in jaxpr.in_binders[len(consts):]]
c = xb.make_computation_builder('xla_call')
xla_consts = _xla_consts(c, consts)
xla_params = _xla_params(c, in_avals)
outs = jaxpr_subcomp(c, jaxpr, xla_consts + xla_params)
out = xops.Tuple(c, outs)
compiled = xb.get_backend(None).compile(c.build(out))
return partial(execute_compiled, compiled, [v.aval for v in jaxpr.outs])
def _xla_consts(c: xe.XlaBuilder, consts: List[Any]) -> List[xe.XlaOp]:
unique_consts = {id(cnst): cnst for cnst in consts}
xla_consts = {
id_: xops.ConstantLiteral(c, cnst) for id_, cnst in unique_consts.items()}
return [xla_consts[id(cnst)] for cnst in consts]
def _xla_params(c: xe.XlaBuilder, avals_in: List[ShapedArray]) -> List[xe.XlaOp]:
return [xb.parameter(c, i, _xla_shape(a)) for i, a in enumerate(avals_in)]
def _xla_shape(aval: ShapedArray) -> xe.Shape:
return xc.Shape.array_shape(xc.dtype_to_etype(aval.dtype), aval.shape)
# -
# The main action is in `xla_callable`, which compiles a jaxpr into an XLA HLO
# program using `jaxpr_subcomp`, then returns a callable which executes the
# compiled program:
# +
def jaxpr_subcomp(c: xe.XlaBuilder, jaxpr: Jaxpr, args: List[xe.XlaOp]
) -> xe.XlaOp:
env: Dict[Var, xe.XlaOp] = {}
def read(x: Atom) -> xe.XlaOp:
return env[x] if type(x) is Var else xb.constant(c, x.val, False)
def write(v: Var, val: xe.XlaOp) -> None:
env[v] = val
map(write, jaxpr.in_binders, args)
for eqn in jaxpr.eqns:
in_avals = [x.aval for x in eqn.inputs]
in_vals = map(read, eqn.inputs)
rule = xla_translations[eqn.primitive]
out_vals = rule(c, in_avals, in_vals, **eqn.params)
map(write, eqn.out_binders, out_vals)
return map(read, jaxpr.outs)
def execute_compiled(compiled, out_avals, *args):
input_bufs = [input_handlers[type(x)](x) for x in args]
out_bufs = compiled.execute(input_bufs)
return [handle_result(aval, buf) for aval, buf in zip(out_avals, out_bufs)]
default_input_handler = xb.get_backend(None).buffer_from_pyval
input_handlers = {ty: default_input_handler for ty in
[bool, int, float, np.ndarray, np.float64, np.float32]}
def handle_result(aval: ShapedArray, buf):
del aval # Unused for now.
return buf.to_py()
xla_translations = {}
# -
# Notice that `jaxpr_subcomp` has the structure of a simple interpreter. That's
# a common pattern: the way we process jaxprs is usually with an interpreter.
# And as with any interpreter, we need an interpretation rule for each
# primitive:
# +
def direct_translation(op, c, in_avals, in_vals):
del c, in_avals
return [op(*in_vals)]
xla_translations[add_p] = partial(direct_translation, xops.Add)
xla_translations[mul_p] = partial(direct_translation, xops.Mul)
xla_translations[neg_p] = partial(direct_translation, xops.Neg)
xla_translations[sin_p] = partial(direct_translation, xops.Sin)
xla_translations[cos_p] = partial(direct_translation, xops.Cos)
xla_translations[greater_p] = partial(direct_translation, xops.Gt)
xla_translations[less_p] = partial(direct_translation, xops.Lt)
def reduce_sum_translation(c, in_avals, in_vals, *, axis):
(x_aval,), (x,) = in_avals, in_vals
zero = xops.ConstantLiteral(c, np.array(0, x_aval.dtype))
subc = xb.make_computation_builder('add')
shape = _xla_shape(ShapedArray((), x_aval.dtype))
xops.Add(xops.Parameter(subc, 0, shape), xops.Parameter(subc, 1, shape))
return [xops.Reduce(c, [x], [zero], subc.build(), [axis])]
xla_translations[reduce_sum_p] = reduce_sum_translation
def broadcast_translation(c, in_avals, in_vals, *, shape, axes):
x, = in_vals
dims_complement = [i for i in range(len(shape)) if i not in axes]
return [xops.BroadcastInDim(x, shape, dims_complement)]
xla_translations[broadcast_p] = broadcast_translation
# -
# With that, we can now use `jit` to stage out, compile, and execute programs
# with XLA!
@jit
def f(x, y):
print('tracing!')
return sin(x) * cos(y)
z = f(3., 4.) # 'tracing!' prints the first time
print(z)
z = f(4., 5.) # 'tracing!' doesn't print, compilation cache hit!
print(z)
# +
@jit
def f(x):
return reduce_sum(x, axis=0)
print(f(np.array([1., 2., 3.])))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
def deriv(f):
return lambda x: jvp(f, (x,), (1.,))[1]
print( deriv(deriv(f))(3.))
print(jit(deriv(deriv(f)))(3.))
# -
# Instead of implementing `jit` to first trace to a jaxpr and then to lower the
# jaxpr to XLA HLO, it might appear that we could have skipped the jaxpr step
# and just lowered to HLO while tracing. That is, perhaps we could have instead
# implemented `jit` with a `Trace` and `Tracer` that appended to the XLA HLO
# graph incrementally on each primitive bind. That's correct for now, but won't
# be possible when we introduce compiled SPMD computations because there we must
# know the number of replicas needed before compiling the program.
# We haven't yet defined any transformation rules for `xla_call_p` other than
# its evaluation rule. That is, we can't yet do `vmap`-of-`jit` or
# `jvp`-of-`jit` or even `jit`-of-`jit`. Instead `jit` has to be at the "top
# level." Let's fix that!
# +
def xla_call_jvp_rule(primals, tangents, *, jaxpr, num_consts):
del num_consts # Unused.
new_jaxpr, new_consts = jvp_jaxpr(jaxpr)
outs = bind(xla_call_p, *new_consts, *primals, *tangents, jaxpr=new_jaxpr,
num_consts=len(new_consts))
n = len(outs) // 2
primals_out, tangents_out = outs[:n], outs[n:]
return primals_out, tangents_out
jvp_rules[xla_call_p] = xla_call_jvp_rule
@lru_cache()
def jvp_jaxpr(jaxpr: Jaxpr) -> Tuple[Jaxpr, List[Any]]:
def jvp_traceable(*primals_and_tangents):
n = len(primals_and_tangents) // 2
primals, tangents = primals_and_tangents[:n], primals_and_tangents[n:]
return jvp(jaxpr_as_fun(jaxpr), primals, tangents)
in_avals = [v.aval for v in jaxpr.in_binders]
new_jaxpr, new_consts, _ = make_jaxpr(jvp_traceable, *in_avals, *in_avals)
return new_jaxpr, new_consts
# +
def xla_call_vmap_rule(axis_size, vals_in, dims_in, *, jaxpr, num_consts):
del num_consts # Unused.
new_jaxpr, new_consts = vmap_jaxpr(jaxpr, axis_size, tuple(dims_in))
outs = bind(xla_call_p, *new_consts, *vals_in, jaxpr=new_jaxpr,
num_consts=len(new_consts))
return outs, [0] * len(outs)
vmap_rules[xla_call_p] = xla_call_vmap_rule
@lru_cache()
def vmap_jaxpr(jaxpr: Jaxpr, axis_size: int, bdims_in: Tuple[BatchAxis, ...]
) -> Tuple[Jaxpr, List[Any]]:
vmap_traceable = vmap(jaxpr_as_fun(jaxpr), tuple(bdims_in))
in_avals = [unmapped_aval(axis_size, d, v.aval)
for v, d in zip(jaxpr.in_binders, bdims_in)]
new_jaxpr, new_consts, _ = make_jaxpr(vmap_traceable, *in_avals)
return new_jaxpr, new_consts
def unmapped_aval(axis_size: int, batch_dim: BatchAxis, aval: ShapedArray
) -> ShapedArray:
if batch_dim is not_mapped:
return aval
else:
shape = list(aval.shape)
shape.insert(batch_dim, axis_size)
return ShapedArray(tuple(shape), aval.dtype)
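# Illustrative check: unmapped_aval rebuilds the batched input types from the
# per-example avals of the jaxpr's binders by re-inserting the batch axis.
print(unmapped_aval(4, 0, ShapedArray((3,), np.dtype('float64'))).shape)           # (4, 3)
print(unmapped_aval(4, not_mapped, ShapedArray((3,), np.dtype('float64'))).shape)  # (3,)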
# +
def xla_call_abstract_eval_rule(*in_types, jaxpr, num_consts):
del num_consts # Unused.
jaxpr_type = typecheck_jaxpr(jaxpr)
if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)):
raise TypeError
return jaxpr_type.out_types
abstract_eval_rules[xla_call_p] = xla_call_abstract_eval_rule
def xla_call_translation(c, in_avals, in_vals, *, jaxpr, num_consts):
del num_consts # Only used at top-level.
# Calling jaxpr_subcomp directly would inline. We generate a Call HLO instead.
subc = xb.make_computation_builder('inner xla_call')
xla_params = _xla_params(subc, in_avals)
outs = jaxpr_subcomp(subc, jaxpr, xla_params)
subc = subc.build(xops.Tuple(subc, outs))
return destructure_tuple(c, xops.Call(c, subc, in_vals))
xla_translations[xla_call_p] = xla_call_translation
def destructure_tuple(c, tup):
num_elements = len(c.get_shape(tup).tuple_shapes())
return [xops.GetTupleElement(tup, i) for i in range(num_elements)]
# +
@jit
def f(x):
print('tracing!')
y = sin(x) * 2.
z = - y + x
return z
x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -
y, ydot = jvp(f, (x,), (xdot,)) # 'tracing!' not printed
ys = vmap(f, (0,))(np.arange(3.))
print(ys)
# One piece missing is device memory persistence for arrays. That is, we've
# defined `handle_result` to transfer results back to CPU memory as NumPy
# arrays, but it's often preferable to avoid transferring results just to
# transfer them back for the next operation. We can do that by introducing a
# `DeviceArray` class, which can wrap XLA buffers and otherwise duck-type
# `numpy.ndarray`s:
# +
def handle_result(aval: ShapedArray, buf): # noqa: F811
return DeviceArray(aval, buf)
class DeviceArray:
buf: Any
aval: ShapedArray
def __init__(self, aval, buf):
self.aval = aval
self.buf = buf
dtype = property(lambda self: self.aval.dtype)
shape = property(lambda self: self.aval.shape)
ndim = property(lambda self: self.aval.ndim)
def __array__(self): return self.buf.to_py()
def __repr__(self): return repr(self.buf.to_py())
def __str__(self): return str(self.buf.to_py())
_neg = staticmethod(neg)
_add = staticmethod(add)
_radd = staticmethod(add)
_mul = staticmethod(mul)
_rmul = staticmethod(mul)
_gt = staticmethod(greater)
_lt = staticmethod(less)
input_handlers[DeviceArray] = lambda x: x.buf
jax_types.add(DeviceArray)
# +
@jit
def f(x):
y = sin(x) * 2.
z = - y + x
return z
x, xdot = 3., 1.
y, ydot = jvp(f, (x,), (xdot,))
print(y)
print(ydot)
# -
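# As an illustrative check, results of jit'd computations now stay on the device
# as DeviceArrays, which duck-type ndarrays (e.g. via `__array__`):
# +
out = f(3.)
print(type(out).__name__, out.shape)  # DeviceArray ()
print(np.asarray(out))                # materializes a NumPy array on demand
# -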
# ## Part 4: `linearize` and `vjp` (and `grad`!)
#
# The `linearize` and `vjp` autodiff functions are built on `jvp`, but involve
# jaxprs as well. That's because both involve staging out, or delaying,
# computation.
# ### `linearize`
#
# In the case of `linearize`, we want to stage out the linear part of a `jvp`
# computation. That is, if we have `jvp : (a -> b) -> (a, T a) -> (b, T b)`,
# then we write `linearize : (a -> b) -> a -> (b, T a -o T b)`, using `T a` to
# mean "the tangent type of `a`" and using the "lollipop" `-o` rather than the
# arrow `->` to indicate a _linear_ function. We define the semantics of
# `linearize` in terms of `jvp` too:
# ```python
# y, f_lin = linearize(f, x)
# y_dot = f_lin(x_dot)
# ```
# gives the same result for `(y, y_dot)` as
# ```
# y, y_dot = jvp(f, (x,), (x_dot,))
# ```
# where the application of `f_lin` does not redo any of the linearization work.
# We'll represent the delayed linear part `f_lin : T a -o T b` as a jaxpr.
#
# Tangentially, now that we have linear arrows `-o`, we can provide a slightly
# more informative type for `jvp`:
# ```
# jvp : (a -> b) -> (UnrestrictedUse a, T a) -o (UnrestrictedUse b, T b)
# ```
# Here we're writing `UnrestrictedUse` just to indicate that we have a special
# pair where the first element can be used in an unrestricted (nonlinear) way.
# In conjunction with the linear arrow, this notation is just meant to express
# that the function `jvp f` uses its first input in a nonlinear way but its
# second input in a linear way, producing a corresponding nonlinear output
# (which can be used in a nonlinear way) paired with a linear output. This more
# refined type signature encodes the data dependencies in `jvp f`, which are
# useful for partial evaluation.
#
# To build the `f_lin` jaxpr from a JVP, we need to perform partial evaluation:
# we evaluate all the primal values as we trace, but stage the tangent
# computations into a jaxpr. This is our second way to build jaxprs. But where
# `make_jaxpr` and its underlying `JaxprTrace`/`JaxprTracer` interpreters aim
# to stage out every primitive bind, this second approach stages out only those
# primitive binds with a data dependence on tangent inputs.
#
# First, some utilities:
# +
def split_half(lst: List[Any]) -> Tuple[List[Any], List[Any]]:
assert not len(lst) % 2
return split_list(lst, len(lst) // 2)
def merge_lists(which: List[bool], l1: List[Any], l2: List[Any]) -> List[Any]:
l1, l2 = iter(l1), iter(l2)
out = [next(l2) if b else next(l1) for b in which]
assert next(l1, None) is next(l2, None) is None
return out
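# Quick illustrative checks: split_half cuts a list into equal halves, and
# merge_lists interleaves two lists according to a boolean mask (True picks from
# the second list).
assert split_half([1, 2, 3, 4]) == ([1, 2], [3, 4])
assert merge_lists([False, True, False], ['a', 'c'], ['b']) == ['a', 'b', 'c']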
# -
# Next, we'll write `linearize` by combining `jvp` together with a general
# partial evaluation transformation, to be added next:
# +
def linearize_flat(f, *primals_in):
pvals_in = ([PartialVal.known(x) for x in primals_in] +
[PartialVal.unknown(vspace(get_aval(x))) for x in primals_in])
def f_jvp(*primals_tangents_in):
primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in))
return [*primals_out, *tangents_out]
jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in)
primal_pvals, _ = split_half(pvals_out)
assert all(pval.is_known for pval in primal_pvals)
primals_out = [pval.const for pval in primal_pvals]
f_lin = lambda *tangents: eval_jaxpr(jaxpr, [*consts, *tangents])
return primals_out, f_lin
def linearize(f, *primals_in):
primals_in_flat, in_tree = tree_flatten(primals_in)
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, f_lin_flat = linearize_flat(f, *primals_in_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
def f_lin(*tangents_in):
tangents_in_flat, in_tree2 = tree_flatten(tangents_in)
if in_tree != in_tree2: raise TypeError
tangents_out_flat = f_lin_flat(*tangents_in_flat)
return tree_unflatten(out_tree(), tangents_out_flat)
return primals_out, f_lin
def vspace(aval: ShapedArray) -> ShapedArray:
return raise_to_shaped(aval) # TODO handle integers?
# -
# Now we turn to the general partial evaluation transformation. The goal is to
# accept a Python callable and a list of inputs, some known and some unknown,
# and to produce (1) all the outputs which can be computed from the known
# inputs, together with (2) a jaxpr representing the part of the Python
# callable's computation which can only be performed after the remaining inputs
# are known.
#
# This transformation is tricky to summarize in a type signature. If we
# assume the input function's type signature is `(a1, a2) -> (b1, b2)`, where
# `a1` and `a2` represent the known and unknown inputs, respectively, and where
# `b1` only has a data dependency on `a1` while `b2` has some data dependency on
# `a2`, then we might write
#
# ```
# partial_eval : ((a1, a2) -> (b1, b2)) -> a1 -> exists r. (b1, r, (r, a2) -> b2)
# ```
#
# In words, given values for the inputs of type `a1`, `partial_eval` produces
# the outputs of type `b1` along with "residual" values of
# existentially-quantified type `r` representing the intermediates required to
# complete the computation in the second stage. It also produces a function of
# type `(r, a2) -> b2` which accepts the residual values as well as the
# remaining inputs and produces the remaining outputs.
#
# We like to think of partial evaluation as "unzipping" one computation into
# two. For example, consider this jaxpr:
# ```
# { lambda a:float64[] .
# let b:float64[] = sin a
# c:float64[] = neg b
# in ( c ) }
# ```
# A jaxpr for the JVP would look like:
# ```
# { lambda a:float64[] b:float64[] .
# let c:float64[] = sin a
# d:float64[] = cos a
# e:float64[] = mul d b
# f:float64[] = neg c
# g:float64[] = neg e
# in ( f, g ) }
# ```
# If we imagine applying partial evaluation to this jaxpr with the first input
# known and the second unknown, we end up 'unzipping' the JVP jaxpr into primal
# and tangent jaxprs:
# ```
# { lambda a:float64[] .
# let c:float64[] = sin a
# d:float64[] = cos a
# f:float64[] = neg c
# in ( f, d ) }
# ```
# ```
# { lambda d:float64[] b:float64[] .
# let e:float64[] = mul d b
# g:float64[] = neg e
# in ( g ) }
# ```
# This second jaxpr represents the linear computation that we want from
# `linearize`.
#
# However, unlike in this jaxpr example, we want the computation on known values
# to occur while evaluating the input Python callable. That is, rather than
# forming a jaxpr for the entire function `(a1, a2) -> (b1, b2)`, staging all
# operations out of Python first before sorting out what can be evaluated now
# and what must be delayed, we want only to form a jaxpr for those operations
# that _must_ be delayed due to a dependence on unknown inputs. In the context
# of automatic differentiation, this is the feature that ultimately enables us to
# handle functions like `grad(lambda x: x**2 if x > 0 else 0.)`. Python control
# flow works because partial evaluation keeps the primal computation in Python.
# As a consequence, our `Trace` and `Tracer` subclasses must on the fly sort out
# what can be evaluated and what must be staged out into a jaxpr.
#
# First, we start with a `PartialVal` class, which represents a value that can
# be either known or unknown:
class PartialVal(NamedTuple):
aval: ShapedArray
const: Optional[Any]
@classmethod
def known(cls, val: Any):
return PartialVal(get_aval(val), val)
@classmethod
def unknown(cls, aval: ShapedArray):
return PartialVal(aval, None)
is_known = property(lambda self: self.const is not None)
is_unknown = property(lambda self: self.const is None)
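# For example (illustrative check): a known PartialVal carries a concrete
# constant, while an unknown one carries only its abstract value.
print(PartialVal.known(3.).is_known)                                        # True
print(PartialVal.unknown(ShapedArray((), np.dtype('float64'))).is_unknown)  # True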
# Partial evaluation will take a list of `PartialVal`s representing inputs, and
# return a list of `PartialVal` outputs along with a jaxpr representing the
# delayed computation:
def partial_eval_flat(f: Callable, pvals_in: List[PartialVal]
) -> Tuple[Jaxpr, List[PartialVal], List[Any]]:
with new_main(PartialEvalTrace) as main:
trace = PartialEvalTrace(main)
tracers_in = [trace.new_arg(pval) for pval in pvals_in]
outs = f(*tracers_in)
tracers_out = [full_raise(trace, out) for out in outs]
pvals_out = [t.pval for t in tracers_out]
unk_tracers_in = [t for t in tracers_in if t.pval.is_unknown]
unk_tracers_out = [t for t in tracers_out if t.pval.is_unknown]
jaxpr, consts = tracers_to_jaxpr(unk_tracers_in, unk_tracers_out)
return jaxpr, pvals_out, consts
# Next we need to implement `PartialEvalTrace` and its `PartialEvalTracer`. This
# interpreter will build a jaxpr on the fly while tracking data dependencies. To
# do so, it builds a bipartite directed acyclic graph (DAG) between
# `PartialEvalTracer` nodes, representing staged-out values, and `JaxprRecipe`
# nodes, representing formulas for how to compute some values from others. One
# kind of recipe is a `JaxprEqnRecipe`, corresponding to a `JaxprEqn`'s primitive
# application, but we also have recipe types for constants and lambda binders:
# +
from weakref import ref, ReferenceType
class LambdaBindingRecipe(NamedTuple):
pass
class ConstRecipe(NamedTuple):
val: Any
class JaxprEqnRecipe(NamedTuple):
prim: Primitive
tracers_in: List['PartialEvalTracer']
params: Dict[str, Any]
avals_out: List[ShapedArray]
tracer_refs_out: List['ReferenceType[PartialEvalTracer]']
JaxprRecipe = Union[LambdaBindingRecipe, ConstRecipe, JaxprEqnRecipe]
# -
class PartialEvalTracer(Tracer):
pval: PartialVal
recipe: Optional[JaxprRecipe]
def __init__(self, trace, pval, recipe):
self._trace = trace
self.pval = pval
self.recipe = recipe
aval = property(lambda self: self.pval.aval)
def full_lower(self):
if self.pval.is_known:
return full_lower(self.pval.const)
return self
# The `PartialEvalTrace` contains the logic for constructing the graph of
# `JaxprRecipe`s and `PartialEvalTracer`s. Each argument corresponds to a
# `LambdaBindingRecipe` leaf node, and each constant is a `ConstRecipe` leaf
# node holding a reference to the constant. All other tracers and recipes come
# from `process_primitive`, which forms tracers with `JaxprEqnRecipe`s.
#
# For most primitives, the `process_primitive` logic is straightforward: if all
# inputs are known then we can bind the primitive on the known values
# (evaluating it in Python) and avoid forming tracers corresponding to the
# output. If instead any input is unknown, we stage out into a
# `JaxprEqnRecipe` representing the primitive application. To build the tracers
# representing unknown outputs, we need avals, which we get from the abstract eval
# rules. (Notice that tracers reference `JaxprEqnRecipe`s, and `JaxprEqnRecipe`s
# reference tracers; we avoid circular garbage by using weakrefs.)
#
# That `process_primitive` logic applies to most primitives, but `xla_call_p`
# requires recursive treatment. So we special-case its rule in a
# `partial_eval_rules` dict.
# +
class PartialEvalTrace(Trace):
def new_arg(self, pval: PartialVal) -> Any:
return PartialEvalTracer(self, pval, LambdaBindingRecipe())
def lift(self, val: Any) -> PartialEvalTracer:
return PartialEvalTracer(self, PartialVal.known(val), None)
pure = lift
def instantiate_const(self, tracer: PartialEvalTracer) -> PartialEvalTracer:
if tracer.pval.is_unknown:
return tracer
else:
pval = PartialVal.unknown(raise_to_shaped(tracer.aval))
return PartialEvalTracer(self, pval, ConstRecipe(tracer.pval.const))
def process_primitive(self, primitive, tracers, params):
if all(t.pval.is_known for t in tracers):
return bind(primitive, *map(full_lower, tracers), **params)
rule = partial_eval_rules.get(primitive)
if rule: return rule(self, tracers, **params)
tracers_in = [self.instantiate_const(t) for t in tracers]
avals_in = [t.aval for t in tracers_in]
avals_out = abstract_eval_rules[primitive](*avals_in, **params)
tracers_out = [PartialEvalTracer(self, PartialVal.unknown(aval), None)
for aval in avals_out]
eqn = JaxprEqnRecipe(primitive, tracers_in, params, avals_out,
map(ref, tracers_out))
for t in tracers_out: t.recipe = eqn
return tracers_out
partial_eval_rules = {}
# -
# Now that we can build graph representations of jaxprs with `PartialEvalTrace`,
# we need a mechanism to convert the graph representation to a standard jaxpr.
# The jaxpr corresponds to a topological sort of the graph.
# +
def tracers_to_jaxpr(tracers_in: List[PartialEvalTracer],
tracers_out: List[PartialEvalTracer]):
tracer_to_var = {id(t): Var(raise_to_shaped(t.aval)) for t in tracers_in}
constvar_to_val = {}
constid_to_var = {}
processed_eqns = set()
eqns = []
for t in toposort(tracers_out, tracer_parents):
if isinstance(t.recipe, LambdaBindingRecipe):
assert id(t) in set(map(id, tracers_in))
elif isinstance(t.recipe, ConstRecipe):
val = t.recipe.val
var = constid_to_var.get(id(val))
if var is None:
aval = raise_to_shaped(get_aval(val))
var = tracer_to_var[id(t)] = constid_to_var[id(val)] = Var(aval)
constvar_to_val[var] = val
elif isinstance(t.recipe, JaxprEqnRecipe):
if id(t.recipe) not in processed_eqns:
eqns.append(recipe_to_eqn(tracer_to_var, t.recipe))
processed_eqns.add(id(t.recipe))
else:
raise TypeError(t.recipe)
constvars, constvals = unzip2(constvar_to_val.items())
in_binders = constvars + [tracer_to_var[id(t)] for t in tracers_in]
out_vars = [tracer_to_var[id(t)] for t in tracers_out]
jaxpr = Jaxpr(in_binders, eqns, out_vars)
typecheck_jaxpr(jaxpr)
return jaxpr, constvals
def recipe_to_eqn(tracer_to_var: Dict[int, Var], recipe: JaxprEqnRecipe
) -> JaxprEqn:
inputs = [tracer_to_var[id(t)] for t in recipe.tracers_in]
out_binders = [Var(aval) for aval in recipe.avals_out]
for t_ref, var in zip(recipe.tracer_refs_out, out_binders):
if t_ref() is not None: tracer_to_var[id(t_ref())] = var
return JaxprEqn(recipe.prim, inputs, recipe.params, out_binders)
def tracer_parents(t: PartialEvalTracer) -> List[PartialEvalTracer]:
return t.recipe.tracers_in if isinstance(t.recipe, JaxprEqnRecipe) else []
# + tags=["hide-input"]
def toposort(out_nodes: List[Any], parents: Callable[[Any], List[Any]]):
if not out_nodes: return []
out_nodes = remove_duplicates(out_nodes)
child_counts = {}
stack = list(out_nodes)
while stack:
node = stack.pop()
if id(node) in child_counts:
child_counts[id(node)] += 1
else:
child_counts[id(node)] = 1
stack.extend(parents(node))
for node in out_nodes:
child_counts[id(node)] -= 1
sorted_nodes = []
childless_nodes = [node for node in out_nodes if not child_counts[id(node)]]
while childless_nodes:
node = childless_nodes.pop()
sorted_nodes.append(node)
for parent in parents(node):
if child_counts[id(parent)] == 1:
childless_nodes.append(parent)
else:
child_counts[id(parent)] -= 1
sorted_nodes = sorted_nodes[::-1]
check_toposort(sorted_nodes, parents)
return sorted_nodes
def remove_duplicates(lst):
seen = set()
return [x for x in lst if id(x) not in seen and not seen.add(id(x))]
def check_toposort(nodes: List[Any], parents: Callable[[Any], List[Any]]):
seen = set()
for node in nodes:
assert all(id(parent) in seen for parent in parents(node))
seen.add(id(node))
# -
# Now we can linearize!
y, sin_lin = linearize(sin, 3.)
print(y, sin(3.))
print(sin_lin(1.), cos(3.))
# To handle `linearize`-of-`jit`, we still need to write a partial evaluation
# rule for `xla_call_p`. Other than tracer bookkeeping, the main task is to
# perform partial evaluation of a jaxpr, 'unzipping' it into two jaxprs.
#
# There are actually two rules to write: one for trace-time partial evaluation,
# which we'll call `xla_call_partial_eval`, and one for partial evaluation of
# jaxprs, which we'll call `xla_call_peval_eqn`.
# +
def xla_call_partial_eval(trace, tracers, *, jaxpr, num_consts):
del num_consts # Unused.
in_unknowns = [not t.pval.is_known for t in tracers]
jaxpr1, jaxpr2, out_unknowns, num_res = partial_eval_jaxpr(jaxpr, in_unknowns)
known_tracers, unknown_tracers = partition_list(in_unknowns, tracers)
known_vals = [t.pval.const for t in known_tracers]
outs1_res = bind(xla_call_p, *known_vals, jaxpr=jaxpr1, num_consts=0)
outs1, res = split_list(outs1_res, len(jaxpr1.outs) - num_res)
res_tracers = [trace.instantiate_const(full_raise(trace, x)) for x in res]
outs2 = [PartialEvalTracer(trace, PartialVal.unknown(v.aval), None)
for v in jaxpr2.outs]
eqn = JaxprEqnRecipe(xla_call_p, res_tracers + unknown_tracers,
dict(jaxpr=jaxpr2, num_consts=0),
[v.aval for v in jaxpr2.outs], map(ref, outs2))
for t in outs2: t.recipe = eqn
return merge_lists(out_unknowns, outs1, outs2)
partial_eval_rules[xla_call_p] = xla_call_partial_eval
def partial_eval_jaxpr(jaxpr: Jaxpr, in_unknowns: List[bool],
instantiate: Optional[List[bool]] = None,
) -> Tuple[Jaxpr, Jaxpr, List[bool], int]:
env: Dict[Var, bool] = {}
residuals: Set[Var] = set()
def read(v: Atom) -> bool:
return type(v) is Var and env[v]
def write(unk: bool, v: Var) -> None:
env[v] = unk
def new_res(x: Atom) -> Atom:
if type(x) is Var: residuals.add(x)
return x
eqns1, eqns2 = [], []
map(write, in_unknowns, jaxpr.in_binders)
for eqn in jaxpr.eqns:
unks_in = map(read, eqn.inputs)
rule = partial_eval_jaxpr_rules.get(eqn.primitive)
if rule:
eqn1, eqn2, unks_out, res = rule(unks_in, eqn)
eqns1.append(eqn1); eqns2.append(eqn2); residuals.update(res)
map(write, unks_out, eqn.out_binders)
elif any(unks_in):
inputs = [v if unk else new_res(v) for unk, v in zip(unks_in, eqn.inputs)]
eqns2.append(JaxprEqn(eqn.primitive, inputs, eqn.params, eqn.out_binders))
map(partial(write, True), eqn.out_binders)
else:
eqns1.append(eqn)
map(partial(write, False), eqn.out_binders)
out_unknowns = map(read, jaxpr.outs)
if instantiate is not None:
for v, uk, inst in zip(jaxpr.outs, out_unknowns, instantiate):
if inst and not uk: new_res(v)
out_unknowns = map(op.or_, out_unknowns, instantiate)
residuals, num_res = list(residuals), len(residuals)
ins1, ins2 = partition_list(in_unknowns, jaxpr.in_binders)
outs1, outs2 = partition_list(out_unknowns, jaxpr.outs)
jaxpr1 = Jaxpr(ins1, eqns1, outs1 + residuals)
jaxpr2 = Jaxpr(residuals + ins2, eqns2, outs2)
typecheck_partial_eval_jaxpr(jaxpr, in_unknowns, out_unknowns, jaxpr1, jaxpr2)
return jaxpr1, jaxpr2, out_unknowns, num_res
def typecheck_partial_eval_jaxpr(jaxpr, unks_in, unks_out, jaxpr1, jaxpr2):
jaxprty = typecheck_jaxpr(jaxpr) # (a1, a2) -> (b1, b2 )
jaxpr1ty = typecheck_jaxpr(jaxpr1) # a1 -> (b1, res)
jaxpr2ty = typecheck_jaxpr(jaxpr2) # (res, a2) -> b2
a1, a2 = partition_list(unks_in, jaxprty.in_types)
b1, b2 = partition_list(unks_out, jaxprty.out_types)
b1_, res = split_list(jaxpr1ty.out_types, len(b1))
res_, a2_ = split_list(jaxpr2ty.in_types, len(res))
b2_ = jaxpr2ty.out_types
if jaxpr1ty.in_types != a1: raise TypeError
if jaxpr2ty.out_types != b2: raise TypeError
if b1 != b1_: raise TypeError
if res != res_: raise TypeError
if a2 != a2_: raise TypeError
if b2 != b2_: raise TypeError
partial_eval_jaxpr_rules = {}
def xla_call_peval_eqn(unks_in: List[bool], eqn: JaxprEqn,
) -> Tuple[JaxprEqn, JaxprEqn, List[bool], List[Atom]]:
jaxpr = eqn.params['jaxpr']
jaxpr1, jaxpr2, unks_out, num_res = partial_eval_jaxpr(jaxpr, unks_in)
ins1, ins2 = partition_list(unks_in, eqn.inputs)
outs1, outs2 = partition_list(unks_out, eqn.out_binders)
residuals, _ = split_list(jaxpr2.in_binders, num_res)
eqn1 = JaxprEqn(xla_call_p, ins1, dict(jaxpr=jaxpr1, num_consts=0),
outs1 + residuals)
eqn2 = JaxprEqn(xla_call_p, residuals + ins2,
dict(jaxpr=jaxpr2, num_consts=0), outs2)
return eqn1, eqn2, unks_out, residuals
partial_eval_jaxpr_rules[xla_call_p] = xla_call_peval_eqn
# -
# With that, we can compose `linearize` and `jit` however we like:
# +
@jit
def f(x):
y = sin(x) * 2.
z = - y + x
return z
y, f_lin = linearize(f, 3.)
y_dot = f_lin(1.)
print(y, y_dot)
# +
@jit
def f(x):
y = sin(x) * 2.
z = g(x, y)
return z
@jit
def g(x, y):
return cos(x) + y
y, f_lin = linearize(f, 3.)
y_dot = f_lin(1.)
print(y, y_dot)
# -
# ### `vjp` and `grad`
#
# The `vjp` transformation works a lot like linearize. Its type signature is
# analogous:
#
# ```
# linearize : (a -> b) -> a -> (b, T a -o T b)
# vjp : (a -> b) -> a -> (b, T b -o T a)
# ```
#
# The only difference is that we transpose the linear part of the computation
# before returning it, so that it goes from type `T a -o T b` to type `T b -o T
# a`. That is, we'll implement `vjp` as, essentially,
#
# ```
# def vjp(f, x):
# y, f_lin = linearize(f, x)
# f_vjp = lambda y_bar: transpose(f_lin)(y_bar)
# return y, f_vjp
# ```
#
# Since we have the linear computation as a jaxpr, not just a Python callable,
# we can implement the transpose transformation as a jaxpr interpreter.
# +
def vjp_flat(f, *primals_in):
pvals_in = ([PartialVal.known(x) for x in primals_in] +
[PartialVal.unknown(vspace(get_aval(x))) for x in primals_in])
primal_pvals_in, tangent_pvals_in = split_half(pvals_in)
def f_jvp(*primals_tangents_in):
primals_out, tangents_out = jvp(f, *split_half(primals_tangents_in))
return [*primals_out, *tangents_out]
jaxpr, pvals_out, consts = partial_eval_flat(f_jvp, pvals_in) # linearize
primal_pvals, _ = split_half(pvals_out)
assert all(pval.is_known for pval in primal_pvals)
primals_out = [pval.const for pval in primal_pvals]
transpose_inputs = consts + [UndefPrimal(p.aval) for p in tangent_pvals_in]
f_vjp = lambda *cts: eval_jaxpr_transposed(jaxpr, transpose_inputs, cts)
return primals_out, f_vjp
def vjp(f, *primals_in):
primals_in_flat, in_tree = tree_flatten(primals_in)
f, out_tree = flatten_fun(f, in_tree)
primals_out_flat, f_vjp_flat = vjp_flat(f, *primals_in_flat)
primals_out = tree_unflatten(out_tree(), primals_out_flat)
def f_vjp(*cotangents_out):
cotangents_out_flat, _ = tree_flatten(cotangents_out)
cotangents_in_flat = f_vjp_flat(*cotangents_out_flat)
return tree_unflatten(in_tree, cotangents_in_flat)
return primals_out, f_vjp
class UndefPrimal(NamedTuple):
aval: ShapedArray
register_pytree_node(UndefPrimal,
lambda u: (u.aval, ()),
lambda aval, _: UndefPrimal(aval))
# -
# We use `UndefPrimal` instances to indicate which arguments we want to
# transpose with respect to. These arise because in general, being explicit
# about closed-over values, we want to transpose functions of type
# `a -> b -o c` to functions of type `a -> c -o b`. Even more generally, the
# inputs with respect to which the function is linear could be scattered through
# the argument list. So we indicate the linear positions using `UndefPrimal`.
# We register `UndefPrimal` as a pytree node because the pytree mechanism gives
# a handy way to prune these placeholders out of argument lists.
#
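# As an illustrative check (assuming `tree_flatten` from Part 1), flattening a
# structure containing an `UndefPrimal` yields no leaf for the placeholder, so
# only the concrete arguments survive:
# +
leaves, _ = tree_flatten((UndefPrimal(ShapedArray((), np.dtype('float64'))), 3.0))
print(leaves)  # [3.0]
# -
#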
# Next, we can write `eval_jaxpr_transposed`, along with transpose rules for
# all primitives which can be linear in at least one argument:
# +
# NB: the analogous function in JAX is called 'backward_pass'
def eval_jaxpr_transposed(jaxpr: Jaxpr, args: List[Any], cotangents: List[Any]
) -> List[Any]:
primal_env: Dict[Var, Any] = {}
ct_env: Dict[Var, Any] = {}
def read_primal(x: Atom) -> Any:
return primal_env.get(x, UndefPrimal(x.aval)) if type(x) is Var else x.val
def write_primal(v: Var, val: Any) -> None:
if type(val) is not UndefPrimal:
primal_env[v] = val
def read_cotangent(v: Var) -> Any:
return ct_env.pop(v, np.zeros(v.aval.shape, v.aval.dtype))
def write_cotangent(x: Atom, val: Any):
if type(x) is Var and val is not None:
ct_env[x] = add(ct_env[x], val) if x in ct_env else val
map(write_primal, jaxpr.in_binders, args)
map(write_cotangent, jaxpr.outs, cotangents)
for eqn in jaxpr.eqns[::-1]:
primals_in = map(read_primal, eqn.inputs)
cts_in = map(read_cotangent, eqn.out_binders)
rule = transpose_rules[eqn.primitive]
cts_out = rule(cts_in, *primals_in, **eqn.params)
map(write_cotangent, eqn.inputs, cts_out)
return [read_cotangent(v) for v, x in zip(jaxpr.in_binders, args)
if type(x) is UndefPrimal]
transpose_rules = {}
# +
def mul_transpose_rule(cts, x, y):
z_bar, = cts
assert (type(x) is UndefPrimal) ^ (type(y) is UndefPrimal)
return [mul(z_bar, y), None] if type(x) is UndefPrimal else [None, mul(x, z_bar)]
transpose_rules[mul_p] = mul_transpose_rule
def neg_transpose_rule(cts, x):
ybar, = cts
assert type(x) is UndefPrimal
return [neg(ybar)]
transpose_rules[neg_p] = neg_transpose_rule
def add_transpose_rule(cts, x, y):
z_bar, = cts
return [z_bar, z_bar]
transpose_rules[add_p] = add_transpose_rule
def xla_call_transpose_rule(cts, *invals, jaxpr, num_consts):
del num_consts # Unused.
undef_primals = [type(x) is UndefPrimal for x in invals]
transposed_jaxpr, new_consts = transpose_jaxpr(jaxpr, tuple(undef_primals))
residuals, _ = partition_list(undef_primals, invals)
outs = bind(xla_call_p, *new_consts, *residuals, *cts,
jaxpr=transposed_jaxpr, num_consts=len(new_consts))
outs = iter(outs)
return [next(outs) if undef else None for undef in undef_primals]
transpose_rules[xla_call_p] = xla_call_transpose_rule
@lru_cache()
def transpose_jaxpr(jaxpr: Jaxpr, undef_primals: Tuple[bool, ...]
) -> Tuple[Jaxpr, List[Any]]:
avals_in, avals_out = typecheck_jaxpr(jaxpr)
traceable = partial(eval_jaxpr_transposed, jaxpr)
args = [UndefPrimal(a) if u else a for a, u in zip(avals_in, undef_primals)]
trans_jaxpr, consts, _ = make_jaxpr(traceable, tuple(args), tuple(avals_out))
typecheck_jaxpr(trans_jaxpr)
return trans_jaxpr, consts
# -
# Now that we can linearize and transpose, we can finally write `grad`:
def grad(f):
def gradfun(x, *xs):
y, f_vjp = vjp(f, x, *xs)
if np.shape(y) != (): raise TypeError
x_bar, *_ = f_vjp(np.ones(np.shape(y), np.result_type(y)))
return x_bar
return gradfun
y, f_vjp = vjp(sin, 3.)
print(f_vjp(1.), cos(3.))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
print(grad(f)(3.))
# +
@jit
def f(x):
y = x * 2.
z = g(y)
return z
@jit
def g(x):
return cos(x) * 2.
print(grad(f)(3.))
# -
# Here's something of a compositionality stress test:
# +
# from core_test.py fun_with_nested_calls_2
def foo(x):
@jit
def bar(y):
def baz(w):
q = jit(lambda x: y)(x)
q = q + jit(lambda: y)()
q = q + jit(lambda y: w + y)(y)
q = jit(lambda w: jit(sin)(x) * y)(1.0) + q
return q
p, t = jvp(baz, (x + 1.0,), (y,))
return t + (x * p)
return bar(x)
def assert_allclose(*vals):
for v1, v2 in zip(vals[:-1], vals[1:]):
np.testing.assert_allclose(v1, v2)
ans1 = f(3.)
ans2 = jit(f)(3.)
ans3, _ = jvp(f, (3.,), (5.,))
ans4, _ = jvp(jit(f), (3.,), (5.,))
assert_allclose(ans1, ans2, ans3, ans4)
deriv1 = grad(f)(3.)
deriv2 = grad(jit(f))(3.)
deriv3 = jit(grad(jit(f)))(3.)
_, deriv4 = jvp(f, (3.,), (1.,))
_, deriv5 = jvp(jit(f), (3.,), (1.,))
assert_allclose(deriv1, deriv2, deriv3, deriv4, deriv5)
hess1 = grad(grad(f))(3.)
hess2 = grad(grad(jit(f)))(3.)
hess3 = grad(jit(grad(f)))(3.)
hess4 = jit(grad(grad(f)))(3.)
_, hess5 = jvp(grad(f), (3.,), (1.,))
_, hess6 = jvp(jit(grad(f)), (3.,), (1.,))
_, hess7 = jvp(jit(grad(f)), (3.,), (1.,))
assert_allclose(hess1, hess2, hess3, hess4, hess5, hess6, hess7)
# -
# ## Part 5: the control flow primitive `cond`
#
# Next we'll add higher-order primitives for staged-out control flow. These
# resemble `jit` from Part 3, another higher-order primitive, but differ in that
# they are parameterized by multiple callables rather than just one.
# ### Adding `cond`
#
# We introduce a `cond` primitive to represent conditional application of one
# function or another inside a jaxpr. We write the type of `cond` as
# `Bool -> (a -> b) -> (a -> b) -> a -> b`. In words, `cond` takes a boolean
# representing the predicate and two functions of equal types. Depending on the
# value of the predicate, it applies one function or the other to its final
# argument.
#
# In Python, we represent it as a function which itself takes two functions as
# arguments. As with `jit`, the first step is to call `make_jaxpr` on its
# callable arguments to turn them into jaxprs:
# +
def cond(pred, true_fn, false_fn, *operands):
avals_in = [raise_to_shaped(get_aval(x)) for x in operands]
true_jaxpr, true_consts, out_tree = make_jaxpr(true_fn, *avals_in)
false_jaxpr, false_consts, out_tree_ = make_jaxpr(false_fn, *avals_in)
if out_tree != out_tree_: raise TypeError
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
if typecheck_jaxpr(true_jaxpr) != typecheck_jaxpr(false_jaxpr):
raise TypeError
outs = bind_cond(pred, *true_consts, *false_consts, *operands,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
return tree_unflatten(out_tree, outs)
cond_p = Primitive('cond')
def _join_jaxpr_consts(jaxpr1: Jaxpr, jaxpr2: Jaxpr, n1: int, n2: int
) -> Tuple[Jaxpr, Jaxpr]:
jaxpr1_type, jaxpr2_type = typecheck_jaxpr(jaxpr1), typecheck_jaxpr(jaxpr2)
assert jaxpr1_type.in_types[n1:] == jaxpr2_type.in_types[n2:]
consts1, rest1 = split_list(jaxpr1.in_binders, n1)
consts2, rest2 = split_list(jaxpr2.in_binders, n2)
new_jaxpr1 = Jaxpr(consts1 + consts2 + rest1, jaxpr1.eqns, jaxpr1.outs)
new_jaxpr2 = Jaxpr(consts1 + consts2 + rest2, jaxpr2.eqns, jaxpr2.outs)
return new_jaxpr1, new_jaxpr2
def bind_cond(pred, *args, true_jaxpr, false_jaxpr):
assert len(args) == len(true_jaxpr.in_binders) == len(false_jaxpr.in_binders)
return bind(cond_p, pred, *args, true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
# -
# We require `true_jaxpr` and `false_jaxpr` to have the same type, but because
# they might close over different constants (and because jaxprs can only
# represent closed terms, i.e. can't have free variables and are instead
# closure-converted) we need to use the helper `_join_jaxpr_consts` to make
# consistent the input binder lists of the two jaxprs. (To be more economical we
# could try to identify pairs of constants with the same shapes, but instead we
# just concatenate the lists of constants.)
#
# Next we can turn to adding interpreter rules for `cond`. Its evaluation rule
# is simple:
def cond_impl(pred, *operands, true_jaxpr, false_jaxpr):
if pred:
return eval_jaxpr(true_jaxpr, operands)
else:
return eval_jaxpr(false_jaxpr, operands)
impl_rules[cond_p] = cond_impl
out = cond(True, lambda: 3, lambda: 4)
print(out)
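# As one more small sanity check (a minimal sketch in the same spirit as the
# example above), `cond` threads its operands through whichever branch the
# predicate selects, and the branch callables may close over values from the
# enclosing scope:
scale = 2.
out = cond(False, lambda x: x + 1., lambda x: x * scale, 5.)
print(out)  # the false branch runs, so this prints 10.0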
# For its JVP and vmap rules, we only need to call the same `jvp_jaxpr` and
# `vmap_jaxpr` utilities we created for `jit`, followed by another pass of
# `_join_jaxpr_consts`:
def cond_jvp_rule(primals, tangents, *, true_jaxpr, false_jaxpr):
pred, *primals = primals
_ , *tangents = tangents
true_jaxpr , true_consts = jvp_jaxpr(true_jaxpr)
false_jaxpr, false_consts = jvp_jaxpr(false_jaxpr)
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr)
outs = bind_cond(pred, *true_consts, *false_consts, *primals, *tangents,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
primals_out, tangents_out = split_half(outs)
return primals_out, tangents_out
jvp_rules[cond_p] = cond_jvp_rule
out, out_tan = jvp(lambda x: cond(True, lambda: x * x, lambda: 0.), (1.,), (1.,))
print(out_tan)
def cond_vmap_rule(axis_size, vals_in, dims_in, *, true_jaxpr, false_jaxpr):
pred , *vals_in = vals_in
pred_dim, *dims_in = dims_in
if pred_dim is not not_mapped: raise NotImplementedError # TODO
true_jaxpr, true_consts = vmap_jaxpr(true_jaxpr, axis_size, tuple(dims_in))
false_jaxpr, false_consts = vmap_jaxpr(false_jaxpr, axis_size, tuple(dims_in))
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr)
outs = bind_cond(pred, *true_consts, *false_consts, *vals_in,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
return outs, [0] * len(outs)
vmap_rules[cond_p] = cond_vmap_rule
xs = np.array([1., 2., 3])
out = vmap(lambda x: cond(True, lambda: x + 1., lambda: 0.), (0,))(xs)
print(out)
# Notice that we're not currently supporting the case where the predicate value
# itself is batched. In mainline JAX, we handle this case by transforming the
# conditional to a [select primitive](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.select.html).
# That transformation is semantically correct so long as `true_fun` and
# `false_fun` do not involve any side-effecting primitives.
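#
# To make that rewrite concrete, here is a rough sketch of the select-based
# semantics in plain NumPy, with `np.where` standing in for the select
# primitive: both branches are evaluated on the whole batch and the results
# are merged elementwise by the (batched) predicate.
batched_pred = np.array([True, False, True])
batched_xs = np.array([1., 2., 3.])
print(np.where(batched_pred, batched_xs + 1., batched_xs * 2.))  # [2. 4. 4.]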
#
# Another thing not represented here, but present in mainline JAX, is that
# applying transformations to two jaxprs of equal type might result in jaxprs of
# different types. For example, applying the mainline JAX version of
# `vmap_jaxpr` to the identity-function jaxpr
#
# ```
# { lambda a:float32[] .
# let
# in ( a ) }
# ```
#
# would result in a jaxpr with a batched output, of type
# `[float32[10]] -> [float32[10]]` if the batch size were 10, while applying it
# to the zero-function jaxpr
#
# ```
# { lambda a:float32[] .
# let
# in ( 0. ) }
# ```
#
# would result in a jaxpr with an unbatched output, of type
# `[float32[10]] -> [float32[]]`. This is an optimization, aimed at not batching
# values unnecessarily. But it means that in `cond` we'd need an extra step of
# joining the two transformed jaxprs to have consistent output types. We don't
# need this step here because we chose `vmap_jaxpr` always to batch all outputs
# over the leading axis.
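# Concretely, that extra joining step would amount to broadcasting the
# unbatched branch's result up to the batched output type, along these lines
# (an illustrative sketch in plain NumPy):
unbatched_out = np.float32(0.)
joined_out = np.broadcast_to(unbatched_out, (10,))
print(joined_out.shape)  # (10,), now matching the batched branch's output type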
# Next we can turn to abstract evaluation and XLA lowering rules:
# +
def cond_abstract_eval(pred_type, *in_types, true_jaxpr, false_jaxpr):
if pred_type != ShapedArray((), np.dtype('bool')): raise TypeError
jaxpr_type = typecheck_jaxpr(true_jaxpr)
if jaxpr_type != typecheck_jaxpr(false_jaxpr):
raise TypeError
if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)):
raise TypeError
return jaxpr_type.out_types
abstract_eval_rules[cond_p] = cond_abstract_eval
def cond_translation(c, in_avals, in_vals, *, true_jaxpr, false_jaxpr):
del in_avals # Unused.
pred, *in_vals = in_vals
flat_vals, in_tree = tree_flatten(in_vals)
operand = xops.Tuple(c, flat_vals)
operand_shape = c.get_shape(operand)
def make_comp(name: str, jaxpr: Jaxpr) -> xe.XlaComputation:
c = xb.make_computation_builder(name)
operand = xb.parameter(c, 0, operand_shape)
operands = tree_unflatten(in_tree, destructure_tuple(c, operand))
outs = jaxpr_subcomp(c, jaxpr, operands)
return c.build(xops.Tuple(c, outs))
true_comp = make_comp('true_fn', true_jaxpr)
false_comp = make_comp('false_fn', false_jaxpr)
int_etype = xc.dtype_to_etype(np.dtype('int32'))
out = xops.Conditional(xops.ConvertElementType(pred, int_etype),
[false_comp, true_comp], [operand] * 2)
return destructure_tuple(c, out)
xla_translations[cond_p] = cond_translation
# -
out = jit(lambda: cond(False, lambda: 1, lambda: 2))()
print(out)
# Finally, to support reverse-mode automatic differentiation, we need partial
# evaluation and transposition rules. For partial evaluation, we need to
# introduce another jaxpr-munging utility, `_join_jaxpr_res`, to handle the fact
# that applying partial evaluation to `true_fun` and `false_fun` will in general
# result in distinct residuals. We use `_join_jaxpr_res` to make the output
# types of the transformed jaxprs consistent (while `_join_jaxpr_consts` dealt
# with input types).
# +
def cond_partial_eval(trace, tracers, *, true_jaxpr, false_jaxpr):
pred_tracer, *tracers = tracers
assert pred_tracer.pval.is_known
pred = pred_tracer.pval.const
in_uks = [not t.pval.is_known for t in tracers]
*jaxprs, out_uks, num_res = _cond_partial_eval(true_jaxpr, false_jaxpr, in_uks)
t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2 = jaxprs
known_tracers, unknown_tracers = partition_list(in_uks, tracers)
known_vals = [t.pval.const for t in known_tracers]
outs1_res = bind_cond(pred, *known_vals,
true_jaxpr=t_jaxpr1, false_jaxpr=f_jaxpr1)
outs1, res = split_list(outs1_res, len(outs1_res) - num_res)
pred_tracer_ = trace.instantiate_const(full_raise(trace, pred_tracer))
res_tracers = [trace.instantiate_const(full_raise(trace, x)) for x in res]
outs2 = [PartialEvalTracer(trace, PartialVal.unknown(v.aval), None)
for v in t_jaxpr2.outs]
eqn = JaxprEqnRecipe(cond_p, [pred_tracer_, *res_tracers, *unknown_tracers],
dict(true_jaxpr=t_jaxpr2, false_jaxpr=f_jaxpr2),
[v.aval for v in t_jaxpr2.outs], map(ref, outs2))
for t in outs2: t.recipe = eqn
return merge_lists(out_uks, outs1, outs2)
partial_eval_rules[cond_p] = cond_partial_eval
def _cond_partial_eval(true_jaxpr: Jaxpr, false_jaxpr: Jaxpr, in_uks: List[bool]
) -> Tuple[Jaxpr, Jaxpr, Jaxpr, Jaxpr, List[bool], int]:
_, _, t_out_uks, _ = partial_eval_jaxpr(true_jaxpr , in_uks)
_, _, f_out_uks, _ = partial_eval_jaxpr(false_jaxpr, in_uks)
out_uks = map(op.or_, t_out_uks, f_out_uks)
t_jaxpr1, t_jaxpr2, _, t_nres = partial_eval_jaxpr(true_jaxpr , in_uks, out_uks)
f_jaxpr1, f_jaxpr2, _, f_nres = partial_eval_jaxpr(false_jaxpr, in_uks, out_uks)
t_jaxpr1, f_jaxpr1 = _join_jaxpr_res(t_jaxpr1, f_jaxpr1, t_nres, f_nres)
t_jaxpr2, f_jaxpr2 = _join_jaxpr_consts(t_jaxpr2, f_jaxpr2, t_nres, f_nres)
assert typecheck_jaxpr(t_jaxpr1) == typecheck_jaxpr(f_jaxpr1)
assert typecheck_jaxpr(t_jaxpr2) == typecheck_jaxpr(f_jaxpr2)
num_res = t_nres + f_nres
return t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2, out_uks, num_res
def _join_jaxpr_res(jaxpr1: Jaxpr, jaxpr2: Jaxpr, n1: int, n2: int
) -> Tuple[Jaxpr, Jaxpr]:
jaxpr1_type, jaxpr2_type = typecheck_jaxpr(jaxpr1), typecheck_jaxpr(jaxpr2)
out_types1, _ = split_list(jaxpr1_type.out_types, len(jaxpr1.outs) - n1)
out_types2, _ = split_list(jaxpr2_type.out_types, len(jaxpr2.outs) - n2)
assert out_types1 == out_types2
outs1, res1 = split_list(jaxpr1.outs, len(jaxpr1.outs) - n1)
outs2, res2 = split_list(jaxpr2.outs, len(jaxpr2.outs) - n2)
zeros_like1 = [Lit(np.zeros(v.aval.shape, v.aval.dtype)) for v in res1]
zeros_like2 = [Lit(np.zeros(v.aval.shape, v.aval.dtype)) for v in res2]
new_jaxpr1 = Jaxpr(jaxpr1.in_binders, jaxpr1.eqns, outs1 + res1 + zeros_like2)
new_jaxpr2 = Jaxpr(jaxpr2.in_binders, jaxpr2.eqns, outs2 + zeros_like1 + res2)
return new_jaxpr1, new_jaxpr2
# -
_, f_lin = linearize(lambda x: cond(True, lambda: x, lambda: 0.), 1.)
out = f_lin(3.14)
print(out)
def cond_peval_eqn(unks_in: List[bool], eqn: JaxprEqn,
) -> Tuple[JaxprEqn, JaxprEqn, List[bool], List[Atom]]:
pred_unk, *unks_in = unks_in
assert not pred_unk
true_jaxpr, false_jaxpr = eqn.params['true_jaxpr'], eqn.params['false_jaxpr']
*jaxprs, unks_out, num_res = _cond_partial_eval(true_jaxpr, false_jaxpr, unks_in)
t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2 = jaxprs
ins1, ins2 = partition_list(unks_in, eqn.inputs[1:])
outs1, outs2 = partition_list(unks_out, eqn.out_binders)
residuals, _ = split_list(t_jaxpr2.in_binders, num_res)
eqn1 = JaxprEqn(cond_p, [eqn.inputs[0], *ins1],
dict(true_jaxpr=t_jaxpr1, false_jaxpr=f_jaxpr1),
outs1 + residuals)
eqn2 = JaxprEqn(cond_p, [eqn.inputs[0], *residuals, *ins2],
dict(true_jaxpr=t_jaxpr2, false_jaxpr=f_jaxpr2),
outs2)
return eqn1, eqn2, unks_out, [eqn.inputs[0], *residuals]
partial_eval_jaxpr_rules[cond_p] = cond_peval_eqn
_, f_lin = linearize(jit(lambda x: cond(True, lambda: x, lambda: 0.)), 1.)
out = f_lin(3.14)
print(out)
# Transposition is a fairly straightforward application of `transpose_jaxpr`:
def cond_transpose_rule(cts, pred, *invals, true_jaxpr, false_jaxpr):
undef_primals = tuple([type(x) is UndefPrimal for x in invals])
true_jaxpr, true_consts = transpose_jaxpr(true_jaxpr, undef_primals)
false_jaxpr, false_consts = transpose_jaxpr(false_jaxpr, undef_primals)
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
res = [x for x in invals if type(x) is not UndefPrimal]
outs = bind_cond(pred, *true_consts, *false_consts, *res, *cts,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
outs = iter(outs)
return [None] + [next(outs) if type(x) is UndefPrimal else None for x in invals]
transpose_rules[cond_p] = cond_transpose_rule
out = grad(lambda x: cond(True, lambda: x * x, lambda: 0.))(1.)
print(out)
|
from datetime import timedelta, date, datetime
from django.utils.text import slugify
from django.db import models
from django.urls import reverse
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext as _
from django.db.models import Q
from django.template.defaultfilters import truncatewords
# Utils
CONDITION_CHOICES = (
(1, _("New")),
(2, _("Excellent")),
(3, _("Used")),
(4, _("As pictured")),
)
DURATION_CHOICES = (
(30, _("One month")),
(60, _("Two month")),
(15, _("One week")),
)
class AuctionManager(models.Manager):
def to_close(self):
now = datetime.now()
query = Q(Q(Q(duration=30) & Q(start_date__lt=now - timedelta(days=30))) |
Q(Q(duration=60) & Q(start_date__lt=now - timedelta(days=60))) |
Q(Q(duration=15) & Q(start_date__lt=now - timedelta(days=15))))
return super().get_queryset().filter(query)
def open(self):
return super().get_queryset().filter(is_open=True)
def real_price(self, price_gte=None, price_lte=None):
"""
        Filter open auctions by price bounds, using the current price when it
        is set and the starting price otherwise.
"""
query = Q(is_open=True)
if price_gte:
query &= Q(Q(Q(actual_price=0) & Q(starting_price__gte=price_gte)) |
Q(~Q(actual_price=0) & Q(actual_price__gte=price_gte)))
if price_lte:
query &= Q(Q(Q(actual_price=0) & Q(starting_price__lte=price_lte)) |
Q(~Q(actual_price=0) & Q(actual_price__lte=price_lte)))
return super().get_queryset().filter(query)
def get_upload_to_path(instance, filename):
path = f'{settings.UPLOADS_DIR}/auction_'
    path += f'{datetime.now().strftime("%Y_%m_%d")}_'
path += f'{instance.title_slug}_'
return path + filename
# Models
class City(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class MarketplaceUser(AbstractUser):
did_accept_tos = models.BooleanField(default=False, blank=True)
member_since = models.DateField(auto_now_add=True)
address = models.CharField(max_length=1024)
city = models.ForeignKey(City, models.SET_NULL, null=True)
@property
def rating(self):
reviews = self.reviewed_by_buyer.all()
ratings_count = len(reviews)
if ratings_count:
return sum([r.stars for r in reviews]) / ratings_count
else:
return 0
def __str__(self):
return self.username
class Brand(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Auction(models.Model):
title = models.CharField(max_length=255)
description = models.TextField()
product_condition = models.IntegerField(choices=CONDITION_CHOICES, default=1)
brand = models.ForeignKey(Brand, models.SET_NULL, blank=True, null=True)
city = models.ForeignKey(City, models.SET_NULL, null=True, blank=True)
duration = models.IntegerField(choices=DURATION_CHOICES, default=30)
starting_price = models.PositiveIntegerField()
actual_price = models.PositiveIntegerField(blank=True, null=True, default=0)
start_date = models.DateField(auto_now_add=True)
is_open = models.BooleanField(blank=True, default=True)
img1 = models.ImageField(upload_to=get_upload_to_path)
img2 = models.ImageField(upload_to=get_upload_to_path, blank=True, null=True)
img3 = models.ImageField(upload_to=get_upload_to_path, blank=True, null=True)
seller = models.ForeignKey(MarketplaceUser,
on_delete=models.CASCADE,
related_name='auctions',
blank=True, null=True)
highest_bidder = models.ForeignKey(MarketplaceUser,
related_name='highest_bids',
on_delete=models.SET_NULL,
blank=True, null=True)
objects = AuctionManager()
def get_upload_to_path(self, filename):
path = f'{settings.UPLOADS_DIR}/auction_'
        path += f'{datetime.now().strftime("%Y_%m_%d")}_'
path += f'{self.title_slug}_'
return path + filename
def get_absolute_url(self):
return reverse('product_detail', args=[self.id])
@property
def real_price(self):
if self.actual_price == 0:
return self.starting_price
else:
return self.actual_price
@property
def open_until(self):
return self.start_date + timedelta(days=self.duration)
@property
def short_description(self):
if len(self.description) > 100:
return truncatewords(self.description, 15) + ' ...'
else:
return self.description
@property
def title_slug(self):
return slugify(self.title)
def __str__(self):
return f"{self.title} [{self.duration} {_("days")}]"
@property
def end_date(self):
return self.start_date + timedelta(days=self.duration)
@property
def watchers(self):
return self.watch_set.count()
@property
def likes(self):
return self.like_set.count()
def is_liked_by_user(self, user):
return self.like_set.filter(marketplaceuser=user).count()
def is_watched_by_user(self, user):
return self.watch_set.filter(marketplaceuser=user).count()
class FKToUserAndAuction(models.Model):
auction = models.ForeignKey(Auction, models.SET_NULL, blank=True, null=True)
marketplaceuser = models.ForeignKey(MarketplaceUser,
models.SET_NULL, blank=True, null=True)
class Meta:
abstract = True
def __str__(self):
return f'{self.marketplaceuser} : {self.auction}'
class Like(FKToUserAndAuction): pass
class Watch(FKToUserAndAuction): pass
class Review(models.Model):
auction = models.ForeignKey(Auction, models.SET_NULL, blank=True, null=True)
    stars = models.IntegerField(choices=[(i, i) for i in range(6)])
review = models.TextField(blank=True, default='')
seller = models.ForeignKey(MarketplaceUser,
related_name='reviewed_by_buyer',
on_delete=models.CASCADE)
buyer = models.ForeignKey(MarketplaceUser,
related_name='reviewed_by_seller',
on_delete=models.SET_NULL, null=True)
class TermsOfUse(models.Model):
text = models.TextField()
|
from datetime import timedelta, date, datetime
from django.utils.text import slugify
from django.db import models
from django.urls import reverse
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext as _
from django.db.models import Q
from django.template.defaultfilters import truncatewords
# Utils
CONDITION_CHOICES = (
(1, _("New")),
(2, _("Excellent")),
(3, _("Used")),
(4, _("As pictured")),
)
DURATION_CHOICES = (
(30, _("One month")),
(60, _("Two month")),
(15, _("One week")),
)
class AuctionManager(models.Manager):
def to_close(self):
now = datetime.now()
query = Q(Q(Q(duration=30) & Q(start_date__lt=now - timedelta(days=30))) |
Q(Q(duration=60) & Q(start_date__lt=now - timedelta(days=60))) |
Q(Q(duration=15) & Q(start_date__lt=now - timedelta(days=15))))
return super().get_queryset().filter(query)
def open(self):
return super().get_queryset().filter(is_open=True)
def real_price(self, price_gte=None, price_lte=None):
"""
        Filter open auctions by price bounds, using the current price when it
        is set and the starting price otherwise.
"""
query = Q(is_open=True)
if price_gte:
query &= Q(Q(Q(actual_price=0) & Q(starting_price__gte=price_gte)) |
Q(~Q(actual_price=0) & Q(actual_price__gte=price_gte)))
if price_lte:
query &= Q(Q(Q(actual_price=0) & Q(starting_price__lte=price_lte)) |
Q(~Q(actual_price=0) & Q(actual_price__lte=price_lte)))
return super().get_queryset().filter(query)
def get_upload_to_path(instance, filename):
path = f'{settings.UPLOADS_DIR}/auction_'
path += f'{datetime.now().strftime("%Y_%m_%d")}_'
path += f'{instance.title_slug}_'
return path + filename
# Models
class City(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class MarketplaceUser(AbstractUser):
did_accept_tos = models.BooleanField(default=False, blank=True)
member_since = models.DateField(auto_now_add=True)
address = models.CharField(max_length=1024)
city = models.ForeignKey(City, models.SET_NULL, null=True)
@property
def rating(self):
reviews = self.reviewed_by_buyer.all()
ratings_count = len(reviews)
if ratings_count:
return sum([r.stars for r in reviews]) / ratings_count
else:
return 0
def __str__(self):
return self.username
class Brand(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Auction(models.Model):
title = models.CharField(max_length=255)
description = models.TextField()
product_condition = models.IntegerField(choices=CONDITION_CHOICES, default=1)
brand = models.ForeignKey(Brand, models.SET_NULL, blank=True, null=True)
city = models.ForeignKey(City, models.SET_NULL, null=True, blank=True)
duration = models.IntegerField(choices=DURATION_CHOICES, default=30)
starting_price = models.PositiveIntegerField()
actual_price = models.PositiveIntegerField(blank=True, null=True, default=0)
start_date = models.DateField(auto_now_add=True)
is_open = models.BooleanField(blank=True, default=True)
img1 = models.ImageField(upload_to=get_upload_to_path)
img2 = models.ImageField(upload_to=get_upload_to_path, blank=True, null=True)
img3 = models.ImageField(upload_to=get_upload_to_path, blank=True, null=True)
seller = models.ForeignKey(MarketplaceUser,
on_delete=models.CASCADE,
related_name='auctions',
blank=True, null=True)
highest_bidder = models.ForeignKey(MarketplaceUser,
related_name='highest_bids',
on_delete=models.SET_NULL,
blank=True, null=True)
objects = AuctionManager()
def get_upload_to_path(self, filename):
path = f'{settings.UPLOADS_DIR}/auction_'
path += f'{datetime.now().strftime("%Y_%m_%d")}_'
path += f'{self.title_slug}_'
return path + filename
def get_absolute_url(self):
return reverse('product_detail', args=[self.id])
@property
def real_price(self):
if self.actual_price == 0:
return self.starting_price
else:
return self.actual_price
@property
def open_until(self):
return self.start_date + timedelta(days=self.duration)
@property
def short_description(self):
if len(self.description) > 100:
return truncatewords(self.description, 15) + ' ...'
else:
return self.description
@property
def title_slug(self):
return slugify(self.title)
def __str__(self):
return f"{self.title} [{self.duration} {_('days')}]"
@property
def end_date(self):
return self.start_date + timedelta(days=self.duration)
@property
def watchers(self):
return self.watch_set.count()
@property
def likes(self):
return self.like_set.count()
def is_liked_by_user(self, user):
return self.like_set.filter(marketplaceuser=user).count()
def is_watched_by_user(self, user):
return self.watch_set.filter(marketplaceuser=user).count()
class FKToUserAndAuction(models.Model):
auction = models.ForeignKey(Auction, models.SET_NULL, blank=True, null=True)
marketplaceuser = models.ForeignKey(MarketplaceUser,
models.SET_NULL, blank=True, null=True)
class Meta:
abstract = True
def __str__(self):
return f'{self.marketplaceuser} : {self.auction}'
class Like(FKToUserAndAuction): pass
class Watch(FKToUserAndAuction): pass
class Review(models.Model):
auction = models.ForeignKey(Auction, models.SET_NULL, blank=True, null=True)
    stars = models.IntegerField(choices=[(i, i) for i in range(6)])
review = models.TextField(blank=True, default='')
seller = models.ForeignKey(MarketplaceUser,
related_name='reviewed_by_buyer',
on_delete=models.CASCADE)
buyer = models.ForeignKey(MarketplaceUser,
related_name='reviewed_by_seller',
on_delete=models.SET_NULL, null=True)
class TermsOfUse(models.Model):
text = models.TextField()
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import torch
import torch.nn.functional as F
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning.loggers.wandb import WandbLogger
from nemo.collections.tts.data.datalayers import MelAudioDataset
from nemo.collections.tts.helpers.helpers import get_batch_size, get_num_workers, plot_spectrogram_to_numpy
from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss
from nemo.collections.tts.models.base import Vocoder
from nemo.collections.tts.modules.hifigan_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
from nemo.core.optim.lr_scheduler import CosineAnnealing, compute_max_steps
from nemo.utils import logging
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
class HifiGanModel(Vocoder, Exportable):
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
super().__init__(cfg=cfg, trainer=trainer)
self.audio_to_melspec_precessor = instantiate(cfg.preprocessor)
# use a different melspec extractor because:
# 1. we need to pass grads
        # 2. we need to remove the fmax limitation
self.trg_melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True)
self.generator = instantiate(cfg.generator)
self.mpd = MultiPeriodDiscriminator(debug=cfg.debug if "debug" in cfg else False)
self.msd = MultiScaleDiscriminator(debug=cfg.debug if "debug" in cfg else False)
self.feature_loss = FeatureMatchingLoss()
self.discriminator_loss = DiscriminatorLoss()
self.generator_loss = GeneratorLoss()
self.l1_factor = cfg.get("l1_loss_factor", 45)
self.sample_rate = self._cfg.preprocessor.sample_rate
self.stft_bias = None
if self._train_dl and isinstance(self._train_dl.dataset, MelAudioDataset):
self.input_as_mel = True
else:
self.input_as_mel = False
self.automatic_optimization = False
def _get_max_steps(self):
return compute_max_steps(
max_epochs=self._cfg.max_epochs,
accumulate_grad_batches=self.trainer.accumulate_grad_batches,
limit_train_batches=self.trainer.limit_train_batches,
num_workers=get_num_workers(self.trainer),
num_samples=len(self._train_dl.dataset),
batch_size=get_batch_size(self._train_dl),
drop_last=self._train_dl.drop_last,
)
def _get_warmup_steps(self, max_steps):
warmup_steps = self._cfg.sched.get("warmup_steps", None)
warmup_ratio = self._cfg.sched.get("warmup_ratio", None)
if warmup_steps is not None and warmup_ratio is not None:
raise ValueError(f'Either use warmup_steps or warmup_ratio for scheduler')
if warmup_steps is not None:
return warmup_steps
if warmup_ratio is not None:
return warmup_ratio * max_steps
raise ValueError(f'Specify warmup_steps or warmup_ratio for scheduler')
def configure_optimizers(self):
self.optim_g = instantiate(self._cfg.optim, params=self.generator.parameters(),)
self.optim_d = instantiate(
self._cfg.optim, params=itertools.chain(self.msd.parameters(), self.mpd.parameters()),
)
if hasattr(self._cfg, 'sched'):
max_steps = self._cfg.get("max_steps", None)
if max_steps is None or max_steps < 0:
max_steps = self._get_max_steps()
warmup_steps = self._get_warmup_steps(max_steps)
self.scheduler_g = CosineAnnealing(
optimizer=self.optim_g, max_steps=max_steps, min_lr=self._cfg.sched.min_lr, warmup_steps=warmup_steps,
) # Use warmup to delay start
sch1_dict = {
'scheduler': self.scheduler_g,
'interval': 'step',
}
self.scheduler_d = CosineAnnealing(
optimizer=self.optim_d, max_steps=max_steps, min_lr=self._cfg.sched.min_lr,
)
sch2_dict = {
'scheduler': self.scheduler_d,
'interval': 'step',
}
return [self.optim_g, self.optim_d], [sch1_dict, sch2_dict]
else:
return [self.optim_g, self.optim_d]
@property
def input_types(self):
return {
"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"audio": NeuralType(('B', 'S', 'T'), AudioSignal(self.sample_rate)),
}
@typecheck()
def forward(self, *, spec):
"""
        Runs the generator; for inputs and outputs, see input_types and output_types.
"""
return self.generator(x=spec)
@typecheck(
input_types={"spec": NeuralType(('B', 'C', 'T'), MelSpectrogramType())},
output_types={"audio": NeuralType(('B', 'T'), AudioSignal())},
)
def convert_spectrogram_to_audio(self, spec: 'torch.tensor') -> 'torch.tensor':
return self(spec=spec).squeeze(1)
def training_step(self, batch, batch_idx):
# if in finetune mode the mels are pre-computed using a
# spectrogram generator
if self.input_as_mel:
audio, audio_len, audio_mel = batch
# else, we compute the mel using the ground truth audio
else:
audio, audio_len = batch
# mel as input for generator
audio_mel, _ = self.audio_to_melspec_precessor(audio, audio_len)
# mel as input for L1 mel loss
audio_trg_mel, _ = self.trg_melspec_fn(audio, audio_len)
audio = audio.unsqueeze(1)
audio_pred = self.generator(x=audio_mel)
audio_pred_mel, _ = self.trg_melspec_fn(audio_pred.squeeze(1), audio_len)
# train discriminator
self.optim_d.zero_grad()
mpd_score_real, mpd_score_gen, _, _ = self.mpd(y=audio, y_hat=audio_pred.detach())
loss_disc_mpd, _, _ = self.discriminator_loss(
disc_real_outputs=mpd_score_real, disc_generated_outputs=mpd_score_gen
)
msd_score_real, msd_score_gen, _, _ = self.msd(y=audio, y_hat=audio_pred.detach())
loss_disc_msd, _, _ = self.discriminator_loss(
disc_real_outputs=msd_score_real, disc_generated_outputs=msd_score_gen
)
loss_d = loss_disc_msd + loss_disc_mpd
self.manual_backward(loss_d)
self.optim_d.step()
# train generator
self.optim_g.zero_grad()
loss_mel = F.l1_loss(audio_pred_mel, audio_trg_mel)
_, mpd_score_gen, fmap_mpd_real, fmap_mpd_gen = self.mpd(y=audio, y_hat=audio_pred)
_, msd_score_gen, fmap_msd_real, fmap_msd_gen = self.msd(y=audio, y_hat=audio_pred)
loss_fm_mpd = self.feature_loss(fmap_r=fmap_mpd_real, fmap_g=fmap_mpd_gen)
loss_fm_msd = self.feature_loss(fmap_r=fmap_msd_real, fmap_g=fmap_msd_gen)
loss_gen_mpd, _ = self.generator_loss(disc_outputs=mpd_score_gen)
loss_gen_msd, _ = self.generator_loss(disc_outputs=msd_score_gen)
loss_g = loss_gen_msd + loss_gen_mpd + loss_fm_msd + loss_fm_mpd + loss_mel * self.l1_factor
self.manual_backward(loss_g)
self.optim_g.step()
# run schedulers
schedulers = self.lr_schedulers()
if schedulers is not None:
sch1, sch2 = schedulers
sch1.step()
sch2.step()
metrics = {
"g_loss_fm_mpd": loss_fm_mpd,
"g_loss_fm_msd": loss_fm_msd,
"g_loss_gen_mpd": loss_gen_mpd,
"g_loss_gen_msd": loss_gen_msd,
"g_loss": loss_g,
"d_loss_mpd": loss_disc_mpd,
"d_loss_msd": loss_disc_msd,
"d_loss": loss_d,
"global_step": self.global_step,
"lr": self.optim_g.param_groups[0]['lr'],
}
self.log_dict(metrics, on_step=True, sync_dist=True)
self.log("g_l1_loss", loss_mel, prog_bar=True, logger=False, sync_dist=True)
def validation_step(self, batch, batch_idx):
if self.input_as_mel:
audio, audio_len, audio_mel = batch
audio_mel_len = [audio_mel.shape[1]] * audio_mel.shape[0]
else:
audio, audio_len = batch
audio_mel, audio_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
audio_pred = self(spec=audio_mel)
# perform bias denoising
pred_denoised = self._bias_denoise(audio_pred, audio_mel).squeeze(1)
pred_denoised_mel, _ = self.audio_to_melspec_precessor(pred_denoised, audio_len)
if self.input_as_mel:
gt_mel, gt_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
audio_pred_mel, _ = self.audio_to_melspec_precessor(audio_pred.squeeze(1), audio_len)
loss_mel = F.l1_loss(audio_mel, audio_pred_mel)
self.log_dict({"val_loss": loss_mel}, on_epoch=True, sync_dist=True)
# plot audio once per epoch
if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB:
clips = []
specs = []
for i in range(min(5, audio.shape[0])):
clips += [
wandb.Audio(
audio[i, : audio_len[i]].data.cpu().numpy(),
caption=f"real audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
audio_pred[i, 0, : audio_len[i]].data.cpu().numpy().astype('float32'),
caption=f"generated audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
pred_denoised[i, : audio_len[i]].data.cpu().numpy(),
caption=f"denoised audio {i}",
sample_rate=self.sample_rate,
),
]
specs += [
wandb.Image(
plot_spectrogram_to_numpy(audio_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"input mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(audio_pred_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"output mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(pred_denoised_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"denoised mel {i}",
),
]
if self.input_as_mel:
specs += [
wandb.Image(
plot_spectrogram_to_numpy(gt_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"gt mel {i}",
),
]
self.logger.experiment.log({"audio": clips, "specs": specs})
def _bias_denoise(self, audio, mel):
def stft(x):
comp = torch.stft(x.squeeze(1), n_fft=1024, hop_length=256, win_length=1024)
real, imag = comp[..., 0], comp[..., 1]
mags = torch.sqrt(real ** 2 + imag ** 2)
phase = torch.atan2(imag, real)
return mags, phase
def istft(mags, phase):
comp = torch.stack([mags * torch.cos(phase), mags * torch.sin(phase)], dim=-1)
x = torch.istft(comp, n_fft=1024, hop_length=256, win_length=1024)
return x
# create bias tensor
if self.stft_bias is None or self.stft_bias.shape[0] != audio.shape[0]:
audio_bias = self(spec=torch.zeros_like(mel, device=mel.device))
self.stft_bias, _ = stft(audio_bias)
self.stft_bias = self.stft_bias[:, :, 0][:, :, None]
audio_mags, audio_phase = stft(audio)
audio_mags = audio_mags - self.cfg.get("denoise_strength", 0.0025) * self.stft_bias
audio_mags = torch.clamp(audio_mags, 0.0)
audio_denoised = istft(audio_mags, audio_phase).unsqueeze(1)
return audio_denoised
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {name}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloder_params for {name}")
if shuffle_should_be:
if 'shuffle' not in cfg.dataloader_params:
logging.warning(
f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
"config. Manually setting to True"
)
with open_dict(cfg["dataloader_params"]):
cfg.dataloader_params.shuffle = True
elif not cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
elif not shuffle_should_be and cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
dataset = instantiate(cfg.dataset)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="validation")
@classmethod
def list_available_models(cls) -> 'Optional[Dict[str, str]]':
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_hifigan",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_hifigan/versions/1.0.0rc1/files/tts_hifigan.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from Tacotron2, TalkNet, and FastPitch. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_hifigan_ft_mixertts",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_hifigan/versions/1.6.0/files/tts_en_lj_hifigan_ft_mixertts.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from Mixer-TTS. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_hifigan_ft_mixerttsx",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_hifigan/versions/1.6.0/files/tts_en_lj_hifigan_ft_mixerttsx.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from Mixer-TTS-X. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
def load_state_dict(self, state_dict, strict=True):
# override load_state_dict to give us some flexibility to be backward-compatible
# with old checkpoints
new_state_dict = {}
num_resblocks = len(self.cfg['generator']['resblock_kernel_sizes'])
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
                # only do this if the checkpoint type is older
if len(parts) == 6:
layer = int(parts[2])
new_layer = f"{layer // num_resblocks}.{layer % num_resblocks}"
new_k = f"generator.resblocks.{new_layer}.{".".join(parts[3:])}"
new_state_dict[new_k] = v
super().load_state_dict(new_state_dict, strict=strict)
def _prepare_for_export(self, **kwargs):
"""
Override this method to prepare module for export. This is in-place operation.
Base version does common necessary module replacements (Apex etc)
"""
if self.generator is not None:
try:
self.generator.remove_weight_norm()
except ValueError:
return
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
par = next(self.parameters())
mel = torch.randn((1, self.cfg['preprocessor']['nfilt'], 96), device=par.device, dtype=par.dtype)
return ({'spec': mel},)
def forward_for_export(self, spec):
"""
        Runs the generator; for inputs and outputs, see input_types and output_types.
"""
return self.generator(x=spec)
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import torch
import torch.nn.functional as F
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning.loggers.wandb import WandbLogger
from nemo.collections.tts.data.datalayers import MelAudioDataset
from nemo.collections.tts.helpers.helpers import get_batch_size, get_num_workers, plot_spectrogram_to_numpy
from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss
from nemo.collections.tts.models.base import Vocoder
from nemo.collections.tts.modules.hifigan_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.neural_types.elements import AudioSignal, MelSpectrogramType
from nemo.core.neural_types.neural_type import NeuralType
from nemo.core.optim.lr_scheduler import CosineAnnealing, compute_max_steps
from nemo.utils import logging
HAVE_WANDB = True
try:
import wandb
except ModuleNotFoundError:
HAVE_WANDB = False
class HifiGanModel(Vocoder, Exportable):
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
super().__init__(cfg=cfg, trainer=trainer)
self.audio_to_melspec_precessor = instantiate(cfg.preprocessor)
# use a different melspec extractor because:
# 1. we need to pass grads
        # 2. we need to remove the fmax limitation
self.trg_melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True)
self.generator = instantiate(cfg.generator)
self.mpd = MultiPeriodDiscriminator(debug=cfg.debug if "debug" in cfg else False)
self.msd = MultiScaleDiscriminator(debug=cfg.debug if "debug" in cfg else False)
self.feature_loss = FeatureMatchingLoss()
self.discriminator_loss = DiscriminatorLoss()
self.generator_loss = GeneratorLoss()
self.l1_factor = cfg.get("l1_loss_factor", 45)
self.sample_rate = self._cfg.preprocessor.sample_rate
self.stft_bias = None
if self._train_dl and isinstance(self._train_dl.dataset, MelAudioDataset):
self.input_as_mel = True
else:
self.input_as_mel = False
self.automatic_optimization = False
def _get_max_steps(self):
return compute_max_steps(
max_epochs=self._cfg.max_epochs,
accumulate_grad_batches=self.trainer.accumulate_grad_batches,
limit_train_batches=self.trainer.limit_train_batches,
num_workers=get_num_workers(self.trainer),
num_samples=len(self._train_dl.dataset),
batch_size=get_batch_size(self._train_dl),
drop_last=self._train_dl.drop_last,
)
def _get_warmup_steps(self, max_steps):
warmup_steps = self._cfg.sched.get("warmup_steps", None)
warmup_ratio = self._cfg.sched.get("warmup_ratio", None)
if warmup_steps is not None and warmup_ratio is not None:
raise ValueError(f'Either use warmup_steps or warmup_ratio for scheduler')
if warmup_steps is not None:
return warmup_steps
if warmup_ratio is not None:
return warmup_ratio * max_steps
raise ValueError(f'Specify warmup_steps or warmup_ratio for scheduler')
def configure_optimizers(self):
self.optim_g = instantiate(self._cfg.optim, params=self.generator.parameters(),)
self.optim_d = instantiate(
self._cfg.optim, params=itertools.chain(self.msd.parameters(), self.mpd.parameters()),
)
if hasattr(self._cfg, 'sched'):
max_steps = self._cfg.get("max_steps", None)
if max_steps is None or max_steps < 0:
max_steps = self._get_max_steps()
warmup_steps = self._get_warmup_steps(max_steps)
self.scheduler_g = CosineAnnealing(
optimizer=self.optim_g, max_steps=max_steps, min_lr=self._cfg.sched.min_lr, warmup_steps=warmup_steps,
) # Use warmup to delay start
sch1_dict = {
'scheduler': self.scheduler_g,
'interval': 'step',
}
self.scheduler_d = CosineAnnealing(
optimizer=self.optim_d, max_steps=max_steps, min_lr=self._cfg.sched.min_lr,
)
sch2_dict = {
'scheduler': self.scheduler_d,
'interval': 'step',
}
return [self.optim_g, self.optim_d], [sch1_dict, sch2_dict]
else:
return [self.optim_g, self.optim_d]
@property
def input_types(self):
return {
"spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
}
@property
def output_types(self):
return {
"audio": NeuralType(('B', 'S', 'T'), AudioSignal(self.sample_rate)),
}
@typecheck()
def forward(self, *, spec):
"""
        Runs the generator; for inputs and outputs, see input_types and output_types.
"""
return self.generator(x=spec)
@typecheck(
input_types={"spec": NeuralType(('B', 'C', 'T'), MelSpectrogramType())},
output_types={"audio": NeuralType(('B', 'T'), AudioSignal())},
)
def convert_spectrogram_to_audio(self, spec: 'torch.tensor') -> 'torch.tensor':
return self(spec=spec).squeeze(1)
def training_step(self, batch, batch_idx):
# if in finetune mode the mels are pre-computed using a
# spectrogram generator
if self.input_as_mel:
audio, audio_len, audio_mel = batch
# else, we compute the mel using the ground truth audio
else:
audio, audio_len = batch
# mel as input for generator
audio_mel, _ = self.audio_to_melspec_precessor(audio, audio_len)
# mel as input for L1 mel loss
audio_trg_mel, _ = self.trg_melspec_fn(audio, audio_len)
audio = audio.unsqueeze(1)
audio_pred = self.generator(x=audio_mel)
audio_pred_mel, _ = self.trg_melspec_fn(audio_pred.squeeze(1), audio_len)
# train discriminator
self.optim_d.zero_grad()
mpd_score_real, mpd_score_gen, _, _ = self.mpd(y=audio, y_hat=audio_pred.detach())
loss_disc_mpd, _, _ = self.discriminator_loss(
disc_real_outputs=mpd_score_real, disc_generated_outputs=mpd_score_gen
)
msd_score_real, msd_score_gen, _, _ = self.msd(y=audio, y_hat=audio_pred.detach())
loss_disc_msd, _, _ = self.discriminator_loss(
disc_real_outputs=msd_score_real, disc_generated_outputs=msd_score_gen
)
loss_d = loss_disc_msd + loss_disc_mpd
self.manual_backward(loss_d)
self.optim_d.step()
# train generator
self.optim_g.zero_grad()
loss_mel = F.l1_loss(audio_pred_mel, audio_trg_mel)
_, mpd_score_gen, fmap_mpd_real, fmap_mpd_gen = self.mpd(y=audio, y_hat=audio_pred)
_, msd_score_gen, fmap_msd_real, fmap_msd_gen = self.msd(y=audio, y_hat=audio_pred)
loss_fm_mpd = self.feature_loss(fmap_r=fmap_mpd_real, fmap_g=fmap_mpd_gen)
loss_fm_msd = self.feature_loss(fmap_r=fmap_msd_real, fmap_g=fmap_msd_gen)
loss_gen_mpd, _ = self.generator_loss(disc_outputs=mpd_score_gen)
loss_gen_msd, _ = self.generator_loss(disc_outputs=msd_score_gen)
loss_g = loss_gen_msd + loss_gen_mpd + loss_fm_msd + loss_fm_mpd + loss_mel * self.l1_factor
self.manual_backward(loss_g)
self.optim_g.step()
# run schedulers
schedulers = self.lr_schedulers()
if schedulers is not None:
sch1, sch2 = schedulers
sch1.step()
sch2.step()
metrics = {
"g_loss_fm_mpd": loss_fm_mpd,
"g_loss_fm_msd": loss_fm_msd,
"g_loss_gen_mpd": loss_gen_mpd,
"g_loss_gen_msd": loss_gen_msd,
"g_loss": loss_g,
"d_loss_mpd": loss_disc_mpd,
"d_loss_msd": loss_disc_msd,
"d_loss": loss_d,
"global_step": self.global_step,
"lr": self.optim_g.param_groups[0]['lr'],
}
self.log_dict(metrics, on_step=True, sync_dist=True)
self.log("g_l1_loss", loss_mel, prog_bar=True, logger=False, sync_dist=True)
def validation_step(self, batch, batch_idx):
if self.input_as_mel:
audio, audio_len, audio_mel = batch
audio_mel_len = [audio_mel.shape[1]] * audio_mel.shape[0]
else:
audio, audio_len = batch
audio_mel, audio_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
audio_pred = self(spec=audio_mel)
# perform bias denoising
pred_denoised = self._bias_denoise(audio_pred, audio_mel).squeeze(1)
pred_denoised_mel, _ = self.audio_to_melspec_precessor(pred_denoised, audio_len)
if self.input_as_mel:
gt_mel, gt_mel_len = self.audio_to_melspec_precessor(audio, audio_len)
audio_pred_mel, _ = self.audio_to_melspec_precessor(audio_pred.squeeze(1), audio_len)
loss_mel = F.l1_loss(audio_mel, audio_pred_mel)
self.log_dict({"val_loss": loss_mel}, on_epoch=True, sync_dist=True)
# plot audio once per epoch
if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB:
clips = []
specs = []
for i in range(min(5, audio.shape[0])):
clips += [
wandb.Audio(
audio[i, : audio_len[i]].data.cpu().numpy(),
caption=f"real audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
audio_pred[i, 0, : audio_len[i]].data.cpu().numpy().astype('float32'),
caption=f"generated audio {i}",
sample_rate=self.sample_rate,
),
wandb.Audio(
pred_denoised[i, : audio_len[i]].data.cpu().numpy(),
caption=f"denoised audio {i}",
sample_rate=self.sample_rate,
),
]
specs += [
wandb.Image(
plot_spectrogram_to_numpy(audio_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"input mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(audio_pred_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"output mel {i}",
),
wandb.Image(
plot_spectrogram_to_numpy(pred_denoised_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"denoised mel {i}",
),
]
if self.input_as_mel:
specs += [
wandb.Image(
plot_spectrogram_to_numpy(gt_mel[i, :, : audio_mel_len[i]].data.cpu().numpy()),
caption=f"gt mel {i}",
),
]
self.logger.experiment.log({"audio": clips, "specs": specs})
def _bias_denoise(self, audio, mel):
def stft(x):
comp = torch.stft(x.squeeze(1), n_fft=1024, hop_length=256, win_length=1024)
real, imag = comp[..., 0], comp[..., 1]
mags = torch.sqrt(real ** 2 + imag ** 2)
phase = torch.atan2(imag, real)
return mags, phase
def istft(mags, phase):
comp = torch.stack([mags * torch.cos(phase), mags * torch.sin(phase)], dim=-1)
x = torch.istft(comp, n_fft=1024, hop_length=256, win_length=1024)
return x
# create bias tensor
if self.stft_bias is None or self.stft_bias.shape[0] != audio.shape[0]:
audio_bias = self(spec=torch.zeros_like(mel, device=mel.device))
self.stft_bias, _ = stft(audio_bias)
self.stft_bias = self.stft_bias[:, :, 0][:, :, None]
audio_mags, audio_phase = stft(audio)
audio_mags = audio_mags - self.cfg.get("denoise_strength", 0.0025) * self.stft_bias
audio_mags = torch.clamp(audio_mags, 0.0)
audio_denoised = istft(audio_mags, audio_phase).unsqueeze(1)
return audio_denoised
def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
raise ValueError(f"No dataset for {name}")
if "dataloader_params" not in cfg or not isinstance(cfg.dataloader_params, DictConfig):
raise ValueError(f"No dataloder_params for {name}")
if shuffle_should_be:
if 'shuffle' not in cfg.dataloader_params:
logging.warning(
f"Shuffle should be set to True for {self}'s {name} dataloader but was not found in its "
"config. Manually setting to True"
)
with open_dict(cfg["dataloader_params"]):
cfg.dataloader_params.shuffle = True
elif not cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to False!!!")
elif not shuffle_should_be and cfg.dataloader_params.shuffle:
logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
dataset = instantiate(cfg.dataset)
return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
def setup_training_data(self, cfg):
self._train_dl = self.__setup_dataloader_from_config(cfg)
def setup_validation_data(self, cfg):
self._validation_dl = self.__setup_dataloader_from_config(cfg, shuffle_should_be=False, name="validation")
@classmethod
def list_available_models(cls) -> 'Optional[Dict[str, str]]':
list_of_models = []
model = PretrainedModelInfo(
pretrained_model_name="tts_hifigan",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_hifigan/versions/1.0.0rc1/files/tts_hifigan.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from Tacotron2, TalkNet, and FastPitch. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_hifigan_ft_mixertts",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_hifigan/versions/1.6.0/files/tts_en_lj_hifigan_ft_mixertts.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from Mixer-TTS. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
model = PretrainedModelInfo(
pretrained_model_name="tts_en_lj_hifigan_ft_mixerttsx",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_lj_hifigan/versions/1.6.0/files/tts_en_lj_hifigan_ft_mixerttsx.nemo",
description="This model is trained on LJSpeech audio sampled at 22050Hz and mel spectrograms generated from Mixer-TTS-X. This model has been tested on generating female English voices with an American accent.",
class_=cls,
)
list_of_models.append(model)
return list_of_models
def load_state_dict(self, state_dict, strict=True):
# override load_state_dict to give us some flexibility to be backward-compatible
# with old checkpoints
new_state_dict = {}
num_resblocks = len(self.cfg['generator']['resblock_kernel_sizes'])
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
                # only do this if the checkpoint type is older
if len(parts) == 6:
layer = int(parts[2])
new_layer = f"{layer // num_resblocks}.{layer % num_resblocks}"
new_k = f"generator.resblocks.{new_layer}.{'.'.join(parts[3:])}"
new_state_dict[new_k] = v
super().load_state_dict(new_state_dict, strict=strict)
def _prepare_for_export(self, **kwargs):
"""
Override this method to prepare module for export. This is in-place operation.
Base version does common necessary module replacements (Apex etc)
"""
if self.generator is not None:
try:
self.generator.remove_weight_norm()
except ValueError:
return
def input_example(self):
"""
Generates input examples for tracing etc.
Returns:
A tuple of input examples.
"""
par = next(self.parameters())
mel = torch.randn((1, self.cfg['preprocessor']['nfilt'], 96), device=par.device, dtype=par.dtype)
return ({'spec': mel},)
def forward_for_export(self, spec):
"""
        Runs the generator; for inputs and outputs, see input_types and output_types.
"""
return self.generator(x=spec)
|
"""Performs an SRE checkpoint.
The checks are defined in
https://gitlab.cee.redhat.com/app-sre/contract/-/blob/master/content/process/sre_checkpoints.md
"""
import logging
import re
from functools import partial, lru_cache
from http import HTTPStatus
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Mapping, Union
import requests
from jinja2 import Template
from jira import Issue
from reconcile.utils.constants import PROJ_ROOT
from reconcile.utils.jira_client import JiraClient
DEFAULT_CHECKPOINT_LABELS = ("sre-checkpoint",)
# We reject the full RFC 5322 standard since many clients will choke
# with some carefully crafted valid addresses.
EMAIL_ADDRESS_REGEXP = re.compile(r"^\w+[-\w\.]*@(?:\w[-\w]*\w\.)+\w+")
MAX_EMAIL_ADDRESS_LENGTH = 320 # Per RFC3696
MISSING_DATA_TEMPLATE = (
PROJ_ROOT / "templates" / "jira-checkpoint-missinginfo.j2"
)
@lru_cache
def url_makes_sense(url: str) -> bool:
"""Guesses whether the URL may have a meaningful document.
Obvious cases are if the document can be fully downloaded, but we
also accept that the given document may require credentials that
we don't have.
    The URL is nonsensical if the server is crashing, the document
    doesn't exist, or the URL can't even be probed with GET.
"""
if not url:
return False
try:
rs = requests.get(url)
except requests.exceptions.ConnectionError:
return False
# Codes above NOT_FOUND mean the URL to the document doesn't
# exist, that the URL is very malformed or that it points to a
# broken resource
return rs.status_code < HTTPStatus.NOT_FOUND
def valid_owners(owners: Iterable[Mapping[str, str]]) -> bool:
"""Confirm whether all the owners have a name and a valid email address."""
return all(
o["name"]
and o["email"]
and EMAIL_ADDRESS_REGEXP.fullmatch(o["email"])
and len(o["email"]) <= MAX_EMAIL_ADDRESS_LENGTH
for o in owners
)
VALIDATORS: Dict[str, Callable] = {
"sopsUrl": url_makes_sense,
"architectureDocument": url_makes_sense,
"grafanaUrls": lambda x: all(url_makes_sense(y["url"]) for y in x),
"serviceOwners": valid_owners,
}
def render_template(
template: Path, name: str, path: str, field: str, bad_value: str
) -> str:
"""Render the template with all its fields."""
with open(template) as f:
t = Template(f.read(), keep_trailing_newline=True, trim_blocks=True)
return t.render(
app_name=name, app_path=path, field=field, field_value=bad_value
)
def file_ticket(
jira: JiraClient,
field: str,
app_name: str,
app_path: str,
labels: Iterable[str],
parent: str,
bad_value: str,
) -> Issue:
"""Return a ticket."""
if bad_value:
summary = f"Incorrect metadata {field} for {app_name}"
else:
summary = f"Missing metadata {field} for {app_name}"
i = jira.create_issue(
summary,
render_template(
MISSING_DATA_TEMPLATE, app_name, app_path, field, bad_value
),
labels=labels,
links=(parent,),
)
return i
def report_invalid_metadata(
app: Mapping[str, Any],
path: str,
board: Mapping[str, Union[str, Mapping]],
settings: Mapping[str, Any],
parent: str,
dry_run: bool = False,
) -> None:
"""Cut tickets for any missing/invalid field in the app.
During dry runs it will only log the rendered template.
:param app: App description, as returned by
queries.JIRA_BOARDS_QUICK_QUERY
:param path: path in app-interface to said app
:param board: JIRA board description, as per
queries.JIRA_BOARDS_QUERY
:param settings: app-interface settings (necessary to log into the
JIRA instance)
:param parent: parent ticket for this checkpoint
:param dry_run: whether this is a dry run
"""
if dry_run:
do_cut = partial(
render_template,
template=MISSING_DATA_TEMPLATE,
name=app["name"],
path=path,
)
else:
jira = JiraClient(board, settings)
do_cut = partial(
file_ticket, # type: ignore
jira=jira,
app_name=app["name"],
labels=DEFAULT_CHECKPOINT_LABELS,
parent=parent,
app_path=path,
)
for field, validator in VALIDATORS.items():
value = app.get(field)
try:
if not validator(value):
i = do_cut(field=field, bad_value=str(value))
logging.error(
f"Reporting bad field {field} with value {value}: {i}"
)
except Exception as e:
i = do_cut(field=field, bad_value=str(value))
logging.error(f"Problems with {field} for {app["name"]}: {e}")
logging.error(f"Will report as {i}")
logging.debug(f"Stack trace of {e}:", exc_info=True)
|
"""Performs an SRE checkpoint.
The checks are defined in
https://gitlab.cee.redhat.com/app-sre/contract/-/blob/master/content/process/sre_checkpoints.md
"""
import logging
import re
from functools import partial, lru_cache
from http import HTTPStatus
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Mapping, Union
import requests
from jinja2 import Template
from jira import Issue
from reconcile.utils.constants import PROJ_ROOT
from reconcile.utils.jira_client import JiraClient
DEFAULT_CHECKPOINT_LABELS = ("sre-checkpoint",)
# We reject the full RFC 5322 standard since many clients will choke
# with some carefully crafted valid addresses.
EMAIL_ADDRESS_REGEXP = re.compile(r"^\w+[-\w\.]*@(?:\w[-\w]*\w\.)+\w+")
MAX_EMAIL_ADDRESS_LENGTH = 320 # Per RFC3696
MISSING_DATA_TEMPLATE = (
PROJ_ROOT / "templates" / "jira-checkpoint-missinginfo.j2"
)
@lru_cache
def url_makes_sense(url: str) -> bool:
"""Guesses whether the URL may have a meaningful document.
Obvious cases are if the document can be fully downloaded, but we
also accept that the given document may require credentials that
we don't have.
    The URL is nonsensical if the server is crashing, the document
    doesn't exist, or the specified URL can't even be probed with GET.
"""
if not url:
return False
try:
rs = requests.get(url)
except requests.exceptions.ConnectionError:
return False
# Codes above NOT_FOUND mean the URL to the document doesn't
# exist, that the URL is very malformed or that it points to a
# broken resource
return rs.status_code < HTTPStatus.NOT_FOUND
def valid_owners(owners: Iterable[Mapping[str, str]]) -> bool:
"""Confirm whether all the owners have a name and a valid email address."""
return all(
o["name"]
and o["email"]
and EMAIL_ADDRESS_REGEXP.fullmatch(o["email"])
and len(o["email"]) <= MAX_EMAIL_ADDRESS_LENGTH
for o in owners
)
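# Illustrative behaviour of valid_owners (addresses are placeholders):
#
#     valid_owners([{"name": "Jane", "email": "jane@example.com"}])   # -> True
#     valid_owners([{"name": "", "email": "jane@example.com"}])       # -> False (empty name)
#     valid_owners([{"name": "Jane", "email": "not-an-email"}])       # -> False (regexp rejects it)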
VALIDATORS: Dict[str, Callable] = {
"sopsUrl": url_makes_sense,
"architectureDocument": url_makes_sense,
"grafanaUrls": lambda x: all(url_makes_sense(y["url"]) for y in x),
"serviceOwners": valid_owners,
}
def render_template(
template: Path, name: str, path: str, field: str, bad_value: str
) -> str:
"""Render the template with all its fields."""
with open(template) as f:
t = Template(f.read(), keep_trailing_newline=True, trim_blocks=True)
return t.render(
app_name=name, app_path=path, field=field, field_value=bad_value
)
def file_ticket(
jira: JiraClient,
field: str,
app_name: str,
app_path: str,
labels: Iterable[str],
parent: str,
bad_value: str,
) -> Issue:
"""Return a ticket."""
if bad_value:
summary = f"Incorrect metadata {field} for {app_name}"
else:
summary = f"Missing metadata {field} for {app_name}"
i = jira.create_issue(
summary,
render_template(
MISSING_DATA_TEMPLATE, app_name, app_path, field, bad_value
),
labels=labels,
links=(parent,),
)
return i
def report_invalid_metadata(
app: Mapping[str, Any],
path: str,
board: Mapping[str, Union[str, Mapping]],
settings: Mapping[str, Any],
parent: str,
dry_run: bool = False,
) -> None:
"""Cut tickets for any missing/invalid field in the app.
During dry runs it will only log the rendered template.
:param app: App description, as returned by
queries.JIRA_BOARDS_QUICK_QUERY
:param path: path in app-interface to said app
:param board: JIRA board description, as per
queries.JIRA_BOARDS_QUERY
:param settings: app-interface settings (necessary to log into the
JIRA instance)
:param parent: parent ticket for this checkpoint
:param dry_run: whether this is a dry run
"""
if dry_run:
do_cut = partial(
render_template,
template=MISSING_DATA_TEMPLATE,
name=app["name"],
path=path,
)
else:
jira = JiraClient(board, settings)
do_cut = partial(
file_ticket, # type: ignore
jira=jira,
app_name=app["name"],
labels=DEFAULT_CHECKPOINT_LABELS,
parent=parent,
app_path=path,
)
for field, validator in VALIDATORS.items():
value = app.get(field)
try:
if not validator(value):
i = do_cut(field=field, bad_value=str(value))
logging.error(
f"Reporting bad field {field} with value {value}: {i}"
)
except Exception as e:
i = do_cut(field=field, bad_value=str(value))
logging.error(f"Problems with {field} for {app['name']}: {e}")
logging.error(f"Will report as {i}")
logging.debug(f"Stack trace of {e}:", exc_info=True)
|
"""
# Validation loop
The lightning validation loop handles everything except the actual computations of your model.
To decide what will happen in your validation loop, define the `validation_step` function.
Below are all the things lightning automates for you in the validation loop.
.. note:: Lightning will run 5 steps of validation in the beginning of training as a sanity
check so you don't have to wait until a full epoch to catch possible validation issues.
Check validation every n epochs
-------------------------------
If you have a small dataset you might want to check validation every n epochs
.. code-block:: python
# DEFAULT
trainer = Trainer(check_val_every_n_epoch=1)
Set how much of the validation set to check
-------------------------------------------
If you don't want to check 100% of the validation set (for debugging or if it's huge), set this flag
val_percent_check will be overwritten by overfit_pct if `overfit_pct > 0`
.. code-block:: python
# DEFAULT
trainer = Trainer(val_percent_check=1.0)
# check 10% only
trainer = Trainer(val_percent_check=0.1)
Set how much of the test set to check
-------------------------------------
If you don't want to check 100% of the test set (for debugging or if it's huge), set this flag
test_percent_check will be overwritten by overfit_pct if `overfit_pct > 0`
.. code-block:: python
# DEFAULT
trainer = Trainer(test_percent_check=1.0)
# check 10% only
trainer = Trainer(test_percent_check=0.1)
Set validation check frequency within 1 training epoch
------------------------------------------------------
For large datasets it's often desirable to check validation multiple times within a training loop.
Pass in a float to check that often within 1 training epoch.
Pass in an int k to check every k training batches. Must use an int if using an IterableDataset.
.. code-block:: python
# DEFAULT
trainer = Trainer(val_check_interval=0.95)
# check every .25 of an epoch
trainer = Trainer(val_check_interval=0.25)
# check every 100 train batches (ie: for IterableDatasets or fixed frequency)
trainer = Trainer(val_check_interval=100)
Set the number of validation sanity steps
-----------------------------------------
Lightning runs a few steps of validation in the beginning of training.
This avoids crashing in the validation loop sometime deep into a lengthy training loop.
.. code-block:: python
# DEFAULT
trainer = Trainer(num_sanity_val_steps=5)
You can use `Trainer(num_sanity_val_steps=0)` to skip the sanity check.
# Testing loop
To ensure you don't accidentally use test data to guide training decisions Lightning
makes running the test set deliberate.
**test**
You have two options to run the test set.
First case is where you test right after a full training routine.
.. code-block:: python
# run full training
trainer.fit(model)
# run test set
trainer.test()
Second case is where you load a model and run the test set
.. code-block:: python
model = MyLightningModule.load_from_metrics(
weights_path='/path/to/pytorch_checkpoint.ckpt',
tags_csv='/path/to/test_tube/experiment/version/meta_tags.csv',
on_gpu=True,
map_location=None
)
# init trainer with whatever options
trainer = Trainer(...)
# test (pass in the model)
trainer.test(model)
In this second case, the options you pass to trainer will be used when running
the test set (ie: 16-bit, dp, ddp, etc...)
"""
from abc import ABC, abstractmethod
import torch
import sys
import tqdm
from pytorch_lightning.utilities.debugging import MisconfigurationException
class TrainerEvaluationLoopMixin(ABC):
def __init__(self):
# this is just a summary on variables used in this abstract class,
# the proper values/initialisation should be done in child class
self.test_progress_bar = None
self.val_progress_bar = None
self.main_progress_bar = None
self.use_ddp = None
self.use_dp = None
self.use_ddp2 = None
self.single_gpu = None
self.data_parallel_device_ids = None
self.model = None
self.num_test_batches = None
self.num_val_batches = None
self.fast_dev_run = None
self.process_position = None
self.show_progress_bar = None
self.process_output = None
self.training_tqdm_dict = None
self.proc_rank = None
self.checkpoint_callback = None
self.current_epoch = None
self.callback_metrics = None
self.get_test_dataloaders = None
self.get_val_dataloaders = None
@abstractmethod
def copy_trainer_model_properties(self, model):
# this is just empty shell for code from other class
pass
@abstractmethod
def get_model(self):
# this is just empty shell for code from other class
pass
@abstractmethod
def is_overriden(self, m):
# this is just empty shell for code from other class
pass
@abstractmethod
def transfer_batch_to_gpu(self, batch, gpu):
# this is just empty shell for code from other class
pass
@abstractmethod
def add_tqdm_metrics(self, metrics):
# this is just empty shell for code from other class
pass
@abstractmethod
def log_metrics(self, metrics, grad_norm_dic):
# this is just empty shell for code from other class
pass
def evaluate(self, model, dataloaders, max_batches, test=False):
"""Run evaluation code.
:param model: PT model
:param dataloaders: list of PT dataloaders
:param max_batches: Scalar
:param test: boolean
:return:
"""
# enable eval mode
model.zero_grad()
model.eval()
# copy properties for forward overrides
self.copy_trainer_model_properties(model)
# disable gradients to save memory
torch.set_grad_enabled(False)
# bookkeeping
outputs = []
# run training
for dataloader_idx, dataloader in enumerate(dataloaders):
dl_outputs = []
for batch_idx, batch in enumerate(dataloader):
if batch is None: # pragma: no cover
continue
# stop short when on fast_dev_run (sets max_batch=1)
if batch_idx >= max_batches:
break
# -----------------
# RUN EVALUATION STEP
# -----------------
output = self.evaluation_forward(model,
batch,
batch_idx,
dataloader_idx,
test)
# track outputs for collation
dl_outputs.append(output)
# batch done
if test:
self.test_progress_bar.update(1)
else:
self.val_progress_bar.update(1)
self.main_progress_bar.update(1)
outputs.append(dl_outputs)
eval_results = {}
# with a single dataloader don't pass an array
if len(dataloaders) == 1:
outputs = outputs[0]
        # give the model a chance to do something with the outputs (if the method is defined)
model = self.get_model()
if test and self.is_overriden('test_end'):
eval_results = model.test_end(outputs)
elif self.is_overriden('validation_end'):
eval_results = model.validation_end(outputs)
# enable train mode again
model.train()
        # re-enable gradients
torch.set_grad_enabled(True)
return eval_results
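    # Hedged sketch (not part of this mixin; model code is hypothetical): a minimal
    # LightningModule pair of hooks that this loop drives in the single-dataloader
    # case. `evaluate` collects the dicts returned by validation_step into `outputs`
    # and hands them to validation_end.
    #
    #     def validation_step(self, batch, batch_idx):
    #         x, y = batch
    #         loss = torch.nn.functional.cross_entropy(self(x), y)
    #         return {'val_loss': loss}
    #
    #     def validation_end(self, outputs):
    #         avg_loss = torch.stack([o['val_loss'] for o in outputs]).mean()
    #         return {'val_loss': avg_loss, 'progress_bar': {'val_loss': avg_loss}}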
def run_evaluation(self, test=False):
# when testing make sure user defined a test step
can_run_test_step = False
if test:
can_run_test_step = self.is_overriden('test_step') and self.is_overriden('test_end')
if not can_run_test_step:
m = '''You called .test() without defining a test step or test_end.
Please define and try again'''
raise MisconfigurationException(m)
# validate only if model has validation_step defined
# test only if test_step or validation_step are defined
run_val_step = self.is_overriden('validation_step')
if run_val_step or can_run_test_step:
# hook
model = self.get_model()
model.on_pre_performance_check()
# select dataloaders
if test:
dataloaders = self.get_test_dataloaders()
max_batches = self.num_test_batches
else:
# val
dataloaders = self.get_val_dataloaders()
max_batches = self.num_val_batches
# cap max batches to 1 when using fast_dev_run
if self.fast_dev_run:
max_batches = 1
# init validation or test progress bar
# main progress bar will already be closed when testing so initial position is free
position = 2 * self.process_position + (not test)
desc = 'Testing' if test else 'Validating'
pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
disable=not self.show_progress_bar, dynamic_ncols=True,
unit='batch', file=sys.stdout)
            setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
# run evaluation
eval_results = self.evaluate(self.model,
dataloaders,
max_batches,
test)
_, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
eval_results)
# add metrics to prog bar
self.add_tqdm_metrics(prog_bar_metrics)
# log metrics
self.log_metrics(log_metrics, {})
# track metrics for callbacks
self.callback_metrics.update(callback_metrics)
# hook
model.on_post_performance_check()
# add model specific metrics
tqdm_metrics = self.training_tqdm_dict
if not test:
self.main_progress_bar.set_postfix(**tqdm_metrics)
# close progress bar
if test:
self.test_progress_bar.close()
else:
self.val_progress_bar.close()
# model checkpointing
if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
logs=self.callback_metrics)
def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
# make dataloader_idx arg in validation_step optional
args = [batch, batch_idx]
if test and len(self.get_test_dataloaders()) > 1:
args.append(dataloader_idx)
elif not test and len(self.get_val_dataloaders()) > 1:
args.append(dataloader_idx)
# handle DP, DDP forward
if self.use_ddp or self.use_dp or self.use_ddp2:
output = model(*args)
return output
# single GPU
if self.single_gpu:
# for single GPU put inputs on gpu manually
root_gpu = 0
if isinstance(self.data_parallel_device_ids, list):
root_gpu = self.data_parallel_device_ids[0]
batch = self.transfer_batch_to_gpu(batch, root_gpu)
args[0] = batch
# CPU
if test:
output = model.test_step(*args)
else:
output = model.validation_step(*args)
return output
|
"""
# Validation loop
The lightning validation loop handles everything except the actual computations of your model.
To decide what will happen in your validation loop, define the `validation_step` function.
Below are all the things lightning automates for you in the validation loop.
.. note:: Lightning will run 5 steps of validation in the beginning of training as a sanity
check so you don't have to wait until a full epoch to catch possible validation issues.
Check validation every n epochs
-------------------------------
If you have a small dataset you might want to check validation every n epochs
.. code-block:: python
# DEFAULT
trainer = Trainer(check_val_every_n_epoch=1)
Set how much of the validation set to check
-------------------------------------------
If you don't want to check 100% of the validation set (for debugging or if it's huge), set this flag
val_percent_check will be overwritten by overfit_pct if `overfit_pct > 0`
.. code-block:: python
# DEFAULT
trainer = Trainer(val_percent_check=1.0)
# check 10% only
trainer = Trainer(val_percent_check=0.1)
Set how much of the test set to check
-------------------------------------
If you don't want to check 100% of the test set (for debugging or if it's huge), set this flag
test_percent_check will be overwritten by overfit_pct if `overfit_pct > 0`
.. code-block:: python
# DEFAULT
trainer = Trainer(test_percent_check=1.0)
# check 10% only
trainer = Trainer(test_percent_check=0.1)
Set validation check frequency within 1 training epoch
------------------------------------------------------
For large datasets it's often desirable to check validation multiple times within a training loop.
Pass in a float to check that often within 1 training epoch.
Pass in an int k to check every k training batches. Must use an int if using an IterableDataset.
.. code-block:: python
# DEFAULT
trainer = Trainer(val_check_interval=0.95)
# check every .25 of an epoch
trainer = Trainer(val_check_interval=0.25)
# check every 100 train batches (ie: for IterableDatasets or fixed frequency)
trainer = Trainer(val_check_interval=100)
Set the number of validation sanity steps
-----------------------------------------
Lightning runs a few steps of validation in the beginning of training.
This avoids crashing in the validation loop sometime deep into a lengthy training loop.
.. code-block:: python
# DEFAULT
trainer = Trainer(num_sanity_val_steps=5)
You can use `Trainer(num_sanity_val_steps=0)` to skip the sanity check.
# Testing loop
To ensure you don't accidentally use test data to guide training decisions Lightning
makes running the test set deliberate.
**test**
You have two options to run the test set.
First case is where you test right after a full training routine.
.. code-block:: python
# run full training
trainer.fit(model)
# run test set
trainer.test()
Second case is where you load a model and run the test set
.. code-block:: python
model = MyLightningModule.load_from_metrics(
weights_path='/path/to/pytorch_checkpoint.ckpt',
tags_csv='/path/to/test_tube/experiment/version/meta_tags.csv',
on_gpu=True,
map_location=None
)
# init trainer with whatever options
trainer = Trainer(...)
# test (pass in the model)
trainer.test(model)
In this second case, the options you pass to trainer will be used when running
the test set (ie: 16-bit, dp, ddp, etc...)
"""
from abc import ABC, abstractmethod
import torch
import sys
import tqdm
from pytorch_lightning.utilities.debugging import MisconfigurationException
class TrainerEvaluationLoopMixin(ABC):
def __init__(self):
# this is just a summary on variables used in this abstract class,
# the proper values/initialisation should be done in child class
self.test_progress_bar = None
self.val_progress_bar = None
self.main_progress_bar = None
self.use_ddp = None
self.use_dp = None
self.use_ddp2 = None
self.single_gpu = None
self.data_parallel_device_ids = None
self.model = None
self.num_test_batches = None
self.num_val_batches = None
self.fast_dev_run = None
self.process_position = None
self.show_progress_bar = None
self.process_output = None
self.training_tqdm_dict = None
self.proc_rank = None
self.checkpoint_callback = None
self.current_epoch = None
self.callback_metrics = None
self.get_test_dataloaders = None
self.get_val_dataloaders = None
@abstractmethod
def copy_trainer_model_properties(self, model):
# this is just empty shell for code from other class
pass
@abstractmethod
def get_model(self):
# this is just empty shell for code from other class
pass
@abstractmethod
def is_overriden(self, m):
# this is just empty shell for code from other class
pass
@abstractmethod
def transfer_batch_to_gpu(self, batch, gpu):
# this is just empty shell for code from other class
pass
@abstractmethod
def add_tqdm_metrics(self, metrics):
# this is just empty shell for code from other class
pass
@abstractmethod
def log_metrics(self, metrics, grad_norm_dic):
# this is just empty shell for code from other class
pass
def evaluate(self, model, dataloaders, max_batches, test=False):
"""Run evaluation code.
:param model: PT model
:param dataloaders: list of PT dataloaders
:param max_batches: Scalar
:param test: boolean
:return:
"""
# enable eval mode
model.zero_grad()
model.eval()
# copy properties for forward overrides
self.copy_trainer_model_properties(model)
# disable gradients to save memory
torch.set_grad_enabled(False)
# bookkeeping
outputs = []
# run training
for dataloader_idx, dataloader in enumerate(dataloaders):
dl_outputs = []
for batch_idx, batch in enumerate(dataloader):
if batch is None: # pragma: no cover
continue
# stop short when on fast_dev_run (sets max_batch=1)
if batch_idx >= max_batches:
break
# -----------------
# RUN EVALUATION STEP
# -----------------
output = self.evaluation_forward(model,
batch,
batch_idx,
dataloader_idx,
test)
# track outputs for collation
dl_outputs.append(output)
# batch done
if test:
self.test_progress_bar.update(1)
else:
self.val_progress_bar.update(1)
self.main_progress_bar.update(1)
outputs.append(dl_outputs)
eval_results = {}
# with a single dataloader don't pass an array
if len(dataloaders) == 1:
outputs = outputs[0]
        # give the model a chance to do something with the outputs (if the method is defined)
model = self.get_model()
if test and self.is_overriden('test_end'):
eval_results = model.test_end(outputs)
elif self.is_overriden('validation_end'):
eval_results = model.validation_end(outputs)
# enable train mode again
model.train()
# enable gradients to save memory
torch.set_grad_enabled(True)
return eval_results
def run_evaluation(self, test=False):
# when testing make sure user defined a test step
can_run_test_step = False
if test:
can_run_test_step = self.is_overriden('test_step') and self.is_overriden('test_end')
if not can_run_test_step:
m = '''You called .test() without defining a test step or test_end.
Please define and try again'''
raise MisconfigurationException(m)
# validate only if model has validation_step defined
# test only if test_step or validation_step are defined
run_val_step = self.is_overriden('validation_step')
if run_val_step or can_run_test_step:
# hook
model = self.get_model()
model.on_pre_performance_check()
# select dataloaders
if test:
dataloaders = self.get_test_dataloaders()
max_batches = self.num_test_batches
else:
# val
dataloaders = self.get_val_dataloaders()
max_batches = self.num_val_batches
# cap max batches to 1 when using fast_dev_run
if self.fast_dev_run:
max_batches = 1
# init validation or test progress bar
# main progress bar will already be closed when testing so initial position is free
position = 2 * self.process_position + (not test)
desc = 'Testing' if test else 'Validating'
pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
disable=not self.show_progress_bar, dynamic_ncols=True,
unit='batch', file=sys.stdout)
setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
# run evaluation
eval_results = self.evaluate(self.model,
dataloaders,
max_batches,
test)
_, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
eval_results)
# add metrics to prog bar
self.add_tqdm_metrics(prog_bar_metrics)
# log metrics
self.log_metrics(log_metrics, {})
# track metrics for callbacks
self.callback_metrics.update(callback_metrics)
# hook
model.on_post_performance_check()
# add model specific metrics
tqdm_metrics = self.training_tqdm_dict
if not test:
self.main_progress_bar.set_postfix(**tqdm_metrics)
# close progress bar
if test:
self.test_progress_bar.close()
else:
self.val_progress_bar.close()
# model checkpointing
if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
logs=self.callback_metrics)
def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
# make dataloader_idx arg in validation_step optional
args = [batch, batch_idx]
if test and len(self.get_test_dataloaders()) > 1:
args.append(dataloader_idx)
elif not test and len(self.get_val_dataloaders()) > 1:
args.append(dataloader_idx)
# handle DP, DDP forward
if self.use_ddp or self.use_dp or self.use_ddp2:
output = model(*args)
return output
# single GPU
if self.single_gpu:
# for single GPU put inputs on gpu manually
root_gpu = 0
if isinstance(self.data_parallel_device_ids, list):
root_gpu = self.data_parallel_device_ids[0]
batch = self.transfer_batch_to_gpu(batch, root_gpu)
args[0] = batch
# CPU
if test:
output = model.test_step(*args)
else:
output = model.validation_step(*args)
return output
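# Hedged note (hypothetical model code): evaluation_forward only appends
# dataloader_idx when more than one val/test dataloader is registered, so with
# multiple dataloaders the corresponding hook must accept a third argument, e.g.
#
#     def validation_step(self, batch, batch_idx, dataloader_idx):
#         ...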
|
import logging
import os
from itertools import permutations
from typing import Dict, List, Set, Tuple, Union
import pysbd
from rich import print
from sumeval.metrics.rouge import RougeCalculator
from factsumm.utils.module_entity import load_ie, load_ner, load_rel
from factsumm.utils.module_question import load_qa, load_qg
from factsumm.utils.module_sentence import load_bert_score
from factsumm.utils.utils import Config, qags_score
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logging.getLogger("transformers").setLevel(logging.ERROR)
logging.getLogger("flair").setLevel(logging.ERROR)
class FactSumm:
def __init__(
self,
ner_model: str = None,
rel_model: str = None,
qg_model: str = None,
qa_model: str = None,
bert_score_model: str = None,
):
"""
FactSumm object used to calculate Factual Consistency score of Abstractive Summarization model
Args:
ner_model (str, optional): NER model to be used (Flair or HuggingFace). Defaults to None.
rel_model (str, optional): RE model to be used (HuggingFace). Defaults to None.
            qg_model (str, optional): QG model to be used (HuggingFace). Defaults to None.
            qa_model (str, optional): QA model to be used (HuggingFace). Defaults to None.
bert_score_model (str, optional): BERTScore model to be used (HuggingFace). Defaults to None.
"""
self.config = Config()
self.segmenter = pysbd.Segmenter(language="en", clean=False)
self.rouge = RougeCalculator(stopwords=True, lang="en")
# NER, RE, QG, QA models supported by HuggingFace can be used (default can be found in `config.py`)
self.ner = ner_model if ner_model is not None else self.config.NER_MODEL
self.rel = rel_model if rel_model is not None else self.config.REL_MODEL
self.qg = qg_model if qg_model is not None else self.config.QG_MODEL
self.qa = qa_model if qa_model is not None else self.config.QA_MODEL
self.bert_score = bert_score_model if bert_score_model is not None else self.config.BERT_SCORE_MODEL
self.ie = None
def build_perm(
self,
lines: List[str],
total_entities: Union[List[Dict], List[List[Dict]]],
) -> List:
"""
Build entity permutations for Relation Extraction
Args:
lines (List[str]): segmented document lines
total_entities (Union[List[Dict], List[List[Dict]]]): list of total entities
Returns:
List: list of permutations
"""
total_perms = list()
for line, line_entities in zip(lines, total_entities):
line_perms = list(permutations(line_entities, 2))
line_perms = [{
"text":
line,
"spans": [
(comb[0]["start"], comb[0]["end"]),
(comb[-1]["start"], comb[-1]["end"]),
]
} for comb in line_perms]
total_perms.append(line_perms)
return total_perms
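    # Illustrative example (offsets are made up by hand): for one line with two
    # entities, build_perm yields both orderings wrapped as span pairs, e.g.
    #
    #     line = "Seoul is the capital of South Korea."
    #     ents = [[{"word": "Seoul", "start": 0, "end": 5},
    #              {"word": "South Korea", "start": 24, "end": 35}]]
    #     build_perm([line], ents)
    #     # -> [[{"text": line, "spans": [(0, 5), (24, 35)]},
    #     #      {"text": line, "spans": [(24, 35), (0, 5)]}]]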
def get_facts(self, lines: List[str], entities: List[List[Dict]]) -> Set:
"""
Get fact triples using Relation Extraction model
Args:
lines (List[str]): segmented document lines
entities (List[List[Dict]]): list of total entities
Returns:
            Set: set of relations inferred from the permutations
"""
perms = self.build_perm(lines, entities)
triples = list()
for perm in perms:
triples.extend(self.rel(perm))
return set(triples)
def _segment(self, text: str) -> List[str]:
"""
Segment input text into (possibly) multiple sentences
Args:
text (str): text to be segmented
Returns:
List[str]: list of segmented lines
"""
return [line.strip() for line in self.segmenter.segment(text)]
def _print_entities(self, mode: str, total_entities: List[List[Dict]]):
# yapf:disable
print(f"{mode.upper()} Entities")
for i, line_entities in enumerate(total_entities):
            print(f'{i+1}: {[(entity["word"], entity["entity"]) for entity in line_entities]}')
print()
# yapf:enable
def calculate_rouge(
self,
source: str,
summary: str,
) -> Tuple[float, float, float]:
"""
Calculate ROUGE score
Args:
source (str): original source
summary (str): generated summary
Returns:
Tuple: (ROUGE-1, ROUGE-2, ROUGE-L) tuple
"""
source_lines = self._segment(source)
rouge_1 = self.rouge.rouge_n(summary, source_lines, 1)
rouge_2 = self.rouge.rouge_n(summary, source_lines, 2)
rouge_l = self.rouge.rouge_l(summary, source_lines)
print(
f"Avg. ROUGE-1: {rouge_1}\nAvg. ROUGE-2: {rouge_2}\nAvg. ROUGE-L: {rouge_l}"
)
return rouge_1, rouge_2, rouge_l
def _print_facts(self, mode: str, facts: Set[Tuple]):
print(f"{mode.upper()} Facts")
for fact in facts:
print(fact)
print()
def _filter_out(self, sources: Set, summaries: Set) -> Tuple[Set, Set]:
"""
Filter out triples that don't share a subject and relation for comparability
Args:
sources (Set): set of triples from source
summaries (Set): set of triples from summary
Returns:
Tuple[Set, Set]: filtered sources and summaries
"""
source_tuple = {(source[0], source[1]) for source in sources}
summary_tuple = {(summary[0], summary[1]) for summary in summaries}
sources = {
source for source in sources
if (source[0], source[1]) in summary_tuple
}
summaries = {
summary for summary in summaries
if (summary[0], summary[1]) in source_tuple
}
return sources, summaries
def extract_facts(
self,
source: str,
summary: str,
verbose: bool = False,
device: str = "cpu",
):
"""
Extract (head_entity, relation, tail_entity) relation triple using NER & RE module
See also https://arxiv.org/abs/1905.13322.pdf
Args:
source (str): original source
summary (str): generated summary
verbose (bool, optional): print verbose option. Defaults to False.
device (str): device info
"""
if isinstance(self.ner, str) and isinstance(self.rel, str):
self.ner = load_ner(self.ner, device)
self.rel = load_rel(self.rel, device)
source_lines = self._segment(source)
summary_lines = self._segment(summary)
# extract per-line entities
source_ents = self.ner(source_lines)
summary_ents = self.ner(summary_lines)
# extract entity-based triple: (head, relation, tail)
source_facts = self.get_facts(source_lines, source_ents)
summary_facts = self.get_facts(summary_lines, summary_ents)
# filter out some facts
source_facts, summary_facts = self._filter_out(
source_facts,
summary_facts,
)
common_facts = summary_facts.intersection(source_facts)
diff_facts = summary_facts.difference(source_facts)
if verbose:
self._print_entities("source", source_ents)
self._print_entities("summary", summary_ents)
self._print_facts("source", source_facts)
self._print_facts("summary", summary_facts)
self._print_facts("common", common_facts)
self._print_facts("diff", diff_facts)
if not summary_facts:
fact_score = 0.0
else:
fact_score = len(common_facts) / len(summary_facts)
print(f"Fact Score: {fact_score}")
return source_ents, summary_ents, fact_score
def _print_qas(self, mode: str, questions: List[Dict]):
# yapf:disable
print(f"Answers based on {mode.upper()} (Questions are generated from Summary)")
for question in questions:
print(f"[Q] {question["question"]}\t[Pred] {question["prediction"]}")
print()
# yapf:enable
def extract_qas(
self,
source: str,
summary: str,
source_ents: List = None,
summary_ents: List = None,
verbose: bool = False,
device: str = "cpu",
) -> float:
"""
Extract Question & Answering Pair generated from Question Generation module
See also https://arxiv.org/abs/2004.04228
Args:
source (str): original source
summary (str): generated summary
source_ents (List, optional): named entities extracted from source. Defaults to None.
            summary_ents (List, optional): named entities extracted from summary. Defaults to None.
verbose (bool, optional): print verbose option. Defaults to False.
device (str): device info
"""
if isinstance(self.qg, str) and isinstance(self.qa, str):
self.qg = load_qg(self.qg, device)
self.qa = load_qa(self.qa, device)
if isinstance(self.ner, str):
self.ner = load_ner(self.ner, device)
source_lines = self._segment(source)
summary_lines = self._segment(summary)
if source_ents is None:
source_ents = self.ner(source_lines)
if summary_ents is None:
summary_ents = self.ner(summary_lines)
summary_qas = self.qg(summary_lines, summary_ents)
source_answers = self.qa(source, summary_qas)
summary_answers = self.qa(summary, summary_qas)
if verbose:
self._print_qas("source", source_answers)
self._print_qas("summary", summary_answers)
qa_score = qags_score(source_answers, summary_answers)
print(f"QAGS Score: {qa_score}\n")
return qa_score
def _print_triples(self, mode: str, triples: Set):
print(f"{mode.upper()} Triples")
for triple in triples:
print(triple)
print()
def extract_triples(self, source: str, summary: str, verbose: bool = False):
"""
Extract OpenIE based fact triples
Args:
source (str): original source
summary (str): generated summary
verbose (bool, optional): print verbose option. Defaults to False.
"""
if self.ie is None:
self.ie = load_ie()
source_triples = {(
triple["subject"],
triple["relation"],
triple["object"],
) for triple in self.ie(source)}
summary_triples = {(
triple["subject"],
triple["relation"],
triple["object"],
) for triple in self.ie(summary)}
source_triples, summary_triples = self._filter_out(
source_triples,
summary_triples,
)
if verbose:
self._print_triples("source", source_triples)
self._print_triples("summary", summary_triples)
common_triples = summary_triples.intersection(source_triples)
if not summary_triples:
triple_score = 0.0
else:
triple_score = len(common_triples) / len(summary_triples)
print(f"Triple Score: {triple_score}\n")
return triple_score
def calculate_bert_score(
self,
source: str,
summary: str,
device: str = "cpu",
) -> List[float]:
"""
Calculate BERTScore
See also https://arxiv.org/abs/2005.03754
Args:
source (str): original source
summary (str): generated summary
device (str): device info
Returns:
List: (Precision, Recall, F1) BERTScore list
"""
if isinstance(self.bert_score, str):
self.bert_score = load_bert_score(self.bert_score, device)
        # BUG: when len(source_lines) == 1, a bmm error is raised
source_lines = self._segment(source)
summary_lines = [summary, "dummy"]
scores = self.bert_score(summary_lines, source_lines)
filtered_scores = list()
for score in scores:
score = score.tolist()
score.pop(-1)
filtered_scores.append(sum(score) / len(score))
print(
f"BERTScore Score\nPrecision: {filtered_scores[0]}\nRecall: {filtered_scores[1]}\nF1: {filtered_scores[2]}"
)
return filtered_scores
def __call__(
self,
sources: Union[List[str], str],
summaries: Union[List[str], str],
verbose: bool = False,
device: str = "cpu",
) -> Dict:
if isinstance(sources, str) and isinstance(summaries, str):
sources = [sources]
summaries = [summaries]
if len(sources) != len(summaries):
# yapf:disable
raise ValueError("`sources` and `summaries` must have the same number of elements!")
# yapf:enable
num_pairs = len(sources)
fact_scores = 0
qags_scores = 0
triple_scores = 0
rouges = [0, 0, 0]
bert_scores = [0, 0, 0]
for source, summary in zip(sources, summaries):
source_ents, summary_ents, fact_score = self.extract_facts(
source,
summary,
verbose,
device,
)
fact_scores += fact_score
qags_score = self.extract_qas(
source,
summary,
source_ents,
summary_ents,
verbose,
device,
)
qags_scores += qags_score
triple_score = self.extract_triples(source, summary, verbose)
triple_scores += triple_score
rouge_1, rouge_2, rouge_l = self.calculate_rouge(source, summary)
rouges[0] += rouge_1
rouges[1] += rouge_2
rouges[2] += rouge_l
bert_score = self.calculate_bert_score(source, summary, device)
bert_scores[0] += bert_score[0]
bert_scores[1] += bert_score[1]
bert_scores[2] += bert_score[2]
return {
"fact_score": fact_scores / num_pairs,
"qa_score": qags_scores / num_pairs,
"triple_score": triple_scores / num_pairs,
"rouge": (
rouges[0] / num_pairs,
rouges[1] / num_pairs,
rouges[2] / num_pairs,
),
"bert_score": {
"precision": bert_scores[0],
"recall": bert_scores[1],
"f1": bert_scores[2],
},
}
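# Hedged usage sketch (texts are made up; the underlying HuggingFace models are
# downloaded lazily on first use, so this call needs network access and is slow):
#
#     factsumm = FactSumm()
#     article = "Lionel Messi was born in Rosario. He plays professional football."
#     summary = "Messi was born in Rosario."
#     metrics = factsumm(article, summary, verbose=False, device="cpu")
#     print(metrics["fact_score"], metrics["rouge"], metrics["bert_score"])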
|
import logging
import os
from itertools import permutations
from typing import Dict, List, Set, Tuple, Union
import pysbd
from rich import print
from sumeval.metrics.rouge import RougeCalculator
from factsumm.utils.module_entity import load_ie, load_ner, load_rel
from factsumm.utils.module_question import load_qa, load_qg
from factsumm.utils.module_sentence import load_bert_score
from factsumm.utils.utils import Config, qags_score
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logging.getLogger("transformers").setLevel(logging.ERROR)
logging.getLogger("flair").setLevel(logging.ERROR)
class FactSumm:
def __init__(
self,
ner_model: str = None,
rel_model: str = None,
qg_model: str = None,
qa_model: str = None,
bert_score_model: str = None,
):
"""
FactSumm object used to calculate Factual Consistency score of Abstractive Summarization model
Args:
ner_model (str, optional): NER model to be used (Flair or HuggingFace). Defaults to None.
rel_model (str, optional): RE model to be used (HuggingFace). Defaults to None.
            qg_model (str, optional): QG model to be used (HuggingFace). Defaults to None.
            qa_model (str, optional): QA model to be used (HuggingFace). Defaults to None.
bert_score_model (str, optional): BERTScore model to be used (HuggingFace). Defaults to None.
"""
self.config = Config()
self.segmenter = pysbd.Segmenter(language="en", clean=False)
self.rouge = RougeCalculator(stopwords=True, lang="en")
# NER, RE, QG, QA models supported by HuggingFace can be used (default can be found in `config.py`)
self.ner = ner_model if ner_model is not None else self.config.NER_MODEL
self.rel = rel_model if rel_model is not None else self.config.REL_MODEL
self.qg = qg_model if qg_model is not None else self.config.QG_MODEL
self.qa = qa_model if qa_model is not None else self.config.QA_MODEL
self.bert_score = bert_score_model if bert_score_model is not None else self.config.BERT_SCORE_MODEL
self.ie = None
def build_perm(
self,
lines: List[str],
total_entities: Union[List[Dict], List[List[Dict]]],
) -> List:
"""
Build entity permutations for Relation Extraction
Args:
lines (List[str]): segmented document lines
total_entities (Union[List[Dict], List[List[Dict]]]): list of total entities
Returns:
List: list of permutations
"""
total_perms = list()
for line, line_entities in zip(lines, total_entities):
line_perms = list(permutations(line_entities, 2))
line_perms = [{
"text":
line,
"spans": [
(comb[0]["start"], comb[0]["end"]),
(comb[-1]["start"], comb[-1]["end"]),
]
} for comb in line_perms]
total_perms.append(line_perms)
return total_perms
def get_facts(self, lines: List[str], entities: List[List[Dict]]) -> Set:
"""
Get fact triples using Relation Extraction model
Args:
lines (List[str]): segmented document lines
entities (List[List[Dict]]): list of total entities
Returns:
            Set: set of relations inferred from the permutations
"""
perms = self.build_perm(lines, entities)
triples = list()
for perm in perms:
triples.extend(self.rel(perm))
return set(triples)
def _segment(self, text: str) -> List[str]:
"""
Segment input text into (possibly) multiple sentences
Args:
text (str): text to be segmented
Returns:
List[str]: list of segmented lines
"""
return [line.strip() for line in self.segmenter.segment(text)]
def _print_entities(self, mode: str, total_entities: List[List[Dict]]):
# yapf:disable
print(f"{mode.upper()} Entities")
for i, line_entities in enumerate(total_entities):
print(f'{i+1}: {[(entity["word"], entity["entity"]) for entity in line_entities]}')
print()
# yapf:enable
def calculate_rouge(
self,
source: str,
summary: str,
) -> Tuple[float, float, float]:
"""
Calculate ROUGE score
Args:
source (str): original source
summary (str): generated summary
Returns:
Tuple: (ROUGE-1, ROUGE-2, ROUGE-L) tuple
"""
source_lines = self._segment(source)
rouge_1 = self.rouge.rouge_n(summary, source_lines, 1)
rouge_2 = self.rouge.rouge_n(summary, source_lines, 2)
rouge_l = self.rouge.rouge_l(summary, source_lines)
print(
f"Avg. ROUGE-1: {rouge_1}\nAvg. ROUGE-2: {rouge_2}\nAvg. ROUGE-L: {rouge_l}"
)
return rouge_1, rouge_2, rouge_l
def _print_facts(self, mode: str, facts: Set[Tuple]):
print(f"{mode.upper()} Facts")
for fact in facts:
print(fact)
print()
def _filter_out(self, sources: Set, summaries: Set) -> Tuple[Set, Set]:
"""
Filter out triples that don't share a subject and relation for comparability
Args:
sources (Set): set of triples from source
summaries (Set): set of triples from summary
Returns:
Tuple[Set, Set]: filtered sources and summaries
"""
source_tuple = {(source[0], source[1]) for source in sources}
summary_tuple = {(summary[0], summary[1]) for summary in summaries}
sources = {
source for source in sources
if (source[0], source[1]) in summary_tuple
}
summaries = {
summary for summary in summaries
if (summary[0], summary[1]) in source_tuple
}
return sources, summaries
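    # Illustrative example (made-up triples): only triples whose (head, relation)
    # pair also appears on the other side survive, so the sets become directly comparable.
    #
    #     sources   = {("Messi", "born in", "Rosario"), ("Messi", "plays for", "Barcelona")}
    #     summaries = {("Messi", "born in", "Argentina")}
    #     _filter_out(sources, summaries)
    #     # -> ({("Messi", "born in", "Rosario")}, {("Messi", "born in", "Argentina")})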
def extract_facts(
self,
source: str,
summary: str,
verbose: bool = False,
device: str = "cpu",
):
"""
Extract (head_entity, relation, tail_entity) relation triple using NER & RE module
See also https://arxiv.org/abs/1905.13322.pdf
Args:
source (str): original source
summary (str): generated summary
verbose (bool, optional): print verbose option. Defaults to False.
device (str): device info
"""
if isinstance(self.ner, str) and isinstance(self.rel, str):
self.ner = load_ner(self.ner, device)
self.rel = load_rel(self.rel, device)
source_lines = self._segment(source)
summary_lines = self._segment(summary)
# extract per-line entities
source_ents = self.ner(source_lines)
summary_ents = self.ner(summary_lines)
# extract entity-based triple: (head, relation, tail)
source_facts = self.get_facts(source_lines, source_ents)
summary_facts = self.get_facts(summary_lines, summary_ents)
# filter out some facts
source_facts, summary_facts = self._filter_out(
source_facts,
summary_facts,
)
common_facts = summary_facts.intersection(source_facts)
diff_facts = summary_facts.difference(source_facts)
if verbose:
self._print_entities("source", source_ents)
self._print_entities("summary", summary_ents)
self._print_facts("source", source_facts)
self._print_facts("summary", summary_facts)
self._print_facts("common", common_facts)
self._print_facts("diff", diff_facts)
if not summary_facts:
fact_score = 0.0
else:
fact_score = len(common_facts) / len(summary_facts)
print(f"Fact Score: {fact_score}")
return source_ents, summary_ents, fact_score
def _print_qas(self, mode: str, questions: List[Dict]):
# yapf:disable
print(f"Answers based on {mode.upper()} (Questions are generated from Summary)")
for question in questions:
print(f"[Q] {question['question']}\t[Pred] {question['prediction']}")
print()
# yapf:enable
def extract_qas(
self,
source: str,
summary: str,
source_ents: List = None,
summary_ents: List = None,
verbose: bool = False,
device: str = "cpu",
) -> float:
"""
Extract Question & Answering Pair generated from Question Generation module
See also https://arxiv.org/abs/2004.04228
Args:
source (str): original source
summary (str): generated summary
source_ents (List, optional): named entities extracted from source. Defaults to None.
            summary_ents (List, optional): named entities extracted from summary. Defaults to None.
verbose (bool, optional): print verbose option. Defaults to False.
device (str): device info
"""
if isinstance(self.qg, str) and isinstance(self.qa, str):
self.qg = load_qg(self.qg, device)
self.qa = load_qa(self.qa, device)
if isinstance(self.ner, str):
self.ner = load_ner(self.ner, device)
source_lines = self._segment(source)
summary_lines = self._segment(summary)
if source_ents is None:
source_ents = self.ner(source_lines)
if summary_ents is None:
summary_ents = self.ner(summary_lines)
summary_qas = self.qg(summary_lines, summary_ents)
source_answers = self.qa(source, summary_qas)
summary_answers = self.qa(summary, summary_qas)
if verbose:
self._print_qas("source", source_answers)
self._print_qas("summary", summary_answers)
qa_score = qags_score(source_answers, summary_answers)
print(f"QAGS Score: {qa_score}\n")
return qa_score
def _print_triples(self, mode: str, triples: Set):
print(f"{mode.upper()} Triples")
for triple in triples:
print(triple)
print()
def extract_triples(self, source: str, summary: str, verbose: bool = False):
"""
Extract OpenIE based fact triples
Args:
source (str): original source
summary (str): generated summary
verbose (bool, optional): print verbose option. Defaults to False.
"""
if self.ie is None:
self.ie = load_ie()
source_triples = {(
triple["subject"],
triple["relation"],
triple["object"],
) for triple in self.ie(source)}
summary_triples = {(
triple["subject"],
triple["relation"],
triple["object"],
) for triple in self.ie(summary)}
source_triples, summary_triples = self._filter_out(
source_triples,
summary_triples,
)
if verbose:
self._print_triples("source", source_triples)
self._print_triples("summary", summary_triples)
common_triples = summary_triples.intersection(source_triples)
if not summary_triples:
triple_score = 0.0
else:
triple_score = len(common_triples) / len(summary_triples)
print(f"Triple Score: {triple_score}\n")
return triple_score
def calculate_bert_score(
self,
source: str,
summary: str,
device: str = "cpu",
) -> List[float]:
"""
Calculate BERTScore
See also https://arxiv.org/abs/2005.03754
Args:
source (str): original source
summary (str): generated summary
device (str): device info
Returns:
List: (Precision, Recall, F1) BERTScore list
"""
if isinstance(self.bert_score, str):
self.bert_score = load_bert_score(self.bert_score, device)
        # BUG: when len(source_lines) == 1, a bmm error is raised
source_lines = self._segment(source)
summary_lines = [summary, "dummy"]
scores = self.bert_score(summary_lines, source_lines)
filtered_scores = list()
for score in scores:
score = score.tolist()
score.pop(-1)
filtered_scores.append(sum(score) / len(score))
print(
f"BERTScore Score\nPrecision: {filtered_scores[0]}\nRecall: {filtered_scores[1]}\nF1: {filtered_scores[2]}"
)
return filtered_scores
def __call__(
self,
sources: Union[List[str], str],
summaries: Union[List[str], str],
verbose: bool = False,
device: str = "cpu",
) -> Dict:
if isinstance(sources, str) and isinstance(summaries, str):
sources = [sources]
summaries = [summaries]
if len(sources) != len(summaries):
# yapf:disable
raise ValueError("`sources` and `summaries` must have the same number of elements!")
# yapf:enable
num_pairs = len(sources)
fact_scores = 0
qags_scores = 0
triple_scores = 0
rouges = [0, 0, 0]
bert_scores = [0, 0, 0]
for source, summary in zip(sources, summaries):
source_ents, summary_ents, fact_score = self.extract_facts(
source,
summary,
verbose,
device,
)
fact_scores += fact_score
qags_score = self.extract_qas(
source,
summary,
source_ents,
summary_ents,
verbose,
device,
)
qags_scores += qags_score
triple_score = self.extract_triples(source, summary, verbose)
triple_scores += triple_score
rouge_1, rouge_2, rouge_l = self.calculate_rouge(source, summary)
rouges[0] += rouge_1
rouges[1] += rouge_2
rouges[2] += rouge_l
bert_score = self.calculate_bert_score(source, summary, device)
bert_scores[0] += bert_score[0]
bert_scores[1] += bert_score[1]
bert_scores[2] += bert_score[2]
return {
"fact_score": fact_scores / num_pairs,
"qa_score": qags_scores / num_pairs,
"triple_score": triple_scores / num_pairs,
"rouge": (
rouges[0] / num_pairs,
rouges[1] / num_pairs,
rouges[2] / num_pairs,
),
"bert_score": {
"precision": bert_scores[0],
"recall": bert_scores[1],
"f1": bert_scores[2],
},
}
|
import logging
from google.protobuf.json_format import MessageToDict
from spaceone.core import pygrpc
from spaceone.core.connector import BaseConnector
from spaceone.core.utils import parse_endpoint
from spaceone.identity.error.error_authentication import *
_LOGGER = logging.getLogger(__name__)
class AuthPluginConnector(BaseConnector):
def __init__(self, transaction, config):
super().__init__(transaction, config)
self.client = None
def initialize(self, endpoint):
_LOGGER.info(f'[initialize] endpoint: {endpoint}')
e = parse_endpoint(endpoint)
        self.client = pygrpc.client(endpoint=f'{e.get("hostname")}:{e.get("port")}', version='plugin')
def call_login(self, endpoint, credentials, options, secret_data, schema=None):
self.initialize(endpoint)
params = {
'options': options,
'secret_data': secret_data,
'schema': schema,
'user_credentials': credentials
}
try:
user_info = self.client.Auth.login(params, metadata=self.transaction.get_connection_meta())
except ERROR_BASE as e:
_LOGGER.error(f'[call_login] Auth.login failed. (reason={e.message})')
raise ERROR_INVALID_CREDENTIALS()
except Exception as e:
_LOGGER.error(f'[call_login] Auth.login failed. (reason={str(e)})')
raise ERROR_INVALID_CREDENTIALS()
return MessageToDict(user_info, preserving_proto_field_name=True)
def init(self, options):
params = {
'options': options
}
try:
plugin_info = self.client.Auth.init(params, metadata=self.transaction.get_connection_meta())
return MessageToDict(plugin_info)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
            raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
def verify(self, options, secret_data, schema=None):
params = {
'options': options,
'secret_data': secret_data,
'schema': schema
}
try:
# TODO: meta (plugin has no meta)
auth_verify_info = self.client.Auth.verify(params, metadata=self.transaction.get_connection_meta())
return MessageToDict(auth_verify_info)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
            raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
def call_find(self, keyword, user_id, options, secret_data, schema=None):
params = {
'options': options,
'secret_data': secret_data,
'schema': schema,
'keyword': keyword,
'user_id': user_id
}
_LOGGER.info(f'[call_find] params: {params}')
try:
users_info = self.client.Auth.find(params, metadata=self.transaction.get_connection_meta())
_LOGGER.debug(f'[call_find] MessageToDict(user_info): '
f'{MessageToDict(users_info, preserving_proto_field_name=True)}')
return MessageToDict(users_info, preserving_proto_field_name=True)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
            raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
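# Hedged usage sketch (endpoint and payloads are placeholders; the exact endpoint
# format is whatever spaceone.core.utils.parse_endpoint accepts):
#
#     connector = AuthPluginConnector(transaction, config)
#     user_info = connector.call_login(
#         endpoint="grpc://auth-plugin.example.com:50051",
#         credentials={"user_id": "jane", "password": "***"},
#         options={},
#         secret_data={},
#     )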
|
import logging
from google.protobuf.json_format import MessageToDict
from spaceone.core import pygrpc
from spaceone.core.connector import BaseConnector
from spaceone.core.utils import parse_endpoint
from spaceone.identity.error.error_authentication import *
_LOGGER = logging.getLogger(__name__)
class AuthPluginConnector(BaseConnector):
def __init__(self, transaction, config):
super().__init__(transaction, config)
self.client = None
def initialize(self, endpoint):
_LOGGER.info(f'[initialize] endpoint: {endpoint}')
e = parse_endpoint(endpoint)
self.client = pygrpc.client(endpoint=f'{e.get("hostname")}:{e.get("port")}', version='plugin')
def call_login(self, endpoint, credentials, options, secret_data, schema=None):
self.initialize(endpoint)
params = {
'options': options,
'secret_data': secret_data,
'schema': schema,
'user_credentials': credentials
}
try:
user_info = self.client.Auth.login(params, metadata=self.transaction.get_connection_meta())
except ERROR_BASE as e:
_LOGGER.error(f'[call_login] Auth.login failed. (reason={e.message})')
raise ERROR_INVALID_CREDENTIALS()
except Exception as e:
_LOGGER.error(f'[call_login] Auth.login failed. (reason={str(e)})')
raise ERROR_INVALID_CREDENTIALS()
return MessageToDict(user_info, preserving_proto_field_name=True)
def init(self, options):
params = {
'options': options
}
try:
plugin_info = self.client.Auth.init(params, metadata=self.transaction.get_connection_meta())
return MessageToDict(plugin_info)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
            raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
def verify(self, options, secret_data, schema=None):
params = {
'options': options,
'secret_data': secret_data,
'schema': schema
}
try:
# TODO: meta (plugin has no meta)
auth_verify_info = self.client.Auth.verify(params, metadata=self.transaction.get_connection_meta())
return MessageToDict(auth_verify_info)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
            raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
def call_find(self, keyword, user_id, options, secret_data, schema=None):
params = {
'options': options,
'secret_data': secret_data,
'schema': schema,
'keyword': keyword,
'user_id': user_id
}
_LOGGER.info(f'[call_find] params: {params}')
try:
users_info = self.client.Auth.find(params, metadata=self.transaction.get_connection_meta())
_LOGGER.debug(f'[call_find] MessageToDict(user_info): '
f'{MessageToDict(users_info, preserving_proto_field_name=True)}')
return MessageToDict(users_info, preserving_proto_field_name=True)
except ERROR_BASE as e:
raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=e.message)
except Exception as e:
            raise ERROR_AUTHENTICATION_FAILURE_PLUGIN(message=str(e))
|
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import string
from typing import List
import paramiko
def ssh_exec_commands(hostname: str, port: int = 22, *, username: str, key_name: str, commands: List[str],
redirect_output=True):
if not commands:
return
if not key_name:
raise ValueError('EC2 key pair must be specified.')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if os.name == 'posix':
        key_filename = f'{os.environ["HOME"]}/.aws/{key_name}.pem'
else:
        key_filename = f'{os.environ["HOMEPATH"]}/.aws/{key_name}.pem'
ssh.connect(hostname=hostname, port=port, username=username, key_filename=key_filename)
for command in commands:
stdin, stdout, stderr = ssh.exec_command(command)
if redirect_output:
print(stdout.read().decode())
print(stderr.read().decode())
ssh.close()
def get_random_id(n: int = 10) -> str:
"""Generate random identifier.
:param n: Length of the identifier.
:return:
"""
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
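# Hedged usage sketch (host, user and key name are placeholders; the key file is
# resolved to ~/.aws/<key_name>.pem by ssh_exec_commands above):
#
#     ssh_exec_commands(
#         hostname="ec2-203-0-113-10.compute-1.amazonaws.com",
#         username="hadoop",
#         key_name="raven-benchmark",
#         commands=["hostname", "uptime"],
#     )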
|
# Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import string
from typing import List
import paramiko
def ssh_exec_commands(hostname: str, port: int = 22, *, username: str, key_name: str, commands: List[str],
redirect_output=True):
if not commands:
return
if not key_name:
raise ValueError('EC2 key pair must be specified.')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if os.name == 'posix':
key_filename = f'{os.environ["HOME"]}/.aws/{key_name}.pem'
else:
key_filename = f'{os.environ["HOMEPATH"]}/.aws/{key_name}.pem'
ssh.connect(hostname=hostname, port=port, username=username, key_filename=key_filename)
for command in commands:
stdin, stdout, stderr = ssh.exec_command(command)
if redirect_output:
print(stdout.read().decode())
print(stderr.read().decode())
ssh.close()
def get_random_id(n: int = 10) -> str:
"""Generate random identifier.
:param n: Length of the identifier.
:return:
"""
return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
|
import kopf
import yaml
import kubernetes
import time
from jinja2 import Environment, FileSystemLoader
def wait_until_job_end(jobname):
api = kubernetes.client.BatchV1Api()
job_finished = False
jobs = api.list_namespaced_job('default')
while (not job_finished) and \
any(job.metadata.name == jobname for job in jobs.items):
time.sleep(1)
jobs = api.list_namespaced_job('default')
for job in jobs.items:
if job.metadata.name == jobname:
print(f"job with { jobname } found,wait untill end")
if job.status.succeeded == 1:
print(f"job with { jobname } success")
job_finished = True
def render_template(filename, vars_dict):
env = Environment(loader=FileSystemLoader('./templates'))
template = env.get_template(filename)
yaml_manifest = template.render(vars_dict)
    json_manifest = yaml.safe_load(yaml_manifest)
return json_manifest
def delete_success_jobs(mysql_instance_name):
print("start deletion")
api = kubernetes.client.BatchV1Api()
jobs = api.list_namespaced_job('default')
for job in jobs.items:
jobname = job.metadata.name
if (jobname == f"backup-{mysql_instance_name}-job") or \
(jobname == f"restore-{mysql_instance_name}-job"):
if job.status.succeeded == 1:
api.delete_namespaced_job(jobname,
'default',
propagation_policy='Background')
@kopf.on.create('otus.homework', 'v1', 'mysqls')
# Handler function that runs whenever a MySQL custom object is created:
def mysql_on_create(body, spec, **kwargs):
name = body['metadata']['name']
image = body['spec']['image']
password = body['spec']['password']
database = body['spec']['database']
storage_size = body['spec']['storage_size']
    # Generate the JSON manifests for the deployment
persistent_volume = render_template('mysql-pv.yml.j2',
{'name': name,
'storage_size': storage_size})
persistent_volume_claim = render_template('mysql-pvc.yml.j2',
{'name': name,
'storage_size': storage_size})
service = render_template('mysql-service.yml.j2', {'name': name})
deployment = render_template('mysql-deployment.yml.j2', {
'name': name,
'image': image,
'password': password,
'database': database})
restore_job = render_template('restore-job.yml.j2', {
'name': name,
'image': image,
'password': password,
'database': database})
    # Mark the created resources as children of the managed CustomResource:
kopf.append_owner_reference(persistent_volume, owner=body)
kopf.append_owner_reference(persistent_volume_claim, owner=body) # addopt
kopf.append_owner_reference(service, owner=body)
kopf.append_owner_reference(deployment, owner=body)
    # ^ This way, deleting the CR also removes all of its associated PVs, PVCs, services and deployments
    api = kubernetes.client.CoreV1Api()
    # Create the mysql PV:
    api.create_persistent_volume(persistent_volume)
    # Create the mysql PVC:
    api.create_namespaced_persistent_volume_claim('default', persistent_volume_claim)
    # Create the mysql Service:
    api.create_namespaced_service('default', service)
    # Create the mysql Deployment:
api = kubernetes.client.AppsV1Api()
api.create_namespaced_deployment('default', deployment)
kopf.event(body, type='Normal', reason='Logging', message=f"mysql deployment {body["metadata"]["name"]} created")
    # Try to restore from a backup
try:
api = kubernetes.client.BatchV1Api()
api.create_namespaced_job('default', restore_job)
restore_result = 'with'
kopf.event(body, type='Normal', reason='Logging',
message=f"restore_job created")
except kubernetes.client.rest.ApiException:
restore_result = 'without'
kopf.event(body, type='Error', reason='Logging',
message=f"restore_job creation failed")
    # Create the backup PV and PVC:
try:
backup_pv = render_template('backup-pv.yml.j2', {'name': name})
api = kubernetes.client.CoreV1Api()
        api.create_persistent_volume(backup_pv)
except kubernetes.client.rest.ApiException:
pass
try:
backup_pvc = render_template('backup-pvc.yml.j2', {'name': name})
api = kubernetes.client.CoreV1Api()
api.create_namespaced_persistent_volume_claim('default', backup_pvc)
except kubernetes.client.rest.ApiException:
pass
return {'message': f"mysql-instance created {restore_result} restore-job"}
@kopf.on.delete('otus.homework', 'v1', 'mysqls')
def delete_object_make_backup(body, **kwargs):
name = body['metadata']['name']
image = body['spec']['image']
password = body['spec']['password']
database = body['spec']['database']
delete_success_jobs(name)
    # Create the backup job:
api = kubernetes.client.BatchV1Api()
backup_job = render_template('backup-job.yml.j2', {
'name': name,
'image': image,
'password': password,
'database': database})
api.create_namespaced_job('default', backup_job)
wait_until_job_end(f"backup-{name}-job")
return {'message': "mysql and its children resources deleted"}
|
import kopf
import yaml
import kubernetes
import time
from jinja2 import Environment, FileSystemLoader
def wait_until_job_end(jobname):
api = kubernetes.client.BatchV1Api()
job_finished = False
jobs = api.list_namespaced_job('default')
while (not job_finished) and \
any(job.metadata.name == jobname for job in jobs.items):
time.sleep(1)
jobs = api.list_namespaced_job('default')
for job in jobs.items:
if job.metadata.name == jobname:
print(f"job with { jobname } found,wait untill end")
if job.status.succeeded == 1:
print(f"job with { jobname } success")
job_finished = True
def render_template(filename, vars_dict):
env = Environment(loader=FileSystemLoader('./templates'))
template = env.get_template(filename)
yaml_manifest = template.render(vars_dict)
    json_manifest = yaml.safe_load(yaml_manifest)
return json_manifest
def delete_success_jobs(mysql_instance_name):
print("start deletion")
api = kubernetes.client.BatchV1Api()
jobs = api.list_namespaced_job('default')
for job in jobs.items:
jobname = job.metadata.name
if (jobname == f"backup-{mysql_instance_name}-job") or \
(jobname == f"restore-{mysql_instance_name}-job"):
if job.status.succeeded == 1:
api.delete_namespaced_job(jobname,
'default',
propagation_policy='Background')
@kopf.on.create('otus.homework', 'v1', 'mysqls')
# Handler function that runs whenever a MySQL custom object is created:
def mysql_on_create(body, spec, **kwargs):
name = body['metadata']['name']
image = body['spec']['image']
password = body['spec']['password']
database = body['spec']['database']
storage_size = body['spec']['storage_size']
    # Generate the JSON manifests for the deployment
persistent_volume = render_template('mysql-pv.yml.j2',
{'name': name,
'storage_size': storage_size})
persistent_volume_claim = render_template('mysql-pvc.yml.j2',
{'name': name,
'storage_size': storage_size})
service = render_template('mysql-service.yml.j2', {'name': name})
deployment = render_template('mysql-deployment.yml.j2', {
'name': name,
'image': image,
'password': password,
'database': database})
restore_job = render_template('restore-job.yml.j2', {
'name': name,
'image': image,
'password': password,
'database': database})
    # Mark the created resources as children of the managed CustomResource:
kopf.append_owner_reference(persistent_volume, owner=body)
kopf.append_owner_reference(persistent_volume_claim, owner=body) # addopt
kopf.append_owner_reference(service, owner=body)
kopf.append_owner_reference(deployment, owner=body)
    # ^ This way, deleting the CR also removes all of its associated PVs, PVCs, services and deployments
    api = kubernetes.client.CoreV1Api()
    # Create the mysql PV:
    api.create_persistent_volume(persistent_volume)
    # Create the mysql PVC:
    api.create_namespaced_persistent_volume_claim('default', persistent_volume_claim)
    # Create the mysql Service:
    api.create_namespaced_service('default', service)
    # Create the mysql Deployment:
api = kubernetes.client.AppsV1Api()
api.create_namespaced_deployment('default', deployment)
kopf.event(body, type='Normal', reason='Logging', message=f"mysql deployment {body['metadata']['name']} created")
    # Try to restore from a backup
try:
api = kubernetes.client.BatchV1Api()
api.create_namespaced_job('default', restore_job)
restore_result = 'with'
kopf.event(body, type='Normal', reason='Logging',
message=f"restore_job created")
except kubernetes.client.rest.ApiException:
restore_result = 'without'
kopf.event(body, type='Error', reason='Logging',
message=f"restore_job creation failed")
    # Create the backup PV and PVC:
try:
backup_pv = render_template('backup-pv.yml.j2', {'name': name})
api = kubernetes.client.CoreV1Api()
        api.create_persistent_volume(backup_pv)
except kubernetes.client.rest.ApiException:
pass
try:
backup_pvc = render_template('backup-pvc.yml.j2', {'name': name})
api = kubernetes.client.CoreV1Api()
api.create_namespaced_persistent_volume_claim('default', backup_pvc)
except kubernetes.client.rest.ApiException:
pass
return {'message': f"mysql-instance created {restore_result} restore-job"}
@kopf.on.delete('otus.homework', 'v1', 'mysqls')
def delete_object_make_backup(body, **kwargs):
name = body['metadata']['name']
image = body['spec']['image']
password = body['spec']['password']
database = body['spec']['database']
delete_success_jobs(name)
    # Create the backup job:
api = kubernetes.client.BatchV1Api()
backup_job = render_template('backup-job.yml.j2', {
'name': name,
'image': image,
'password': password,
'database': database})
api.create_namespaced_job('default', backup_job)
wait_until_job_end(f"backup-{name}-job")
return {'message': "mysql and its children resources deleted"}
|
import enum
import json
import pytest
from dagster import (
ConfigMapping,
DagsterInstance,
Enum,
Field,
In,
InputDefinition,
Nothing,
Out,
Permissive,
Shape,
graph,
logger,
op,
resource,
success_hook,
)
from dagster.check import CheckError
from dagster.core.definitions.graph_definition import GraphDefinition
from dagster.core.definitions.partition import (
Partition,
PartitionedConfig,
StaticPartitionsDefinition,
)
from dagster.core.definitions.pipeline_definition import PipelineSubsetDefinition
from dagster.core.definitions.time_window_partitions import DailyPartitionsDefinition
from dagster.core.errors import (
DagsterConfigMappingFunctionError,
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
)
from dagster.loggers import json_console_logger
def get_ops():
@op
def emit_one(_):
return 1
@op
def add(_, x, y):
return x + y
return emit_one, add
def test_basic_graph():
emit_one, add = get_ops()
@graph
def get_two():
return add(emit_one(), emit_one())
assert isinstance(get_two, GraphDefinition)
result = get_two.execute_in_process()
assert result.success
def test_aliased_graph():
emit_one, add = get_ops()
@graph
def get_two():
return add(emit_one(), emit_one.alias("emit_one_part_two")())
assert isinstance(get_two, GraphDefinition)
result = get_two.execute_in_process()
assert result.success
assert result.output_for_node("emit_one") == 1
assert result.output_for_node("emit_one_part_two") == 1
def test_composite_graph():
emit_one, add = get_ops()
@graph
def add_one(x):
return add(emit_one(), x)
@graph
def add_two(x):
return add(add_one(x), emit_one())
assert isinstance(add_two, GraphDefinition)
def test_with_resources():
@resource
def a_resource(_):
return "a"
@op(required_resource_keys={"a"})
def needs_resource(context):
return context.resources.a
@graph
def my_graph():
needs_resource()
# proxy for "executable/job"
my_job = my_graph.to_job(resource_defs={"a": a_resource})
assert my_job.name == "my_graph"
result = my_job.execute_in_process()
assert result.success
result = my_graph.execute_in_process(resources={"a": "foo"})
assert result.success
def test_error_on_invalid_resource_key():
@resource
def test_resource():
return "test-resource"
@op(required_resource_keys={"test-resource"})
def needs_resource(_):
return ""
@graph
def test_graph():
needs_resource()
with pytest.raises(CheckError, match="test-resource"):
test_graph.to_job(
resource_defs={
"test-resource": test_resource,
}
)
def test_config_mapping_fn():
@resource(config_schema=str)
def date(context) -> str:
return context.resource_config
@op(
required_resource_keys={"date"},
config_schema={"msg": str},
)
def do_stuff(context):
return f"{context.op_config["msg"] } on {context.resources.date}"
@graph
def needs_config():
do_stuff()
def _mapped(val):
return {
"ops": {"do_stuff": {"config": {"msg": "i am here"}}},
"resources": {"date": {"config": val["date"]}},
}
job = needs_config.to_job(
resource_defs={"date": date},
config=ConfigMapping(
config_schema={"date": str}, # top level has to be dict
config_fn=_mapped,
),
)
result = job.execute_in_process(run_config={"date": "6/4"})
assert result.success
assert result.output_for_node("do_stuff") == "i am here on 6/4"
def test_default_config():
@resource(config_schema=str)
def date(context) -> str:
return context.resource_config
@op(
required_resource_keys={"date"},
config_schema={"msg": str},
)
def do_stuff(context):
return f"{context.op_config["msg"] } on {context.resources.date}"
@graph
def needs_config():
do_stuff()
job = needs_config.to_job(
resource_defs={"date": date},
config={
"ops": {"do_stuff": {"config": {"msg": "i am here"}}},
"resources": {"date": {"config": "6/3"}},
},
)
result = job.execute_in_process()
assert result.success
assert result.output_for_node("do_stuff") == "i am here on 6/3"
def test_suffix():
emit_one, add = get_ops()
@graph
def get_two():
return add(emit_one(), emit_one())
assert isinstance(get_two, GraphDefinition)
my_job = get_two.to_job(name="get_two_prod")
assert my_job.name == "get_two_prod"
def test_partitions():
@op(config_schema={"date": str})
def my_op(_):
pass
@graph
def my_graph():
my_op()
def config_fn(partition: Partition):
return {"ops": {"my_op": {"config": {"date": partition.value}}}}
job = my_graph.to_job(
config=PartitionedConfig(
run_config_for_partition_fn=config_fn,
partitions_def=StaticPartitionsDefinition(["2020-02-25", "2020-02-26"]),
),
)
partition_set = job.get_partition_set_def()
partitions = partition_set.get_partitions()
assert len(partitions) == 2
assert partitions[0].value == "2020-02-25"
assert partitions[0].name == "2020-02-25"
assert partition_set.run_config_for_partition(partitions[0]) == {
"ops": {"my_op": {"config": {"date": "2020-02-25"}}}
}
assert partition_set.run_config_for_partition(partitions[1]) == {
"ops": {"my_op": {"config": {"date": "2020-02-26"}}}
}
# Verify that even if the partition set config function mutates shared state
# when returning run config, the result partitions have different config
SHARED_CONFIG = {}
def shared_config_fn(partition: Partition):
my_config = SHARED_CONFIG
my_config["ops"] = {"my_op": {"config": {"date": partition.value}}}
return my_config
job = my_graph.to_job(
config=PartitionedConfig(
run_config_for_partition_fn=shared_config_fn,
partitions_def=StaticPartitionsDefinition(["2020-02-25", "2020-02-26"]),
),
)
partition_set = job.get_partition_set_def()
partitions = partition_set.get_partitions()
assert len(partitions) == 2
assert partitions[0].value == "2020-02-25"
assert partitions[0].name == "2020-02-25"
first_config = partition_set.run_config_for_partition(partitions[0])
second_config = partition_set.run_config_for_partition(partitions[1])
assert first_config != second_config
assert first_config == {"ops": {"my_op": {"config": {"date": "2020-02-25"}}}}
assert second_config == {"ops": {"my_op": {"config": {"date": "2020-02-26"}}}}
def test_tags_on_job():
@op
def basic():
pass
@graph
def basic_graph():
basic()
tags = {"my_tag": "yes"}
job = basic_graph.to_job(tags=tags)
assert job.tags == tags
result = job.execute_in_process()
assert result.success
def test_non_string_tag():
@op
def basic():
pass
@graph
def basic_graph():
basic()
inner = {"a": "b"}
tags = {"my_tag": inner}
job = basic_graph.to_job(tags=tags)
assert job.tags == {"my_tag": json.dumps(inner)}
with pytest.raises(DagsterInvalidDefinitionError, match="Invalid value for tag"):
basic_graph.to_job(tags={"my_tag": basic_graph})
def test_logger_defs():
@op
def my_op(_):
pass
@graph
def my_graph():
my_op()
@logger
def my_logger(_):
pass
my_job = my_graph.to_job(logger_defs={"abc": my_logger})
assert my_job.mode_definitions[0].loggers == {"abc": my_logger}
def test_job_with_hooks():
entered = []
@success_hook
def basic_hook(_):
entered.append("yes")
@op
def basic_emit():
pass
@graph
def basic_hook_graph():
basic_emit()
job_for_hook_testing = basic_hook_graph.to_job(hooks={basic_hook})
result = job_for_hook_testing.execute_in_process()
assert result.success
assert entered == ["yes"]
def test_composition_bug():
@op
def expensive_task1():
pass
@op
def expensive_task2(_my_input):
pass
@op
def expensive_task3(_my_input):
pass
@graph
def my_graph1():
task1_done = expensive_task1()
_task2_done = expensive_task2(task1_done)
@graph
def my_graph2():
_task3_done = expensive_task3()
@graph
def my_graph_final():
my_graph1()
my_graph2()
my_job = my_graph_final.to_job()
index = my_job.get_pipeline_index()
assert index.get_node_def_snap("my_graph1")
assert index.get_node_def_snap("my_graph2")
def test_conflict():
@op(name="conflict")
def test_1():
pass
@graph(name="conflict")
def test_2():
pass
with pytest.raises(DagsterInvalidDefinitionError, match="definitions with the same name"):
@graph
def _conflict_zone():
test_1()
test_2()
def test_desc():
@graph(description="graph desc")
def empty():
pass
job = empty.to_job()
assert job.description == "graph desc"
desc = "job desc"
job = empty.to_job(description=desc)
assert job.description == desc
def test_config_naming_collisions():
@op(config_schema={"solids": Permissive(), "ops": Permissive()})
def my_op(context):
return context.op_config
@graph
def my_graph():
return my_op()
config = {
"solids": {"solids": {"foo": {"config": {"foobar": "bar"}}}},
"ops": {"solids": {"foo": {"config": {"foobar": "bar"}}}},
}
result = my_graph.execute_in_process(run_config={"ops": {"my_op": {"config": config}}})
assert result.success
assert result.output_value() == config
@graph
def ops():
return my_op()
result = ops.execute_in_process(run_config={"ops": {"my_op": {"config": config}}})
assert result.success
assert result.output_value() == config
def test_to_job_default_config_field_aliasing():
@op
def add_one(x):
return x + 1
@graph
def my_graph():
return add_one()
my_job = my_graph.to_job(config={"ops": {"add_one": {"inputs": {"x": {"value": 1}}}}})
result = my_job.execute_in_process()
assert result.success
result = my_job.execute_in_process({"solids": {"add_one": {"inputs": {"x": {"value": 1}}}}})
assert result.success
result = my_job.execute_in_process({"ops": {"add_one": {"inputs": {"x": {"value": 1}}}}})
assert result.success
def test_to_job_incomplete_default_config():
@op(config_schema={"foo": str})
def my_op(_):
pass
@graph
def my_graph():
my_op()
default_config_error = "Error in config when building job 'my_job' from graph 'my_graph' "
invalid_default_error = "Invalid default_value for Field."
invalid_configs = [
(
{},
default_config_error,
), # Not providing required config nested into the op config schema.
(
{
"ops": {
"my_op": {"config": {"foo": "bar"}},
"not_my_op": {"config": {"foo": "bar"}},
}
},
invalid_default_error,
), # Providing extraneous config for an op that doesn't exist.
(
{
"ops": {"my_op": {"config": {"foo": "bar"}}},
"solids": {"my_op": {"config": {"foo": "bar"}}},
},
default_config_error,
), # Providing the same config with multiple aliases.
]
# Ensure that errors nested into the config tree are caught
for invalid_config, error_msg in invalid_configs:
with pytest.raises(
DagsterInvalidConfigError,
match=error_msg,
):
my_graph.to_job(name="my_job", config=invalid_config)
class TestEnum(enum.Enum):
ONE = 1
TWO = 2
def test_enum_config_mapping():
@op(
config_schema={
"my_enum": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="ONE"
)
}
)
def my_op(context):
return context.op_config["my_enum"]
@graph
def my_graph():
my_op()
def _use_defaults_mapping(_):
return {}
use_defaults = my_graph.to_job(config=ConfigMapping(config_fn=_use_defaults_mapping))
result = use_defaults.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.ONE
def _override_defaults_mapping(_):
return {"ops": {"my_op": {"config": {"my_enum": "TWO"}}}}
override_defaults = my_graph.to_job(config=ConfigMapping(config_fn=_override_defaults_mapping))
result = override_defaults.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def _ingest_config_mapping(x):
return {"ops": {"my_op": {"config": {"my_enum": x["my_field"]}}}}
default_config_mapping = ConfigMapping(
config_fn=_ingest_config_mapping,
config_schema=Shape(
{
"my_field": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="TWO"
)
}
),
receive_processed_config_values=False,
)
ingest_mapping = my_graph.to_job(config=default_config_mapping)
result = ingest_mapping.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
no_default_config_mapping = ConfigMapping(
config_fn=_ingest_config_mapping,
config_schema=Shape({"my_field": Field(Enum.from_python_enum(TestEnum), is_required=True)}),
receive_processed_config_values=False,
)
ingest_mapping_no_default = my_graph.to_job(config=no_default_config_mapping)
result = ingest_mapping_no_default.execute_in_process(run_config={"my_field": "TWO"})
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def _ingest_post_processed_config(x):
assert x["my_field"] == TestEnum.TWO
return {"ops": {"my_op": {"config": {"my_enum": "TWO"}}}}
config_mapping_with_preprocessing = ConfigMapping(
config_fn=_ingest_post_processed_config,
config_schema=Shape({"my_field": Field(Enum.from_python_enum(TestEnum), is_required=True)}),
)
ingest_preprocessing = my_graph.to_job(config=config_mapping_with_preprocessing)
result = ingest_preprocessing.execute_in_process(run_config={"my_field": "TWO"})
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def test_enum_default_config():
@op(
config_schema={
"my_enum": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="ONE"
)
}
)
def my_op(context):
return context.op_config["my_enum"]
@graph
def my_graph():
my_op()
my_job = my_graph.to_job(config={"ops": {"my_op": {"config": {"my_enum": "TWO"}}}})
result = my_job.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def test_enum_to_execution():
@op(
config_schema={
"my_enum": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="ONE"
)
}
)
def my_op(context):
return context.op_config["my_enum"]
@graph
def my_graph():
my_op()
my_job = my_graph.to_job()
result = my_job.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.ONE
result = my_graph.execute_in_process(
run_config={"ops": {"my_op": {"config": {"my_enum": "TWO"}}}}
)
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def test_raise_on_error_execute_in_process():
error_str = "My error"
@op
def emit_error():
raise Exception(error_str)
@graph
def error_graph():
emit_error()
error_job = error_graph.to_job()
with pytest.raises(Exception, match=error_str):
error_job.execute_in_process()
result = error_job.execute_in_process(raise_on_error=False)
assert not result.success
def test_job_subset():
@op
def my_op():
pass
@graph
def basic():
my_op()
my_op()
the_job = basic.to_job()
assert isinstance(the_job.get_pipeline_subset_def({"my_op"}), PipelineSubsetDefinition)
def test_tags():
@graph(tags={"a": "x"})
def mygraphic():
pass
mygraphic_job = mygraphic.to_job()
assert mygraphic_job.tags == {"a": "x"}
with DagsterInstance.ephemeral() as instance:
result = mygraphic_job.execute_in_process(instance=instance)
assert result.success
run = instance.get_runs()[0]
assert run.tags.get("a") == "x"
def test_job_and_graph_tags():
@graph(tags={"a": "x", "c": "q"})
def mygraphic():
pass
job = mygraphic.to_job(tags={"a": "y", "b": "z"})
assert job.tags == {"a": "y", "b": "z", "c": "q"}
with DagsterInstance.ephemeral() as instance:
result = job.execute_in_process(instance=instance)
assert result.success
run = instance.get_runs()[0]
assert run.tags == {"a": "y", "b": "z", "c": "q"}
def test_output_for_node_non_standard_name():
@op(out={"foo": Out()})
def my_op():
return 5
@graph
def basic():
my_op()
result = basic.execute_in_process()
assert result.output_for_node("my_op", "foo") == 5
def test_execute_in_process_aliased_graph():
@op
def my_op():
return 5
@graph
def my_graph():
return my_op()
result = my_graph.alias("foo_graph").execute_in_process()
assert result.success
assert result.output_value() == 5
def test_execute_in_process_aliased_graph_config():
@op(config_schema=str)
def my_op(context):
return context.op_config
@graph
def my_graph():
return my_op()
result = my_graph.alias("foo_graph").execute_in_process(
run_config={"ops": {"my_op": {"config": "foo"}}}
)
assert result.success
assert result.output_value() == "foo"
def test_job_name_valid():
with pytest.raises(DagsterInvalidDefinitionError):
@graph
def my_graph():
pass
my_graph.to_job(name="a/b")
def test_top_level_config_mapping_graph():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(_):
return {"my_op": {"config": "foo"}}
@graph(config=ConfigMapping(config_fn=_config_fn))
def my_graph():
my_op()
result = my_graph.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == "foo"
def test_top_level_config_mapping_config_schema():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
result = my_graph.to_job().execute_in_process(run_config={"ops": {"config": "foo"}})
assert result.success
assert result.output_for_node("my_op") == "foo"
my_job = my_graph.to_job(config={"ops": {"config": "foo"}})
result = my_job.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == "foo"
def test_nested_graph_config_mapping():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _nested_config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_nested_config_fn, config_schema=str))
def my_nested_graph():
my_op()
def _config_fn(outer):
return {"my_nested_graph": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_nested_graph()
result = my_graph.to_job().execute_in_process(run_config={"ops": {"config": "foo"}})
assert result.success
assert result.output_for_node("my_nested_graph.my_op") == "foo"
def test_top_level_graph_config_mapping_failure():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _nested_config_fn(_):
return "foo"
@graph(config=ConfigMapping(config_fn=_nested_config_fn))
def my_nested_graph():
my_op()
with pytest.raises(
DagsterInvalidConfigError,
match="In pipeline 'my_nested_graph', top level graph 'my_nested_graph' has a configuration error.",
):
my_nested_graph.execute_in_process()
def test_top_level_graph_outer_config_failure():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
with pytest.raises(DagsterInvalidConfigError, match="Invalid scalar at path root:ops:config"):
my_graph.to_job().execute_in_process(run_config={"ops": {"config": {"bad_type": "foo"}}})
with pytest.raises(DagsterInvalidConfigError, match="Invalid scalar at path root:config"):
my_graph.to_job(config={"ops": {"config": {"bad_type": "foo"}}})
def test_graph_dict_config():
@op(config_schema=str)
def my_op(context):
return context.op_config
@graph(config={"my_op": {"config": "foo"}})
def my_graph():
return my_op()
result = my_graph.execute_in_process()
assert result.success
assert result.output_value() == "foo"
def test_graph_with_configured():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
result = my_graph.configured(name="my_graph", config_or_config_fn="foo").execute_in_process()
assert result.success
assert result.output_for_node("my_op") == "foo"
def _configured_use_fn(outer):
return outer
result = (
my_graph.configured(
name="my_graph", config_or_config_fn=_configured_use_fn, config_schema=str
)
.to_job()
.execute_in_process(run_config={"ops": {"config": "foo"}})
)
assert result.success
assert result.output_for_node("my_op") == "foo"
def test_graph_configured_error_in_config():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
def _bad_config_fn(_):
return 2
configured_graph = my_graph.configured(name="blah", config_or_config_fn=_bad_config_fn)
with pytest.raises(DagsterInvalidConfigError, match="Error in config for graph blah"):
configured_graph.execute_in_process()
def test_graph_configured_error_in_fn():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
def _bad_config_fn(_):
raise Exception("Uh oh")
configured_graph = my_graph.configured(name="blah", config_or_config_fn=_bad_config_fn)
with pytest.raises(
DagsterConfigMappingFunctionError,
match="The config mapping function on a `configured` GraphDefinition has thrown an "
"unexpected error during its execution.",
):
configured_graph.execute_in_process()
def test_job_non_default_logger_config():
@graph
def your_graph():
pass
your_job = your_graph.to_job(
logger_defs={"json": json_console_logger}, config={"loggers": {"json": {"config": {}}}}
)
result = your_job.execute_in_process()
assert result.success
result = your_job.execute_in_process(
run_config={"loggers": {"json": {"config": {"log_level": "DEBUG"}}}}
)
assert result.success
def test_job_partitions_def():
@op
def my_op(context):
assert context.has_partition_key
assert context.partition_key == "2020-01-01"
@graph
def my_graph():
my_op()
my_job = my_graph.to_job(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))
assert my_job.execute_in_process(partition_key="2020-01-01").success
def test_graph_top_level_input():
@op
def my_op(x, y):
return x + y
@graph
def my_graph(x, y):
return my_op(x, y)
result = my_graph.execute_in_process(
run_config={"inputs": {"x": {"value": 2}, "y": {"value": 3}}}
)
assert result.success
assert result.output_for_node("my_op") == 5
@graph
def my_graph_with_nesting(x):
my_graph(x, x)
result = my_graph_with_nesting.execute_in_process(run_config={"inputs": {"x": {"value": 2}}})
assert result.success
assert result.output_for_node("my_graph.my_op") == 4
def test_nothing_inputs_graph():
@op(ins={"sync_signal": In(Nothing)})
def my_op():
...
@graph(input_defs=[InputDefinition("sync_signal", Nothing)])
def my_pipeline(sync_signal):
my_op(sync_signal)
the_job = my_pipeline.to_job()
result = the_job.execute_in_process()
assert result.success
|
import enum
import json
import pytest
from dagster import (
ConfigMapping,
DagsterInstance,
Enum,
Field,
In,
InputDefinition,
Nothing,
Out,
Permissive,
Shape,
graph,
logger,
op,
resource,
success_hook,
)
from dagster.check import CheckError
from dagster.core.definitions.graph_definition import GraphDefinition
from dagster.core.definitions.partition import (
Partition,
PartitionedConfig,
StaticPartitionsDefinition,
)
from dagster.core.definitions.pipeline_definition import PipelineSubsetDefinition
from dagster.core.definitions.time_window_partitions import DailyPartitionsDefinition
from dagster.core.errors import (
DagsterConfigMappingFunctionError,
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
)
from dagster.loggers import json_console_logger
def get_ops():
@op
def emit_one(_):
return 1
@op
def add(_, x, y):
return x + y
return emit_one, add
def test_basic_graph():
emit_one, add = get_ops()
@graph
def get_two():
return add(emit_one(), emit_one())
assert isinstance(get_two, GraphDefinition)
result = get_two.execute_in_process()
assert result.success
def test_aliased_graph():
emit_one, add = get_ops()
@graph
def get_two():
return add(emit_one(), emit_one.alias("emit_one_part_two")())
assert isinstance(get_two, GraphDefinition)
result = get_two.execute_in_process()
assert result.success
assert result.output_for_node("emit_one") == 1
assert result.output_for_node("emit_one_part_two") == 1
def test_composite_graph():
emit_one, add = get_ops()
@graph
def add_one(x):
return add(emit_one(), x)
@graph
def add_two(x):
return add(add_one(x), emit_one())
assert isinstance(add_two, GraphDefinition)
def test_with_resources():
@resource
def a_resource(_):
return "a"
@op(required_resource_keys={"a"})
def needs_resource(context):
return context.resources.a
@graph
def my_graph():
needs_resource()
# proxy for "executable/job"
my_job = my_graph.to_job(resource_defs={"a": a_resource})
assert my_job.name == "my_graph"
result = my_job.execute_in_process()
assert result.success
result = my_graph.execute_in_process(resources={"a": "foo"})
assert result.success
def test_error_on_invalid_resource_key():
@resource
def test_resource():
return "test-resource"
@op(required_resource_keys={"test-resource"})
def needs_resource(_):
return ""
@graph
def test_graph():
needs_resource()
with pytest.raises(CheckError, match="test-resource"):
test_graph.to_job(
resource_defs={
"test-resource": test_resource,
}
)
def test_config_mapping_fn():
@resource(config_schema=str)
def date(context) -> str:
return context.resource_config
@op(
required_resource_keys={"date"},
config_schema={"msg": str},
)
def do_stuff(context):
return f"{context.op_config['msg'] } on {context.resources.date}"
@graph
def needs_config():
do_stuff()
def _mapped(val):
return {
"ops": {"do_stuff": {"config": {"msg": "i am here"}}},
"resources": {"date": {"config": val["date"]}},
}
job = needs_config.to_job(
resource_defs={"date": date},
config=ConfigMapping(
config_schema={"date": str}, # top level has to be dict
config_fn=_mapped,
),
)
result = job.execute_in_process(run_config={"date": "6/4"})
assert result.success
assert result.output_for_node("do_stuff") == "i am here on 6/4"
def test_default_config():
@resource(config_schema=str)
def date(context) -> str:
return context.resource_config
@op(
required_resource_keys={"date"},
config_schema={"msg": str},
)
def do_stuff(context):
return f"{context.op_config['msg'] } on {context.resources.date}"
@graph
def needs_config():
do_stuff()
job = needs_config.to_job(
resource_defs={"date": date},
config={
"ops": {"do_stuff": {"config": {"msg": "i am here"}}},
"resources": {"date": {"config": "6/3"}},
},
)
result = job.execute_in_process()
assert result.success
assert result.output_for_node("do_stuff") == "i am here on 6/3"
def test_suffix():
emit_one, add = get_ops()
@graph
def get_two():
return add(emit_one(), emit_one())
assert isinstance(get_two, GraphDefinition)
my_job = get_two.to_job(name="get_two_prod")
assert my_job.name == "get_two_prod"
def test_partitions():
@op(config_schema={"date": str})
def my_op(_):
pass
@graph
def my_graph():
my_op()
def config_fn(partition: Partition):
return {"ops": {"my_op": {"config": {"date": partition.value}}}}
job = my_graph.to_job(
config=PartitionedConfig(
run_config_for_partition_fn=config_fn,
partitions_def=StaticPartitionsDefinition(["2020-02-25", "2020-02-26"]),
),
)
partition_set = job.get_partition_set_def()
partitions = partition_set.get_partitions()
assert len(partitions) == 2
assert partitions[0].value == "2020-02-25"
assert partitions[0].name == "2020-02-25"
assert partition_set.run_config_for_partition(partitions[0]) == {
"ops": {"my_op": {"config": {"date": "2020-02-25"}}}
}
assert partition_set.run_config_for_partition(partitions[1]) == {
"ops": {"my_op": {"config": {"date": "2020-02-26"}}}
}
# Verify that even if the partition set config function mutates shared state
# when returning run config, the result partitions have different config
SHARED_CONFIG = {}
def shared_config_fn(partition: Partition):
my_config = SHARED_CONFIG
my_config["ops"] = {"my_op": {"config": {"date": partition.value}}}
return my_config
job = my_graph.to_job(
config=PartitionedConfig(
run_config_for_partition_fn=shared_config_fn,
partitions_def=StaticPartitionsDefinition(["2020-02-25", "2020-02-26"]),
),
)
partition_set = job.get_partition_set_def()
partitions = partition_set.get_partitions()
assert len(partitions) == 2
assert partitions[0].value == "2020-02-25"
assert partitions[0].name == "2020-02-25"
first_config = partition_set.run_config_for_partition(partitions[0])
second_config = partition_set.run_config_for_partition(partitions[1])
assert first_config != second_config
assert first_config == {"ops": {"my_op": {"config": {"date": "2020-02-25"}}}}
assert second_config == {"ops": {"my_op": {"config": {"date": "2020-02-26"}}}}
def test_tags_on_job():
@op
def basic():
pass
@graph
def basic_graph():
basic()
tags = {"my_tag": "yes"}
job = basic_graph.to_job(tags=tags)
assert job.tags == tags
result = job.execute_in_process()
assert result.success
def test_non_string_tag():
@op
def basic():
pass
@graph
def basic_graph():
basic()
inner = {"a": "b"}
tags = {"my_tag": inner}
job = basic_graph.to_job(tags=tags)
assert job.tags == {"my_tag": json.dumps(inner)}
with pytest.raises(DagsterInvalidDefinitionError, match="Invalid value for tag"):
basic_graph.to_job(tags={"my_tag": basic_graph})
def test_logger_defs():
@op
def my_op(_):
pass
@graph
def my_graph():
my_op()
@logger
def my_logger(_):
pass
my_job = my_graph.to_job(logger_defs={"abc": my_logger})
assert my_job.mode_definitions[0].loggers == {"abc": my_logger}
def test_job_with_hooks():
entered = []
@success_hook
def basic_hook(_):
entered.append("yes")
@op
def basic_emit():
pass
@graph
def basic_hook_graph():
basic_emit()
job_for_hook_testing = basic_hook_graph.to_job(hooks={basic_hook})
result = job_for_hook_testing.execute_in_process()
assert result.success
assert entered == ["yes"]
def test_composition_bug():
@op
def expensive_task1():
pass
@op
def expensive_task2(_my_input):
pass
@op
def expensive_task3(_my_input):
pass
@graph
def my_graph1():
task1_done = expensive_task1()
_task2_done = expensive_task2(task1_done)
@graph
def my_graph2():
_task3_done = expensive_task3()
@graph
def my_graph_final():
my_graph1()
my_graph2()
my_job = my_graph_final.to_job()
index = my_job.get_pipeline_index()
assert index.get_node_def_snap("my_graph1")
assert index.get_node_def_snap("my_graph2")
def test_conflict():
@op(name="conflict")
def test_1():
pass
@graph(name="conflict")
def test_2():
pass
with pytest.raises(DagsterInvalidDefinitionError, match="definitions with the same name"):
@graph
def _conflict_zone():
test_1()
test_2()
def test_desc():
@graph(description="graph desc")
def empty():
pass
job = empty.to_job()
assert job.description == "graph desc"
desc = "job desc"
job = empty.to_job(description=desc)
assert job.description == desc
def test_config_naming_collisions():
@op(config_schema={"solids": Permissive(), "ops": Permissive()})
def my_op(context):
return context.op_config
@graph
def my_graph():
return my_op()
config = {
"solids": {"solids": {"foo": {"config": {"foobar": "bar"}}}},
"ops": {"solids": {"foo": {"config": {"foobar": "bar"}}}},
}
result = my_graph.execute_in_process(run_config={"ops": {"my_op": {"config": config}}})
assert result.success
assert result.output_value() == config
@graph
def ops():
return my_op()
result = ops.execute_in_process(run_config={"ops": {"my_op": {"config": config}}})
assert result.success
assert result.output_value() == config
def test_to_job_default_config_field_aliasing():
@op
def add_one(x):
return x + 1
@graph
def my_graph():
return add_one()
my_job = my_graph.to_job(config={"ops": {"add_one": {"inputs": {"x": {"value": 1}}}}})
result = my_job.execute_in_process()
assert result.success
result = my_job.execute_in_process({"solids": {"add_one": {"inputs": {"x": {"value": 1}}}}})
assert result.success
result = my_job.execute_in_process({"ops": {"add_one": {"inputs": {"x": {"value": 1}}}}})
assert result.success
def test_to_job_incomplete_default_config():
@op(config_schema={"foo": str})
def my_op(_):
pass
@graph
def my_graph():
my_op()
default_config_error = "Error in config when building job 'my_job' from graph 'my_graph' "
invalid_default_error = "Invalid default_value for Field."
invalid_configs = [
(
{},
default_config_error,
), # Not providing required config nested into the op config schema.
(
{
"ops": {
"my_op": {"config": {"foo": "bar"}},
"not_my_op": {"config": {"foo": "bar"}},
}
},
invalid_default_error,
), # Providing extraneous config for an op that doesn't exist.
(
{
"ops": {"my_op": {"config": {"foo": "bar"}}},
"solids": {"my_op": {"config": {"foo": "bar"}}},
},
default_config_error,
), # Providing the same config with multiple aliases.
]
# Ensure that errors nested into the config tree are caught
for invalid_config, error_msg in invalid_configs:
with pytest.raises(
DagsterInvalidConfigError,
match=error_msg,
):
my_graph.to_job(name="my_job", config=invalid_config)
class TestEnum(enum.Enum):
ONE = 1
TWO = 2
def test_enum_config_mapping():
@op(
config_schema={
"my_enum": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="ONE"
)
}
)
def my_op(context):
return context.op_config["my_enum"]
@graph
def my_graph():
my_op()
def _use_defaults_mapping(_):
return {}
use_defaults = my_graph.to_job(config=ConfigMapping(config_fn=_use_defaults_mapping))
result = use_defaults.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.ONE
def _override_defaults_mapping(_):
return {"ops": {"my_op": {"config": {"my_enum": "TWO"}}}}
override_defaults = my_graph.to_job(config=ConfigMapping(config_fn=_override_defaults_mapping))
result = override_defaults.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def _ingest_config_mapping(x):
return {"ops": {"my_op": {"config": {"my_enum": x["my_field"]}}}}
default_config_mapping = ConfigMapping(
config_fn=_ingest_config_mapping,
config_schema=Shape(
{
"my_field": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="TWO"
)
}
),
receive_processed_config_values=False,
)
ingest_mapping = my_graph.to_job(config=default_config_mapping)
result = ingest_mapping.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
no_default_config_mapping = ConfigMapping(
config_fn=_ingest_config_mapping,
config_schema=Shape({"my_field": Field(Enum.from_python_enum(TestEnum), is_required=True)}),
receive_processed_config_values=False,
)
ingest_mapping_no_default = my_graph.to_job(config=no_default_config_mapping)
result = ingest_mapping_no_default.execute_in_process(run_config={"my_field": "TWO"})
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def _ingest_post_processed_config(x):
assert x["my_field"] == TestEnum.TWO
return {"ops": {"my_op": {"config": {"my_enum": "TWO"}}}}
config_mapping_with_preprocessing = ConfigMapping(
config_fn=_ingest_post_processed_config,
config_schema=Shape({"my_field": Field(Enum.from_python_enum(TestEnum), is_required=True)}),
)
ingest_preprocessing = my_graph.to_job(config=config_mapping_with_preprocessing)
result = ingest_preprocessing.execute_in_process(run_config={"my_field": "TWO"})
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def test_enum_default_config():
@op(
config_schema={
"my_enum": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="ONE"
)
}
)
def my_op(context):
return context.op_config["my_enum"]
@graph
def my_graph():
my_op()
my_job = my_graph.to_job(config={"ops": {"my_op": {"config": {"my_enum": "TWO"}}}})
result = my_job.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def test_enum_to_execution():
@op(
config_schema={
"my_enum": Field(
Enum.from_python_enum(TestEnum), is_required=False, default_value="ONE"
)
}
)
def my_op(context):
return context.op_config["my_enum"]
@graph
def my_graph():
my_op()
my_job = my_graph.to_job()
result = my_job.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == TestEnum.ONE
result = my_graph.execute_in_process(
run_config={"ops": {"my_op": {"config": {"my_enum": "TWO"}}}}
)
assert result.success
assert result.output_for_node("my_op") == TestEnum.TWO
def test_raise_on_error_execute_in_process():
error_str = "My error"
@op
def emit_error():
raise Exception(error_str)
@graph
def error_graph():
emit_error()
error_job = error_graph.to_job()
with pytest.raises(Exception, match=error_str):
error_job.execute_in_process()
result = error_job.execute_in_process(raise_on_error=False)
assert not result.success
def test_job_subset():
@op
def my_op():
pass
@graph
def basic():
my_op()
my_op()
the_job = basic.to_job()
assert isinstance(the_job.get_pipeline_subset_def({"my_op"}), PipelineSubsetDefinition)
def test_tags():
@graph(tags={"a": "x"})
def mygraphic():
pass
mygraphic_job = mygraphic.to_job()
assert mygraphic_job.tags == {"a": "x"}
with DagsterInstance.ephemeral() as instance:
result = mygraphic_job.execute_in_process(instance=instance)
assert result.success
run = instance.get_runs()[0]
assert run.tags.get("a") == "x"
def test_job_and_graph_tags():
@graph(tags={"a": "x", "c": "q"})
def mygraphic():
pass
job = mygraphic.to_job(tags={"a": "y", "b": "z"})
assert job.tags == {"a": "y", "b": "z", "c": "q"}
with DagsterInstance.ephemeral() as instance:
result = job.execute_in_process(instance=instance)
assert result.success
run = instance.get_runs()[0]
assert run.tags == {"a": "y", "b": "z", "c": "q"}
def test_output_for_node_non_standard_name():
@op(out={"foo": Out()})
def my_op():
return 5
@graph
def basic():
my_op()
result = basic.execute_in_process()
assert result.output_for_node("my_op", "foo") == 5
def test_execute_in_process_aliased_graph():
@op
def my_op():
return 5
@graph
def my_graph():
return my_op()
result = my_graph.alias("foo_graph").execute_in_process()
assert result.success
assert result.output_value() == 5
def test_execute_in_process_aliased_graph_config():
@op(config_schema=str)
def my_op(context):
return context.op_config
@graph
def my_graph():
return my_op()
result = my_graph.alias("foo_graph").execute_in_process(
run_config={"ops": {"my_op": {"config": "foo"}}}
)
assert result.success
assert result.output_value() == "foo"
def test_job_name_valid():
with pytest.raises(DagsterInvalidDefinitionError):
@graph
def my_graph():
pass
my_graph.to_job(name="a/b")
def test_top_level_config_mapping_graph():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(_):
return {"my_op": {"config": "foo"}}
@graph(config=ConfigMapping(config_fn=_config_fn))
def my_graph():
my_op()
result = my_graph.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == "foo"
def test_top_level_config_mapping_config_schema():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
result = my_graph.to_job().execute_in_process(run_config={"ops": {"config": "foo"}})
assert result.success
assert result.output_for_node("my_op") == "foo"
my_job = my_graph.to_job(config={"ops": {"config": "foo"}})
result = my_job.execute_in_process()
assert result.success
assert result.output_for_node("my_op") == "foo"
def test_nested_graph_config_mapping():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _nested_config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_nested_config_fn, config_schema=str))
def my_nested_graph():
my_op()
def _config_fn(outer):
return {"my_nested_graph": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_nested_graph()
result = my_graph.to_job().execute_in_process(run_config={"ops": {"config": "foo"}})
assert result.success
assert result.output_for_node("my_nested_graph.my_op") == "foo"
def test_top_level_graph_config_mapping_failure():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _nested_config_fn(_):
return "foo"
@graph(config=ConfigMapping(config_fn=_nested_config_fn))
def my_nested_graph():
my_op()
with pytest.raises(
DagsterInvalidConfigError,
match="In pipeline 'my_nested_graph', top level graph 'my_nested_graph' has a configuration error.",
):
my_nested_graph.execute_in_process()
def test_top_level_graph_outer_config_failure():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
with pytest.raises(DagsterInvalidConfigError, match="Invalid scalar at path root:ops:config"):
my_graph.to_job().execute_in_process(run_config={"ops": {"config": {"bad_type": "foo"}}})
with pytest.raises(DagsterInvalidConfigError, match="Invalid scalar at path root:config"):
my_graph.to_job(config={"ops": {"config": {"bad_type": "foo"}}})
def test_graph_dict_config():
@op(config_schema=str)
def my_op(context):
return context.op_config
@graph(config={"my_op": {"config": "foo"}})
def my_graph():
return my_op()
result = my_graph.execute_in_process()
assert result.success
assert result.output_value() == "foo"
def test_graph_with_configured():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
result = my_graph.configured(name="my_graph", config_or_config_fn="foo").execute_in_process()
assert result.success
assert result.output_for_node("my_op") == "foo"
def _configured_use_fn(outer):
return outer
result = (
my_graph.configured(
name="my_graph", config_or_config_fn=_configured_use_fn, config_schema=str
)
.to_job()
.execute_in_process(run_config={"ops": {"config": "foo"}})
)
assert result.success
assert result.output_for_node("my_op") == "foo"
def test_graph_configured_error_in_config():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
def _bad_config_fn(_):
return 2
configured_graph = my_graph.configured(name="blah", config_or_config_fn=_bad_config_fn)
with pytest.raises(DagsterInvalidConfigError, match="Error in config for graph blah"):
configured_graph.execute_in_process()
def test_graph_configured_error_in_fn():
@op(config_schema=str)
def my_op(context):
return context.op_config
def _config_fn(outer):
return {"my_op": {"config": outer}}
@graph(config=ConfigMapping(config_fn=_config_fn, config_schema=str))
def my_graph():
my_op()
def _bad_config_fn(_):
raise Exception("Uh oh")
configured_graph = my_graph.configured(name="blah", config_or_config_fn=_bad_config_fn)
with pytest.raises(
DagsterConfigMappingFunctionError,
match="The config mapping function on a `configured` GraphDefinition has thrown an "
"unexpected error during its execution.",
):
configured_graph.execute_in_process()
def test_job_non_default_logger_config():
@graph
def your_graph():
pass
your_job = your_graph.to_job(
logger_defs={"json": json_console_logger}, config={"loggers": {"json": {"config": {}}}}
)
result = your_job.execute_in_process()
assert result.success
result = your_job.execute_in_process(
run_config={"loggers": {"json": {"config": {"log_level": "DEBUG"}}}}
)
assert result.success
def test_job_partitions_def():
@op
def my_op(context):
assert context.has_partition_key
assert context.partition_key == "2020-01-01"
@graph
def my_graph():
my_op()
my_job = my_graph.to_job(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))
assert my_job.execute_in_process(partition_key="2020-01-01").success
def test_graph_top_level_input():
@op
def my_op(x, y):
return x + y
@graph
def my_graph(x, y):
return my_op(x, y)
result = my_graph.execute_in_process(
run_config={"inputs": {"x": {"value": 2}, "y": {"value": 3}}}
)
assert result.success
assert result.output_for_node("my_op") == 5
@graph
def my_graph_with_nesting(x):
my_graph(x, x)
result = my_graph_with_nesting.execute_in_process(run_config={"inputs": {"x": {"value": 2}}})
assert result.success
assert result.output_for_node("my_graph.my_op") == 4
def test_nothing_inputs_graph():
@op(ins={"sync_signal": In(Nothing)})
def my_op():
...
@graph(input_defs=[InputDefinition("sync_signal", Nothing)])
def my_pipeline(sync_signal):
my_op(sync_signal)
the_job = my_pipeline.to_job()
result = the_job.execute_in_process()
assert result.success
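# --- Hedged reference sketch (added for illustration; not part of the original tests) ---
# Minimal form of the pattern the suite above exercises repeatedly: compose ops in a
# @graph, build a job with to_job(), and run it in process. The names simple_op and
# simple_graph are illustrative only.
@op
def simple_op():
    return 1
@graph
def simple_graph():
    simple_op()
def test_minimal_graph_to_job_sketch():
    result = simple_graph.to_job().execute_in_process()
    assert result.success
    assert result.output_for_node("simple_op") == 1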
|
import pytest
from text_providers.Nestle1904LowfatProvider import Nestle1904LowfatProvider
from typing import Callable
from unittest.mock import MagicMock
from AnoixoError import ProbableBugError, ServerOverwhelmedError
from TextQuery import TextQuery
@pytest.fixture
def basex_session_mock(mocker):
basex_session_mock = MagicMock()
mocker.patch('BaseXClient.BaseXClient.Session', basex_session_mock)
return basex_session_mock
@pytest.fixture
def provider():
return Nestle1904LowfatProvider()
def mock_basex_on_query_execute(mocker, basex_session_mock: MagicMock, on_query_execute: Callable):
class MockQuery:
def __init__(self, query_string):
pass
def execute(self):
return on_query_execute()
basex_session_mock.return_value.query = lambda query_string: MockQuery(query_string)
spy = mocker.spy(MockQuery, '__init__')
return spy
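# Note (added annotation): because mocker.spy wraps MockQuery.__init__, the spy's
# call_args.args is (mock_query_instance, query_string); assertions further down that
# inspect call_args.args[1] are therefore reading the XQuery string handed to the
# BaseX session.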
def test_handles_basex_not_available(mocker, provider):
def raise_connection_refused(*args):
raise ConnectionRefusedError()
mocker.patch('BaseXClient.BaseXClient.Session.__init__', new=raise_connection_refused)
with pytest.raises(ServerOverwhelmedError) as excinfo:
provider.attribute_query('gender')
assert excinfo.value.message == 'Error executing XML database query: ConnectionRefusedError'
def test_reconnects_to_basex_after_error(mocker, provider):
def raise_connection_refused(*args):
raise ConnectionRefusedError()
mocker.patch('BaseXClient.BaseXClient.Session.__init__', new=raise_connection_refused)
with pytest.raises(ServerOverwhelmedError):
provider.attribute_query('gender')
basex_session_mock = MagicMock()
mocker.patch('BaseXClient.BaseXClient.Session', basex_session_mock)
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["value1","value2"]')
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
def test_retries_queries(basex_session_mock, provider):
class MockQuery:
def execute(self):
return '["value1","value2"]'
basex_session_mock.return_value.query.side_effect = [Exception(), Exception(), MockQuery()]
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
assert basex_session_mock.return_value.query.call_count == 3
def test_reconnects_to_basex_even_if_close_fails(basex_session_mock, provider):
class MockQuery:
def execute(self):
return '["value1","value2"]'
def raise_broken_pipe_error():
raise BrokenPipeError()
basex_session_mock.return_value.query.side_effect = [Exception(), Exception(), MockQuery()]
basex_session_mock.return_value.close = raise_broken_pipe_error
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
def test_closes_basex_session_even_on_errors(mocker, basex_session_mock, provider):
def raise_exception():
raise ServerOverwhelmedError('exception on query')
mock_basex_on_query_execute(mocker, basex_session_mock, raise_exception)
with pytest.raises(ServerOverwhelmedError):
provider.attribute_query('gender')
assert basex_session_mock.return_value.close.call_count == 3
def test_build_query_string_adds_extra_attributes(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '[]')
query = TextQuery({'sequences': [[{'attributes': {'lemma': 'λόγος'}}]]}, lambda x: None)
provider.text_query(query)
assert '"class": data($w/@class)' in basex_query_spy.call_args.args[1]
assert '"lemma": data($w/@lemma)' in basex_query_spy.call_args.args[1]
assert '"normalized": data($w/@normalized)' in basex_query_spy.call_args.args[1]
assert '"person": data($w/@person)' in basex_query_spy.call_args.args[1]
assert '"number": data($w/@number)' in basex_query_spy.call_args.args[1]
assert '"gender": data($w/@gender)' in basex_query_spy.call_args.args[1]
assert '"case": data($w/@case)' in basex_query_spy.call_args.args[1]
assert '"tense": data($w/@tense)' in basex_query_spy.call_args.args[1]
assert '"voice": data($w/@voice)' in basex_query_spy.call_args.args[1]
assert '"mood": data($w/@mood)' in basex_query_spy.call_args.args[1]
def test_text_query_includes_extra_attributes(mocker, basex_session_mock, provider):
basex_results = [
'{"references": ["Mark.1.1"], "words": [{"gender": "feminine", "matchedSequence": -1, "text": "ἣν", "matchedWordQuery": -1}]}']
    basex_string = f"[{','.join(basex_results)}]"
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert result.passages[0].words[0].attributes["gender"] == "feminine"
def test_text_query_excludes_null_attributes(mocker, basex_session_mock, provider):
basex_results = ['{"references": ["Mark.1.1"], "words": [{"gender": "feminine", "matchedSequence": -1, "text": "ἣν", "tense": null, "matchedWordQuery": -1}]}']
    basex_string = f"[{','.join(basex_results)}]"
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert "tense" not in result.passages[0].words[0].attributes
def test_text_query_adds_pagination_info(mocker, basex_session_mock, provider):
basex_results = ['{"references": ["Mark.1.1"], "words": []}' for _ in range(23)]
    basex_string = f"[{','.join(basex_results)}]"
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert result.page == 1
assert result.total_pages == 3
def test_text_query_returns_requested_page(mocker, basex_session_mock, provider):
    basex_results = [f'{{"references": ["Mark.1.{i}"], "words": []}}' for i in range(23)]
basex_string = f'[{','.join(basex_results)}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': [], 'page': 2}, lambda x: None))
assert result.page == 2
assert len(result.passages) == 10
assert result.passages[0].references[0].verse == 10
assert result.passages[9].references[0].verse == 19
def test_text_query_handles_page_smaller_than_pagesize(mocker, basex_session_mock, provider):
    basex_results = [f'{{"references": ["Mark.1.1"], "words": []}}' for i in range(5)]
basex_string = f'[{','.join(basex_results)}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert len(result.passages) == 5
def test_text_query_handles_request_for_invalid_page(mocker, basex_session_mock, provider):
basex_string = '[{"references": ["Mark.1.1"], "words": []}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
with pytest.raises(ProbableBugError) as excinfo:
provider.text_query(TextQuery({'sequences': [], 'page': 2}, lambda x: None))
assert excinfo.value.message == 'Requested page 2 is out of bounds for results with 1 total pages'
def test_text_query_handles_pagination_for_no_results(mocker, basex_session_mock, provider):
basex_string = '[]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert result.page == 1
assert result.total_pages == 1
def test_text_query_error_on_query(mocker, basex_session_mock, provider):
def raise_exception():
raise Exception()
mock_basex_on_query_execute(mocker, basex_session_mock, raise_exception)
with pytest.raises(ServerOverwhelmedError) as excinfo:
provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert excinfo.value.message == 'Error executing XML database query: Exception'
def test_text_query_error_on_processing_results(mocker, basex_session_mock, provider):
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '{"invalid": "json"}')
with pytest.raises(ProbableBugError) as excinfo:
provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert excinfo.value.message == 'Error parsing XML database response JSON: Results are not a list'
def test_text_query_handles_word_query_with_no_attributes(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '[]')
query = TextQuery({
'sequences': [
[
{
'attributes': {
'lemma': 'λόγος'
}
},
{
# no attributes
}
]
]
}, lambda x: None)
provider.text_query(query)
assert "for $word0 in $sentence//w[@lemma='λόγος'] for $word1 in $sentence//w" in basex_query_spy.call_args.args[1]
def test_text_query_handles_disallowed_attribute(provider):
query = TextQuery({
'sequences': [
[
{
'attributes': {
'fake-attr': 'value'
}
}
]
]
}, lambda x: None)
with pytest.raises(ProbableBugError) as excinfo:
provider.text_query(query)
assert excinfo.value.message == 'Attribute \'fake-attr\' not allowed'
def test_text_query_sanitizes_attribute_values(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '[]')
query = TextQuery({
'sequences': [
[
{
'attributes': {
'lemma': "&μετ'"
}
}
]
]
}, lambda x: None)
provider.text_query(query)
assert "for $word0 in $sentence//w[@lemma='μετ’']" in basex_query_spy.call_args.args[1]
def test_attribute_query_success(mocker, basex_session_mock, provider):
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["value1","value2"]')
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
def test_attribute_query_query_string(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["value1","value2"]')
provider.attribute_query('gender')
assert basex_query_spy.call_args.args[1] == """
json:serialize(
array {
sort(distinct-values(//w/@gender))
}
)
"""
def test_attribute_query_lemma_caching(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["lemma1","lemma2"]')
result1 = provider.attribute_query('lemma')
assert result1 == ['lemma1', 'lemma2']
assert basex_query_spy.call_count == 1
result2 = provider.attribute_query('lemma')
assert result2 == ['lemma1', 'lemma2']
assert basex_query_spy.call_count == 1
def test_attribute_query_surface_form_caching(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["normalized1","normalized2"]')
result1 = provider.attribute_query('normalized')
assert result1 == ['normalized1', 'normalized2']
assert basex_query_spy.call_count == 1
result2 = provider.attribute_query('normalized')
assert result2 == ['normalized1', 'normalized2']
assert basex_query_spy.call_count == 1
def test_attribute_query_disallowed_attribute(provider):
with pytest.raises(ProbableBugError) as excinfo:
provider.attribute_query('disallowed')
assert excinfo.value.message == 'Attribute \'disallowed\' not allowed'
def test_attribute_query_error_on_query(mocker, basex_session_mock, provider):
def raise_exception():
raise Exception()
mock_basex_on_query_execute(mocker, basex_session_mock, raise_exception)
with pytest.raises(ServerOverwhelmedError) as excinfo:
provider.attribute_query('gender')
assert excinfo.value.message == 'Error executing XML database query: Exception'
def test_attribute_query_error_on_processing_results(mocker, basex_session_mock, provider):
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: 'not valid json')
with pytest.raises(ProbableBugError) as excinfo:
provider.attribute_query('gender')
assert excinfo.value.message == 'Error processing query results: JSONDecodeError'
|
import pytest
from text_providers.Nestle1904LowfatProvider import Nestle1904LowfatProvider
from typing import Callable
from unittest.mock import MagicMock
from AnoixoError import ProbableBugError, ServerOverwhelmedError
from TextQuery import TextQuery
@pytest.fixture
def basex_session_mock(mocker):
basex_session_mock = MagicMock()
mocker.patch('BaseXClient.BaseXClient.Session', basex_session_mock)
return basex_session_mock
@pytest.fixture
def provider():
return Nestle1904LowfatProvider()
def mock_basex_on_query_execute(mocker, basex_session_mock: MagicMock, on_query_execute: Callable):
class MockQuery:
def __init__(self, query_string):
pass
def execute(self):
return on_query_execute()
basex_session_mock.return_value.query = lambda query_string: MockQuery(query_string)
spy = mocker.spy(MockQuery, '__init__')
return spy
def test_handles_basex_not_available(mocker, provider):
def raise_connection_refused(*args):
raise ConnectionRefusedError()
mocker.patch('BaseXClient.BaseXClient.Session.__init__', new=raise_connection_refused)
with pytest.raises(ServerOverwhelmedError) as excinfo:
provider.attribute_query('gender')
assert excinfo.value.message == 'Error executing XML database query: ConnectionRefusedError'
def test_reconnects_to_basex_after_error(mocker, provider):
def raise_connection_refused(*args):
raise ConnectionRefusedError()
mocker.patch('BaseXClient.BaseXClient.Session.__init__', new=raise_connection_refused)
with pytest.raises(ServerOverwhelmedError):
provider.attribute_query('gender')
basex_session_mock = MagicMock()
mocker.patch('BaseXClient.BaseXClient.Session', basex_session_mock)
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["value1","value2"]')
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
def test_retries_queries(basex_session_mock, provider):
class MockQuery:
def execute(self):
return '["value1","value2"]'
basex_session_mock.return_value.query.side_effect = [Exception(), Exception(), MockQuery()]
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
assert basex_session_mock.return_value.query.call_count == 3
def test_reconnects_to_basex_even_if_close_fails(basex_session_mock, provider):
class MockQuery:
def execute(self):
return '["value1","value2"]'
def raise_broken_pipe_error():
raise BrokenPipeError()
basex_session_mock.return_value.query.side_effect = [Exception(), Exception(), MockQuery()]
basex_session_mock.return_value.close = raise_broken_pipe_error
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
def test_closes_basex_session_even_on_errors(mocker, basex_session_mock, provider):
def raise_exception():
raise ServerOverwhelmedError('exception on query')
mock_basex_on_query_execute(mocker, basex_session_mock, raise_exception)
with pytest.raises(ServerOverwhelmedError):
provider.attribute_query('gender')
assert basex_session_mock.return_value.close.call_count == 3
def test_build_query_string_adds_extra_attributes(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '[]')
query = TextQuery({'sequences': [[{'attributes': {'lemma': 'λόγος'}}]]}, lambda x: None)
provider.text_query(query)
assert '"class": data($w/@class)' in basex_query_spy.call_args.args[1]
assert '"lemma": data($w/@lemma)' in basex_query_spy.call_args.args[1]
assert '"normalized": data($w/@normalized)' in basex_query_spy.call_args.args[1]
assert '"person": data($w/@person)' in basex_query_spy.call_args.args[1]
assert '"number": data($w/@number)' in basex_query_spy.call_args.args[1]
assert '"gender": data($w/@gender)' in basex_query_spy.call_args.args[1]
assert '"case": data($w/@case)' in basex_query_spy.call_args.args[1]
assert '"tense": data($w/@tense)' in basex_query_spy.call_args.args[1]
assert '"voice": data($w/@voice)' in basex_query_spy.call_args.args[1]
assert '"mood": data($w/@mood)' in basex_query_spy.call_args.args[1]
def test_text_query_includes_extra_attributes(mocker, basex_session_mock, provider):
basex_results = [
'{"references": ["Mark.1.1"], "words": [{"gender": "feminine", "matchedSequence": -1, "text": "ἣν", "matchedWordQuery": -1}]}']
basex_string = f'[{",".join(basex_results)}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert result.passages[0].words[0].attributes["gender"] == "feminine"
def test_text_query_excludes_null_attributes(mocker, basex_session_mock, provider):
basex_results = ['{"references": ["Mark.1.1"], "words": [{"gender": "feminine", "matchedSequence": -1, "text": "ἣν", "tense": null, "matchedWordQuery": -1}]}']
basex_string = f'[{",".join(basex_results)}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert "tense" not in result.passages[0].words[0].attributes
def test_text_query_adds_pagination_info(mocker, basex_session_mock, provider):
basex_results = ['{"references": ["Mark.1.1"], "words": []}' for _ in range(23)]
basex_string = f'[{",".join(basex_results)}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert result.page == 1
assert result.total_pages == 3
def test_text_query_returns_requested_page(mocker, basex_session_mock, provider):
basex_results = [f'{{"references": ["Mark.1.{i}"], "words": []}}' for i in range(23)]
basex_string = f'[{",".join(basex_results)}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': [], 'page': 2}, lambda x: None))
assert result.page == 2
assert len(result.passages) == 10
assert result.passages[0].references[0].verse == 10
assert result.passages[9].references[0].verse == 19
def test_text_query_handles_page_smaller_than_pagesize(mocker, basex_session_mock, provider):
basex_results = [f'{{"references": ["Mark.1.1"], "words": []}}' for i in range(5)]
basex_string = f'[{",".join(basex_results)}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert len(result.passages) == 5
def test_text_query_handles_request_for_invalid_page(mocker, basex_session_mock, provider):
basex_string = '[{"references": ["Mark.1.1"], "words": []}]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
with pytest.raises(ProbableBugError) as excinfo:
provider.text_query(TextQuery({'sequences': [], 'page': 2}, lambda x: None))
assert excinfo.value.message == 'Requested page 2 is out of bounds for results with 1 total pages'
def test_text_query_handles_pagination_for_no_results(mocker, basex_session_mock, provider):
basex_string = '[]'
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: basex_string)
result = provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert result.page == 1
assert result.total_pages == 1
def test_text_query_error_on_query(mocker, basex_session_mock, provider):
def raise_exception():
raise Exception()
mock_basex_on_query_execute(mocker, basex_session_mock, raise_exception)
with pytest.raises(ServerOverwhelmedError) as excinfo:
provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert excinfo.value.message == 'Error executing XML database query: Exception'
def test_text_query_error_on_processing_results(mocker, basex_session_mock, provider):
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '{"invalid": "json"}')
with pytest.raises(ProbableBugError) as excinfo:
provider.text_query(TextQuery({'sequences': []}, lambda x: None))
assert excinfo.value.message == 'Error parsing XML database response JSON: Results are not a list'
def test_text_query_handles_word_query_with_no_attributes(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '[]')
query = TextQuery({
'sequences': [
[
{
'attributes': {
'lemma': 'λόγος'
}
},
{
# no attributes
}
]
]
}, lambda x: None)
provider.text_query(query)
assert "for $word0 in $sentence//w[@lemma='λόγος'] for $word1 in $sentence//w" in basex_query_spy.call_args.args[1]
def test_text_query_handles_disallowed_attribute(provider):
query = TextQuery({
'sequences': [
[
{
'attributes': {
'fake-attr': 'value'
}
}
]
]
}, lambda x: None)
with pytest.raises(ProbableBugError) as excinfo:
provider.text_query(query)
assert excinfo.value.message == 'Attribute \'fake-attr\' not allowed'
def test_text_query_sanitizes_attribute_values(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '[]')
query = TextQuery({
'sequences': [
[
{
'attributes': {
'lemma': "&μετ'"
}
}
]
]
}, lambda x: None)
provider.text_query(query)
assert "for $word0 in $sentence//w[@lemma='μετ’']" in basex_query_spy.call_args.args[1]
def test_attribute_query_success(mocker, basex_session_mock, provider):
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["value1","value2"]')
result = provider.attribute_query('gender')
assert result == ['value1', 'value2']
def test_attribute_query_query_string(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["value1","value2"]')
provider.attribute_query('gender')
assert basex_query_spy.call_args.args[1] == """
json:serialize(
array {
sort(distinct-values(//w/@gender))
}
)
"""
def test_attribute_query_lemma_caching(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["lemma1","lemma2"]')
result1 = provider.attribute_query('lemma')
assert result1 == ['lemma1', 'lemma2']
assert basex_query_spy.call_count == 1
result2 = provider.attribute_query('lemma')
assert result2 == ['lemma1', 'lemma2']
assert basex_query_spy.call_count == 1
def test_attribute_query_surface_form_caching(mocker, basex_session_mock, provider):
basex_query_spy = mock_basex_on_query_execute(mocker, basex_session_mock, lambda: '["normalized1","normalized2"]')
result1 = provider.attribute_query('normalized')
assert result1 == ['normalized1', 'normalized2']
assert basex_query_spy.call_count == 1
result2 = provider.attribute_query('normalized')
assert result2 == ['normalized1', 'normalized2']
assert basex_query_spy.call_count == 1
def test_attribute_query_disallowed_attribute(provider):
with pytest.raises(ProbableBugError) as excinfo:
provider.attribute_query('disallowed')
assert excinfo.value.message == 'Attribute \'disallowed\' not allowed'
def test_attribute_query_error_on_query(mocker, basex_session_mock, provider):
def raise_exception():
raise Exception()
mock_basex_on_query_execute(mocker, basex_session_mock, raise_exception)
with pytest.raises(ServerOverwhelmedError) as excinfo:
provider.attribute_query('gender')
assert excinfo.value.message == 'Error executing XML database query: Exception'
def test_attribute_query_error_on_processing_results(mocker, basex_session_mock, provider):
mock_basex_on_query_execute(mocker, basex_session_mock, lambda: 'not valid json')
with pytest.raises(ProbableBugError) as excinfo:
provider.attribute_query('gender')
assert excinfo.value.message == 'Error processing query results: JSONDecodeError'
|
import asyncio
import datetime
from typing import Optional
import discord
from ElevatorBot.database.database import lookupDiscordID
from ElevatorBot.events.backgroundTasks import UpdateActivityDB
from ElevatorBot.events.baseEvent import BaseEvent
from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer
from ElevatorBot.backendNetworking.formating import split_into_chucks_of_max_2000_characters
from ElevatorBot.backendNetworking.persistentMessages import bot_status
from ElevatorBot.backendNetworking.roleLookup import (
assignRolesToUser,
removeRolesFromUser,
get_player_roles,
)
from ElevatorBot.networking.bungieAuth import handle_and_return_token
from ElevatorBot.networking.network import get_json_from_url
from ElevatorBot.static.config import CLANID, BOTDEVCHANNELID
from ElevatorBot.static.globals import *
# # todo call this one elevator, and the sleep. Backend calls elevator webhook when done
# class AutomaticRoleAssignment(BaseEvent):
# """Will automatically _update the roles"""
#
# def __init__(self):
# # Set the interval for this event
# dow_day_of_week = "*"
# dow_hour = 1
# dow_minute = 0
# super().__init__(
# scheduler_type="cron",
# dow_day_of_week=dow_day_of_week,
# dow_hour=dow_hour,
# dow_minute=dow_minute,
# )
#
# async def run(self, client):
# async def update_user(discord_member: discord.Member) -> Optional[str]:
# if discord_member.bot:
# return None
#
# destiny_player = await DestinyPlayer.from_discord_id(discord_member.id)
# if not destiny_player:
# return
#
# # gets the roles of the specific player and assigns/removes them
# roles_to_add, roles_to_remove, _, _ = await get_player_roles(discord_member, destiny_player)
#
# # assign roles
# await discord_member.add_roles(*roles_to_add, reason="Achievement Role Earned")
#
# # _delete roles
# await discord_member.remove_roles(*roles_to_remove, reason="Achievement Role Not Deserved")
#
# # convert to str
# new_roles = [role.name for role in roles_to_add]
# remove_roles = [role.name for role in roles_to_remove]
#
# if new_roles or remove_roles:
# return f'Updated player {discord_member.mention} by adding `{', '.join(new_roles or ['nothing'])}` and removing `{', '.join(remove_roles or ['nothing'])}`\n'
# else:
# return None
#
# print("Running the automatic role assignment...")
#
# # acquires the newtonslab channel from the descend server and notifies about starting
# newtonslab = client.get_channel(BOTDEVCHANNELID)
# guild = newtonslab.guild
#
# async with newtonslab.typing():
# update = UpdateActivityDB()
# await update.run(client)
#
# joblist = []
# for member in guild.members:
# # only allow people who accepted the rules
# if not member.pending:
# joblist.append(update_user(member))
#
# results = await asyncio.gather(*joblist)
# news = []
# for result in results:
# if result:
# news.append(result)
#
# if news:
# for chunk in split_into_chucks_of_max_2000_characters(text_list=news):
# await newtonslab.send(chunk)
#
# # _update the status
# await bot_status(
# client,
# "Achievement Role Update",
# datetime.datetime.now(tz=datetime.timezone.utc),
# )
class AutoRegisteredRole(BaseEvent):
"""Will automatically _update the registration and clan roles"""
def __init__(self):
interval_minutes = 30 # Set the interval for this event
super().__init__(scheduler_type="interval", interval_minutes=interval_minutes)
async def run(self, client):
# get all clan members discordID
memberlist = []
for member in (await get_json_from_url(f"https://www.bungie.net/Platform/GroupV2/{CLANID}/Members/")).content[
"Response"
]["results"]:
destinyID = int(member["destinyUserInfo"]["membershipId"])
discordID = await lookupDiscordID(destinyID)
if discordID is not None:
memberlist.append(discordID)
if not len(memberlist) > 5:
# error.log
print("something broke at AutoRegisteredRole, clansize <= 5")
return
for guild in client.guilds:
newtonsLab = discord.utils.get(guild.channels, id=BOTDEVCHANNELID)
clan_role = discord.utils.get(guild.roles, id=clan_role_id)
member_role = discord.utils.get(guild.roles, id=member_role_id)
for member in guild.members:
# only allow people who accepted the rules
if member.pending:
continue
                # don't do that for bots
if member.bot:
continue
# add "Registered" if they have a token but not the role
if (await handle_and_return_token(member.id)).token:
if discord.utils.get(guild.roles, id=not_registered_role_id) in member.roles:
await removeRolesFromUser([not_registered_role_id], member, guild)
await assignRolesToUser([registered_role_id], member, guild)
# add "Not Registered" if they have no token but the role (after unregister)
else:
if discord.utils.get(guild.roles, id=registered_role_id) in member.roles:
await removeRolesFromUser([registered_role_id], member, guild)
await assignRolesToUser([not_registered_role_id], member, guild)
# add clan role if user is in clan, has token, is member and doesn't have clan role
if clan_role not in member.roles:
if (member_role in member.roles) and (member.id in memberlist):
await assignRolesToUser([clan_role_id], member, guild)
if newtonsLab:
await newtonsLab.send(f"Added Descend role to {member.mention}")
                # remove clan role if the user is no longer in the clan, not a member, or has no token
if clan_role in member.roles:
if (member_role not in member.roles) or (member.id not in memberlist):
await removeRolesFromUser([clan_role_id], member, guild)
if newtonsLab:
await newtonsLab.send(f"Removed Descend role from {member.mention}")
        # update the status
await bot_status(
client,
"Member Role Update",
datetime.datetime.now(tz=datetime.timezone.utc),
)
|
import asyncio
import datetime
from typing import Optional
import discord
from ElevatorBot.database.database import lookupDiscordID
from ElevatorBot.events.backgroundTasks import UpdateActivityDB
from ElevatorBot.events.baseEvent import BaseEvent
from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer
from ElevatorBot.backendNetworking.formating import split_into_chucks_of_max_2000_characters
from ElevatorBot.backendNetworking.persistentMessages import bot_status
from ElevatorBot.backendNetworking.roleLookup import (
assignRolesToUser,
removeRolesFromUser,
get_player_roles,
)
from ElevatorBot.networking.bungieAuth import handle_and_return_token
from ElevatorBot.networking.network import get_json_from_url
from ElevatorBot.static.config import CLANID, BOTDEVCHANNELID
from ElevatorBot.static.globals import *
# # todo call this one elevator, and the sleep. Backend calls elevator webhook when done
# class AutomaticRoleAssignment(BaseEvent):
# """Will automatically _update the roles"""
#
# def __init__(self):
# # Set the interval for this event
# dow_day_of_week = "*"
# dow_hour = 1
# dow_minute = 0
# super().__init__(
# scheduler_type="cron",
# dow_day_of_week=dow_day_of_week,
# dow_hour=dow_hour,
# dow_minute=dow_minute,
# )
#
# async def run(self, client):
# async def update_user(discord_member: discord.Member) -> Optional[str]:
# if discord_member.bot:
# return None
#
# destiny_player = await DestinyPlayer.from_discord_id(discord_member.id)
# if not destiny_player:
# return
#
# # gets the roles of the specific player and assigns/removes them
# roles_to_add, roles_to_remove, _, _ = await get_player_roles(discord_member, destiny_player)
#
# # assign roles
# await discord_member.add_roles(*roles_to_add, reason="Achievement Role Earned")
#
# # _delete roles
# await discord_member.remove_roles(*roles_to_remove, reason="Achievement Role Not Deserved")
#
# # convert to str
# new_roles = [role.name for role in roles_to_add]
# remove_roles = [role.name for role in roles_to_remove]
#
# if new_roles or remove_roles:
# return f'Updated player {discord_member.mention} by adding `{", ".join(new_roles or ["nothing"])}` and removing `{", ".join(remove_roles or ["nothing"])}`\n'
# else:
# return None
#
# print("Running the automatic role assignment...")
#
# # acquires the newtonslab channel from the descend server and notifies about starting
# newtonslab = client.get_channel(BOTDEVCHANNELID)
# guild = newtonslab.guild
#
# async with newtonslab.typing():
# update = UpdateActivityDB()
# await update.run(client)
#
# joblist = []
# for member in guild.members:
# # only allow people who accepted the rules
# if not member.pending:
# joblist.append(update_user(member))
#
# results = await asyncio.gather(*joblist)
# news = []
# for result in results:
# if result:
# news.append(result)
#
# if news:
# for chunk in split_into_chucks_of_max_2000_characters(text_list=news):
# await newtonslab.send(chunk)
#
# # _update the status
# await bot_status(
# client,
# "Achievement Role Update",
# datetime.datetime.now(tz=datetime.timezone.utc),
# )
class AutoRegisteredRole(BaseEvent):
"""Will automatically _update the registration and clan roles"""
def __init__(self):
interval_minutes = 30 # Set the interval for this event
super().__init__(scheduler_type="interval", interval_minutes=interval_minutes)
async def run(self, client):
# get all clan members discordID
memberlist = []
for member in (await get_json_from_url(f"https://www.bungie.net/Platform/GroupV2/{CLANID}/Members/")).content[
"Response"
]["results"]:
destinyID = int(member["destinyUserInfo"]["membershipId"])
discordID = await lookupDiscordID(destinyID)
if discordID is not None:
memberlist.append(discordID)
if not len(memberlist) > 5:
# error.log
print("something broke at AutoRegisteredRole, clansize <= 5")
return
for guild in client.guilds:
newtonsLab = discord.utils.get(guild.channels, id=BOTDEVCHANNELID)
clan_role = discord.utils.get(guild.roles, id=clan_role_id)
member_role = discord.utils.get(guild.roles, id=member_role_id)
for member in guild.members:
# only allow people who accepted the rules
if member.pending:
continue
                # don't do that for bots
if member.bot:
continue
# add "Registered" if they have a token but not the role
if (await handle_and_return_token(member.id)).token:
if discord.utils.get(guild.roles, id=not_registered_role_id) in member.roles:
await removeRolesFromUser([not_registered_role_id], member, guild)
await assignRolesToUser([registered_role_id], member, guild)
# add "Not Registered" if they have no token but the role (after unregister)
else:
if discord.utils.get(guild.roles, id=registered_role_id) in member.roles:
await removeRolesFromUser([registered_role_id], member, guild)
await assignRolesToUser([not_registered_role_id], member, guild)
# add clan role if user is in clan, has token, is member and doesn't have clan role
if clan_role not in member.roles:
if (member_role in member.roles) and (member.id in memberlist):
await assignRolesToUser([clan_role_id], member, guild)
if newtonsLab:
await newtonsLab.send(f"Added Descend role to {member.mention}")
                # remove clan role if the user is no longer in the clan, not a member, or has no token
if clan_role in member.roles:
if (member_role not in member.roles) or (member.id not in memberlist):
await removeRolesFromUser([clan_role_id], member, guild)
if newtonsLab:
await newtonsLab.send(f"Removed Descend role from {member.mention}")
        # update the status
await bot_status(
client,
"Member Role Update",
datetime.datetime.now(tz=datetime.timezone.utc),
)
|
"""
Credit: m3hrdadfi
https://huggingface.co/m3hrdadfi/wav2vec2-large-xlsr-persian-v2
"""
import re
import string
import hazm
_normalizer = hazm.Normalizer()
chars_to_ignore = [
",",
"?",
".",
"!",
"-",
";",
":",
'""',
"%",
"'",
'"',
"�",
"#",
"!",
"؟",
"?",
"«",
"»",
"،",
"(",
")",
"؛",
"'ٔ",
"٬",
"ٔ",
",",
"?",
".",
"!",
"-",
";",
":",
'"',
"“",
"%",
"‘",
"”",
"�",
"–",
"…",
"_",
"”",
"“",
"„",
"ā",
"š",
# "ء",
]
# In case of farsi
chars_to_ignore = chars_to_ignore + list(string.ascii_lowercase + string.digits)
chars_to_mapping = {
"ك": "ک",
"دِ": "د",
"بِ": "ب",
"زِ": "ز",
"ذِ": "ذ",
"شِ": "ش",
"سِ": "س",
"ى": "ی",
"ي": "ی",
"أ": "ا",
"ؤ": "و",
"ے": "ی",
"ۀ": "ه",
"ﭘ": "پ",
"ﮐ": "ک",
"ﯽ": "ی",
"ﺎ": "ا",
"ﺑ": "ب",
"ﺘ": "ت",
"ﺧ": "خ",
"ﺩ": "د",
"ﺱ": "س",
"ﻀ": "ض",
"ﻌ": "ع",
"ﻟ": "ل",
"ﻡ": "م",
"ﻢ": "م",
"ﻪ": "ه",
"ﻮ": "و",
"ﺍ": "ا",
"ة": "ه",
"ﯾ": "ی",
"ﯿ": "ی",
"ﺒ": "ب",
"ﺖ": "ت",
"ﺪ": "د",
"ﺮ": "ر",
"ﺴ": "س",
"ﺷ": "ش",
"ﺸ": "ش",
"ﻋ": "ع",
"ﻤ": "م",
"ﻥ": "ن",
"ﻧ": "ن",
"ﻭ": "و",
"ﺭ": "ر",
"ﮔ": "گ",
# "ها": " ها", "ئ": "ی",
"a": " ای ",
"b": " بی ",
"c": " سی ",
"d": " دی ",
"e": " ایی ",
"f": " اف ",
"g": " جی ",
"h": " اچ ",
"i": " آی ",
"j": " جی ",
"k": " کی ",
"l": " ال ",
"m": " ام ",
"n": " ان ",
"o": " او ",
"p": " پی ",
"q": " کیو ",
"r": " آر ",
"s": " اس ",
"t": " تی ",
"u": " یو ",
"v": " وی ",
"w": " دبلیو ",
"x": " اکس ",
"y": " وای ",
"z": " زد ",
"\u200c": " ",
"\u200d": " ",
"\u200e": " ",
"\u200f": " ",
"\ufeff": " ",
}
def multiple_replace(text, chars_to_mapping):
pattern = "|".join(map(re.escape, chars_to_mapping.keys()))
return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))
def remove_special_characters(text, chars_to_ignore_regex):
text = re.sub(chars_to_ignore_regex, "", text).lower()
return text
def normalize(text):
chars_to_ignore_regex = f"""[{"".join(chars_to_ignore)}]"""
text = text.lower().strip()
text = _normalizer.normalize(text)
text = multiple_replace(text, chars_to_mapping)
text = remove_special_characters(text, chars_to_ignore_regex)
text = re.sub(" +", " ", text)
text = text.strip() + " "
return text
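# Editor's note: the sketch below is an illustrative addition, not part of the original
# m3hrdadfi script. It shows how normalize() is typically applied to one raw transcript
# line; the sample sentence is an arbitrary assumption. normalize() lowercases the text,
# runs hazm's Normalizer, maps Arabic presentation forms and Latin letters to Persian
# equivalents, then strips the ignored punctuation and collapses repeated spaces.
def _example_normalize():  # illustrative only, never called by the script itself
    sample = "سلام! این یک متنِ «تستی» است, ok?"
    cleaned = normalize(sample)
    # Latin letters such as "ok" come out as spelled-out Persian letter names, and the
    # punctuation listed in chars_to_ignore is removed entirely.
    return cleaned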
|
"""
Credit: m3hrdadfi
https://huggingface.co/m3hrdadfi/wav2vec2-large-xlsr-persian-v2
"""
import re
import string
import hazm
_normalizer = hazm.Normalizer()
chars_to_ignore = [
",",
"?",
".",
"!",
"-",
";",
":",
'""',
"%",
"'",
'"',
"�",
"#",
"!",
"؟",
"?",
"«",
"»",
"،",
"(",
")",
"؛",
"'ٔ",
"٬",
"ٔ",
",",
"?",
".",
"!",
"-",
";",
":",
'"',
"“",
"%",
"‘",
"”",
"�",
"–",
"…",
"_",
"”",
"“",
"„",
"ā",
"š",
# "ء",
]
# In case of farsi
chars_to_ignore = chars_to_ignore + list(string.ascii_lowercase + string.digits)
chars_to_mapping = {
"ك": "ک",
"دِ": "د",
"بِ": "ب",
"زِ": "ز",
"ذِ": "ذ",
"شِ": "ش",
"سِ": "س",
"ى": "ی",
"ي": "ی",
"أ": "ا",
"ؤ": "و",
"ے": "ی",
"ۀ": "ه",
"ﭘ": "پ",
"ﮐ": "ک",
"ﯽ": "ی",
"ﺎ": "ا",
"ﺑ": "ب",
"ﺘ": "ت",
"ﺧ": "خ",
"ﺩ": "د",
"ﺱ": "س",
"ﻀ": "ض",
"ﻌ": "ع",
"ﻟ": "ل",
"ﻡ": "م",
"ﻢ": "م",
"ﻪ": "ه",
"ﻮ": "و",
"ﺍ": "ا",
"ة": "ه",
"ﯾ": "ی",
"ﯿ": "ی",
"ﺒ": "ب",
"ﺖ": "ت",
"ﺪ": "د",
"ﺮ": "ر",
"ﺴ": "س",
"ﺷ": "ش",
"ﺸ": "ش",
"ﻋ": "ع",
"ﻤ": "م",
"ﻥ": "ن",
"ﻧ": "ن",
"ﻭ": "و",
"ﺭ": "ر",
"ﮔ": "گ",
# "ها": " ها", "ئ": "ی",
"a": " ای ",
"b": " بی ",
"c": " سی ",
"d": " دی ",
"e": " ایی ",
"f": " اف ",
"g": " جی ",
"h": " اچ ",
"i": " آی ",
"j": " جی ",
"k": " کی ",
"l": " ال ",
"m": " ام ",
"n": " ان ",
"o": " او ",
"p": " پی ",
"q": " کیو ",
"r": " آر ",
"s": " اس ",
"t": " تی ",
"u": " یو ",
"v": " وی ",
"w": " دبلیو ",
"x": " اکس ",
"y": " وای ",
"z": " زد ",
"\u200c": " ",
"\u200d": " ",
"\u200e": " ",
"\u200f": " ",
"\ufeff": " ",
}
def multiple_replace(text, chars_to_mapping):
pattern = "|".join(map(re.escape, chars_to_mapping.keys()))
return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))
def remove_special_characters(text, chars_to_ignore_regex):
text = re.sub(chars_to_ignore_regex, "", text).lower()
return text
def normalize(text):
chars_to_ignore_regex = f"""[{"".join(chars_to_ignore)}]"""
text = text.lower().strip()
text = _normalizer.normalize(text)
text = multiple_replace(text, chars_to_mapping)
text = remove_special_characters(text, chars_to_ignore_regex)
text = re.sub(" +", " ", text)
text = text.strip() + " "
return text
|
import discord
from discord.ext import commands
from discord import utils
import asyncio
import json
class Suggest():
def __init__(self, bot):
self.bot = bot
@commands.command(name="suggest")
@commands.guild_only()
async def suggest(self, ctx):
"""Test"""
if self.bot.config["GUILDS"][str(ctx.guild.id)]["TOGGLED"] == "OFF":
await ctx.send("Suggestions currently aren't open!")
return
author = ctx.author
time = discord.Embed(title=f'Time', description=f'You ran out of time, please try again!', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xFF4C4C)
briefembed = discord.Embed(title=f'Suggest', description=f'Please give a brief explanation of your suggestion!', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xffffff)
        explainembed = discord.Embed(title=f'Suggest', description=f'Please explain your suggestion in further detail, maybe with an example!', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xffffff)
channelnotexist = discord.Embed(title=f'Suggest', description=f'The channel you gave does not exist.', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xffffff)
await ctx.message.delete()
if self.bot.config["GUILDS"][str(ctx.guild.id)]["TOGGLEPM"] == "ON":
def check(m):
return True if m.channel.id == ctx.channel.id and m.author.id == author.id else False
msg = await ctx.send(f'Please reply to the following {author.mention}!')
await ctx.send(embed=briefembed)
try:
brief = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await ctx.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
await msg.delete()
return
await msg.delete()
await ctx.send(embed=explainembed)
try:
explain = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await ctx.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
return
embed = discord.Embed(title=f'Suggestion ID: {self.bot.config['GUILDS'][str(ctx.guild.id)]['ID']}', colour=0xffffff)
embed.add_field(name=f'Brief Explanation: ', value=f'{brief.content}')
embed.add_field(name=f'Detailed Explanation: ', value=f'{explain.content}')
embed.set_footer(text=f'Suggestion by: {author.name} • Bot made by DerpDays')
try:
channel = discord.utils.get(ctx.guild.text_channels, id=int(self.bot.config["GUILDS"][str(ctx.guild.id)]["OUTPUT"]))
msg = await channel.send(embed=embed)
except:
                await ctx.send(embed=discord.Embed(title=f'Suggest', description=f"Sending your suggestion failed. This could be because the owner didn't set the output properly or your suggestion exceeded 2,000 characters. To configure the bot properly do {ctx.prefix}suggestsettings", color=0xff4c4c))
return
await msg.add_reaction("✅")
await msg.add_reaction("❌")
id = self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"]
newid = int(id) + 1
self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"] = str(newid)
with open('settings.json', 'w') as f:
json.dump(self.bot.config, f, indent=2)
if self.bot.config["GUILDS"][str(ctx.guild.id)]["TOGGLEPM"] == "OFF":
def check(m):
return True if m.channel.id == author.dm_channel.id and m.author.id == author.id else False
            msg = await ctx.send(f"{author.mention} Check your PM's")
await author.send(embed=briefembed)
try:
brief = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await author.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
await msg.delete()
return
await msg.delete()
await author.send(embed=explainembed)
try:
explain = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await author.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
return
embed = discord.Embed(title=f'Suggestion ID: {self.bot.config['GUILDS'][str(ctx.guild.id)]['ID']}', colour=0xffffff)
embed.add_field(name=f'Brief Explanation: ', value=f'{brief.content}')
embed.add_field(name=f'Detailed Explanation: ', value=f'{explain.content}')
embed.set_footer(text=f'Suggestion by: {author.name} • Bot made by DerpDays')
try:
channel = discord.utils.get(ctx.guild.text_channels, id=int(self.bot.config["GUILDS"][str(ctx.guild.id)]["OUTPUT"]))
msg = await channel.send(embed=embed)
except:
                await author.send(embed=discord.Embed(title=f'Suggest', description=f"Sending your suggestion failed. This could be because the owner didn't set the output properly or your suggestion exceeded 2,000 characters. To configure the bot properly do {ctx.prefix}suggestsettings", color=0xff4c4c))
return
await msg.add_reaction("✅")
await msg.add_reaction("❌")
id = self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"]
newid = int(id) + 1
self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"] = str(newid)
with open('settings.json', 'w') as f:
json.dump(self.bot.config, f, indent=2)
def setup(bot):
bot.add_cog(Suggest(bot))
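# Editor's note: the commented sketch below is an illustrative addition, not part of the
# original cog. It shows one way the extension might be loaded from a launcher script;
# the module path 'cogs.suggest' and the TOKEN variable are assumptions and must be
# adjusted to match the actual project layout.
#
#   bot = commands.Bot(command_prefix='!')
#   bot.load_extension('cogs.suggest')   # calls setup(bot) above and registers the cog
#   bot.run(TOKEN)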
|
import discord
from discord.ext import commands
from discord import utils
import asyncio
import json
class Suggest():
def __init__(self, bot):
self.bot = bot
@commands.command(name="suggest")
@commands.guild_only()
async def suggest(self, ctx):
"""Test"""
if self.bot.config["GUILDS"][str(ctx.guild.id)]["TOGGLED"] == "OFF":
await ctx.send("Suggestions currently aren't open!")
return
author = ctx.author
time = discord.Embed(title=f'Time', description=f'You ran out of time, please try again!', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xFF4C4C)
briefembed = discord.Embed(title=f'Suggest', description=f'Please give a brief explanation of your suggestion!', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xffffff)
        explainembed = discord.Embed(title=f'Suggest', description=f'Please explain your suggestion in further detail, maybe with an example!', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xffffff)
channelnotexist = discord.Embed(title=f'Suggest', description=f'The channel you gave does not exist.', footer=f'Suggestion by: {author.name} • Bot made by DerpDays', color=0xffffff)
await ctx.message.delete()
if self.bot.config["GUILDS"][str(ctx.guild.id)]["TOGGLEPM"] == "ON":
def check(m):
return True if m.channel.id == ctx.channel.id and m.author.id == author.id else False
msg = await ctx.send(f'Please reply to the following {author.mention}!')
await ctx.send(embed=briefembed)
try:
brief = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await ctx.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
await msg.delete()
return
await msg.delete()
await ctx.send(embed=explainembed)
try:
explain = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await ctx.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
return
embed = discord.Embed(title=f'Suggestion ID: {self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"]}', colour=0xffffff)
embed.add_field(name=f'Brief Explanation: ', value=f'{brief.content}')
embed.add_field(name=f'Detailed Explanation: ', value=f'{explain.content}')
embed.set_footer(text=f'Suggestion by: {author.name} • Bot made by DerpDays')
try:
channel = discord.utils.get(ctx.guild.text_channels, id=int(self.bot.config["GUILDS"][str(ctx.guild.id)]["OUTPUT"]))
msg = await channel.send(embed=embed)
except:
                await ctx.send(embed=discord.Embed(title=f'Suggest', description=f"Sending your suggestion failed. This could be because the owner didn't set the output properly or your suggestion exceeded 2,000 characters. To configure the bot properly do {ctx.prefix}suggestsettings", color=0xff4c4c))
return
await msg.add_reaction("✅")
await msg.add_reaction("❌")
id = self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"]
newid = int(id) + 1
self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"] = str(newid)
with open('settings.json', 'w') as f:
json.dump(self.bot.config, f, indent=2)
if self.bot.config["GUILDS"][str(ctx.guild.id)]["TOGGLEPM"] == "OFF":
def check(m):
return True if m.channel.id == author.dm_channel.id and m.author.id == author.id else False
            msg = await ctx.send(f"{author.mention} Check your PM's")
await author.send(embed=briefembed)
try:
brief = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await author.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
await msg.delete()
return
await msg.delete()
await author.send(embed=explainembed)
try:
explain = await self.bot.wait_for('message', check=check, timeout=300)
except asyncio.TimeoutError:
timemsg = await author.send(embed=time)
await asyncio.sleep(30)
await timemsg.delete()
return
embed = discord.Embed(title=f'Suggestion ID: {self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"]}', colour=0xffffff)
embed.add_field(name=f'Brief Explanation: ', value=f'{brief.content}')
embed.add_field(name=f'Detailed Explanation: ', value=f'{explain.content}')
embed.set_footer(text=f'Suggestion by: {author.name} • Bot made by DerpDays')
try:
channel = discord.utils.get(ctx.guild.text_channels, id=int(self.bot.config["GUILDS"][str(ctx.guild.id)]["OUTPUT"]))
msg = await channel.send(embed=embed)
except:
                await author.send(embed=discord.Embed(title=f'Suggest', description=f"Sending your suggestion failed. This could be because the owner didn't set the output properly or your suggestion exceeded 2,000 characters. To configure the bot properly do {ctx.prefix}suggestsettings", color=0xff4c4c))
return
await msg.add_reaction("✅")
await msg.add_reaction("❌")
id = self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"]
newid = int(id) + 1
self.bot.config["GUILDS"][str(ctx.guild.id)]["ID"] = str(newid)
with open('settings.json', 'w') as f:
json.dump(self.bot.config, f, indent=2)
def setup(bot):
bot.add_cog(Suggest(bot))
|
import copy
import os
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
import woodwork as ww
from joblib import Parallel, delayed
from sklearn.exceptions import NotFittedError
from sklearn.inspection import partial_dependence as sk_partial_dependence
from sklearn.inspection import \
permutation_importance as sk_permutation_importance
from sklearn.manifold import TSNE
from sklearn.metrics import auc as sklearn_auc
from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix
from sklearn.metrics import \
precision_recall_curve as sklearn_precision_recall_curve
from sklearn.metrics import roc_curve as sklearn_roc_curve
from sklearn.preprocessing import LabelBinarizer
from sklearn.tree import export_graphviz
from sklearn.utils.multiclass import unique_labels
import evalml
from evalml.exceptions import NullsInColumnWarning
from evalml.model_family import ModelFamily
from evalml.objectives.utils import get_objective
from evalml.problem_types import ProblemTypes, is_classification
from evalml.utils import (
_convert_woodwork_types_wrapper,
import_or_raise,
infer_feature_types,
jupyter_check
)
def confusion_matrix(y_true, y_predicted, normalize_method='true'):
"""Confusion matrix for binary and multiclass classification.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
        y_predicted (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: Confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
y_true = infer_feature_types(y_true)
y_predicted = infer_feature_types(y_predicted)
y_true = _convert_woodwork_types_wrapper(y_true.to_series()).to_numpy()
y_predicted = _convert_woodwork_types_wrapper(y_predicted.to_series()).to_numpy()
labels = unique_labels(y_true, y_predicted)
conf_mat = sklearn_confusion_matrix(y_true, y_predicted)
conf_mat = pd.DataFrame(conf_mat, index=labels, columns=labels)
if normalize_method is not None:
return normalize_confusion_matrix(conf_mat, normalize_method=normalize_method)
return conf_mat
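# Editor's note: a minimal usage sketch for confusion_matrix(), added for illustration and
# not part of evalml itself. The labels below are arbitrary assumptions; the sketch relies
# on the pandas import already present at the top of this module.
def _example_confusion_matrix():  # illustrative only
    y_true = pd.Series(["cat", "dog", "cat", "dog"])
    y_predicted = pd.Series(["cat", "cat", "cat", "dog"])
    # Rows hold the actual labels and columns the predicted labels; with the default
    # normalize_method='true' each row sums to 1.0.
    return confusion_matrix(y_true, y_predicted)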
def normalize_confusion_matrix(conf_mat, normalize_method='true'):
"""Normalizes a confusion matrix.
Arguments:
conf_mat (ww.DataTable, pd.DataFrame or np.ndarray): Confusion matrix to normalize.
normalize_method ({'true', 'pred', 'all'}): Normalization method. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: normalized version of the input confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
conf_mat = infer_feature_types(conf_mat)
conf_mat = _convert_woodwork_types_wrapper(conf_mat.to_dataframe())
col_names = conf_mat.columns
conf_mat = conf_mat.to_numpy()
with warnings.catch_warnings(record=True) as w:
if normalize_method == 'true':
conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=1)[:, np.newaxis]
elif normalize_method == 'pred':
conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=0)
elif normalize_method == 'all':
conf_mat = conf_mat.astype('float') / conf_mat.sum().sum()
else:
raise ValueError('Invalid value provided for "normalize_method": {}'.format(normalize_method))
if w and "invalid value encountered in" in str(w[0].message):
raise ValueError("Sum of given axis is 0 and normalization is not possible. Please select another option.")
conf_mat = pd.DataFrame(conf_mat, index=col_names, columns=col_names)
return conf_mat
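# Editor's note: an illustrative sketch of normalize_confusion_matrix(), not part of
# evalml. The raw counts are arbitrary assumptions.
def _example_normalize_confusion_matrix():  # illustrative only
    raw = pd.DataFrame([[5, 1], [2, 4]], index=["a", "b"], columns=["a", "b"])
    # 'pred' normalizes by column (share of each predicted label), 'true' by row and
    # 'all' by the grand total; a zero row/column sum raises ValueError.
    return normalize_confusion_matrix(raw, normalize_method='pred')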
def graph_confusion_matrix(y_true, y_pred, normalize_method='true', title_addition=None):
"""Generate and display a confusion matrix plot.
If `normalize_method` is set, hover text will show raw count, otherwise hover text will show count normalized with method 'true'.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
y_pred (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
title_addition (str or None): if not None, append to plot title. Defaults to None.
Returns:
plotly.Figure representing the confusion matrix plot generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
_ff = import_or_raise("plotly.figure_factory", error_msg="Cannot find dependency plotly.figure_factory")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
conf_mat = confusion_matrix(y_true, y_pred, normalize_method=None)
conf_mat_normalized = confusion_matrix(y_true, y_pred, normalize_method=normalize_method or 'true')
labels = conf_mat.columns.tolist()
title = 'Confusion matrix{}{}'.format(
'' if title_addition is None else (' ' + title_addition),
'' if normalize_method is None else (', normalized using method "' + normalize_method + '"'))
z_data, custom_data = (conf_mat, conf_mat_normalized) if normalize_method is None else (conf_mat_normalized, conf_mat)
z_data = z_data.to_numpy()
z_text = [["{:.3f}".format(y) for y in x] for x in z_data]
primary_heading, secondary_heading = ('Raw', 'Normalized') if normalize_method is None else ('Normalized', 'Raw')
hover_text = '<br><b>' + primary_heading + ' Count</b>: %{z}<br><b>' + secondary_heading + ' Count</b>: %{customdata} <br>'
# the "<extra> tags at the end are necessary to remove unwanted trace info
hover_template = '<b>True</b>: %{y}<br><b>Predicted</b>: %{x}' + hover_text + '<extra></extra>'
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Predicted Label', 'type': 'category', 'tickvals': labels},
yaxis={'title': 'True Label', 'type': 'category', 'tickvals': labels})
fig = _ff.create_annotated_heatmap(z_data, x=labels, y=labels,
annotation_text=z_text,
customdata=custom_data,
hovertemplate=hover_template,
colorscale='Blues',
showscale=True)
fig.update_layout(layout)
# put xaxis text on bottom to not overlap with title
fig['layout']['xaxis'].update(side='bottom')
# plotly Heatmap y axis defaults to the reverse of what we want: https://community.plotly.com/t/heatmap-y-axis-is-reversed-by-default-going-against-standard-convention-for-matrices/32180
fig.update_yaxes(autorange="reversed")
return fig
def precision_recall_curve(y_true, y_pred_proba):
"""
Given labels and binary classifier predicted probabilities, compute and return the data representing a precision-recall curve.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
Returns:
        dict: Dictionary containing metrics used to generate a precision-recall plot, with the following keys:
                  * `precision`: Precision values.
                  * `recall`: Recall values.
                  * `thresholds`: Threshold values used to produce the precision and recall.
                  * `auc_score`: The area under the precision-recall curve.
"""
y_true = infer_feature_types(y_true)
y_pred_proba = infer_feature_types(y_pred_proba)
y_true = _convert_woodwork_types_wrapper(y_true.to_series())
y_pred_proba = _convert_woodwork_types_wrapper(y_pred_proba.to_series())
precision, recall, thresholds = sklearn_precision_recall_curve(y_true, y_pred_proba)
auc_score = sklearn_auc(recall, precision)
return {'precision': precision,
'recall': recall,
'thresholds': thresholds,
'auc_score': auc_score}
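# Editor's note: an illustrative sketch of precision_recall_curve() on a tiny binary
# example, not part of evalml. The labels and probabilities are arbitrary assumptions.
def _example_precision_recall_curve():  # illustrative only
    y_true = pd.Series([0, 0, 1, 1])
    y_pred_proba = pd.Series([0.1, 0.4, 0.35, 0.8])  # probability of the "true" (1) label
    curve = precision_recall_curve(y_true, y_pred_proba)
    # 'precision', 'recall' and 'thresholds' are numpy arrays; 'auc_score' is the area
    # under the precision-recall curve computed with sklearn's auc().
    return curve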
def graph_precision_recall_curve(y_true, y_pred_proba, title_addition=None):
"""Generate and display a precision-recall plot.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
title_addition (str or None): If not None, append to plot title. Default None.
Returns:
plotly.Figure representing the precision-recall plot generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
precision_recall_curve_data = precision_recall_curve(y_true, y_pred_proba)
title = 'Precision-Recall{}'.format('' if title_addition is None else (' ' + title_addition))
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Recall', 'range': [-0.05, 1.05]},
yaxis={'title': 'Precision', 'range': [-0.05, 1.05]})
data = []
data.append(_go.Scatter(x=precision_recall_curve_data['recall'], y=precision_recall_curve_data['precision'],
name='Precision-Recall (AUC {:06f})'.format(precision_recall_curve_data['auc_score']),
line=dict(width=3)))
return _go.Figure(layout=layout, data=data)
def roc_curve(y_true, y_pred_proba):
"""
Given labels and classifier predicted probabilities, compute and return the data representing a Receiver Operating Characteristic (ROC) curve. Works with binary or multiclass problems.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True labels.
y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied.
Returns:
list(dict): A list of dictionaries (with one for each class) is returned. Binary classification problems return a list with one dictionary.
Each dictionary contains metrics used to generate an ROC plot with the following keys:
                  * `fpr_rates`: False positive rate.
                  * `tpr_rates`: True positive rate.
                  * `thresholds`: Threshold values used to produce each pair of true/false positive rates.
                  * `auc_score`: The area under the ROC curve.
"""
y_true = infer_feature_types(y_true)
y_pred_proba = infer_feature_types(y_pred_proba)
if isinstance(y_pred_proba, ww.DataTable):
y_pred_proba = _convert_woodwork_types_wrapper(y_pred_proba.to_dataframe()).to_numpy()
else:
y_pred_proba = _convert_woodwork_types_wrapper(y_pred_proba.to_series()).to_numpy()
y_true = _convert_woodwork_types_wrapper(y_true.to_series()).to_numpy()
if len(y_pred_proba.shape) == 1:
y_pred_proba = y_pred_proba.reshape(-1, 1)
if y_pred_proba.shape[1] == 2:
y_pred_proba = y_pred_proba[:, 1].reshape(-1, 1)
nan_indices = np.logical_or(pd.isna(y_true), np.isnan(y_pred_proba).any(axis=1))
y_true = y_true[~nan_indices]
y_pred_proba = y_pred_proba[~nan_indices]
lb = LabelBinarizer()
lb.fit(np.unique(y_true))
y_one_hot_true = lb.transform(y_true)
n_classes = y_one_hot_true.shape[1]
curve_data = []
for i in range(n_classes):
fpr_rates, tpr_rates, thresholds = sklearn_roc_curve(y_one_hot_true[:, i], y_pred_proba[:, i])
auc_score = sklearn_auc(fpr_rates, tpr_rates)
curve_data.append({'fpr_rates': fpr_rates,
'tpr_rates': tpr_rates,
'thresholds': thresholds,
'auc_score': auc_score})
return curve_data
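# Editor's note: an illustrative sketch of roc_curve() for a three-class problem, not
# part of evalml. The probability matrix is an arbitrary assumption; its columns must
# line up with the sorted unique class labels of y_true.
def _example_roc_curve():  # illustrative only
    y_true = pd.Series([0, 1, 2, 1])
    y_pred_proba = pd.DataFrame([[0.7, 0.2, 0.1], [0.2, 0.6, 0.2],
                                 [0.1, 0.2, 0.7], [0.3, 0.4, 0.3]])
    # One dict per class (one-vs-rest), each with 'fpr_rates', 'tpr_rates',
    # 'thresholds' and 'auc_score'; binary problems yield a single-entry list.
    return roc_curve(y_true, y_pred_proba)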
def graph_roc_curve(y_true, y_pred_proba, custom_class_names=None, title_addition=None):
"""Generate and display a Receiver Operating Characteristic (ROC) plot for binary and multiclass classification problems.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True labels.
        y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied. Note this should be a one-dimensional array with the predicted probability for the "true" label in the binary case.
        custom_class_names (list or None): If not None, custom labels for classes. Default None.
title_addition (str or None): if not None, append to plot title. Default None.
Returns:
plotly.Figure representing the ROC plot generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
title = 'Receiver Operating Characteristic{}'.format('' if title_addition is None else (' ' + title_addition))
layout = _go.Layout(title={'text': title},
xaxis={'title': 'False Positive Rate', 'range': [-0.05, 1.05]},
yaxis={'title': 'True Positive Rate', 'range': [-0.05, 1.05]})
all_curve_data = roc_curve(y_true, y_pred_proba)
graph_data = []
n_classes = len(all_curve_data)
if custom_class_names and len(custom_class_names) != n_classes:
raise ValueError('Number of custom class names does not match number of classes')
for i in range(n_classes):
roc_curve_data = all_curve_data[i]
name = i + 1 if custom_class_names is None else custom_class_names[i]
graph_data.append(_go.Scatter(x=roc_curve_data['fpr_rates'], y=roc_curve_data['tpr_rates'],
                                      hovertemplate="(False Positive Rate: %{x}, True Positive Rate: %{y})<br>" + "Threshold: %{text}",
                                      name=f'Class {name} (AUC {roc_curve_data["auc_score"]:.06f})',
text=roc_curve_data["thresholds"],
line=dict(width=3)))
graph_data.append(_go.Scatter(x=[0, 1], y=[0, 1],
name='Trivial Model (AUC 0.5)',
line=dict(dash='dash')))
return _go.Figure(layout=layout, data=graph_data)
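# Editor's note: an illustrative sketch of graph_roc_curve() with custom class names,
# not part of evalml. The data is an arbitrary assumption; the returned plotly Figure
# can be displayed with .show() in a notebook.
def _example_graph_roc_curve():  # illustrative only
    y_true = pd.Series([0, 1, 1, 0])
    y_pred_proba = pd.Series([0.2, 0.8, 0.6, 0.3])  # probability of the positive class
    # custom_class_names must have one entry per plotted curve (one here, since the
    # binary case produces a single curve).
    return graph_roc_curve(y_true, y_pred_proba, custom_class_names=['positive'])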
def _calculate_permutation_scores_fast(pipeline, precomputed_features, y, objective, col_name,
random_seed, n_repeats, scorer, baseline_score):
"""Calculate the permutation score when `col_name` is permuted."""
random_state = np.random.RandomState(random_seed)
scores = np.zeros(n_repeats)
# If column is not in the features or provenance, assume the column was dropped
if col_name not in precomputed_features.columns and col_name not in pipeline._get_feature_provenance():
return scores + baseline_score
if col_name in precomputed_features.columns:
col_idx = precomputed_features.columns.get_loc(col_name)
else:
col_idx = [precomputed_features.columns.get_loc(col) for col in pipeline._get_feature_provenance()[col_name]]
# This is what sk_permutation_importance does. Useful for thread safety
X_permuted = precomputed_features.copy()
shuffling_idx = np.arange(precomputed_features.shape[0])
for n_round in range(n_repeats):
random_state.shuffle(shuffling_idx)
col = X_permuted.iloc[shuffling_idx, col_idx]
col.index = X_permuted.index
X_permuted.iloc[:, col_idx] = col
feature_score = scorer(pipeline, X_permuted, y, objective)
scores[n_round] = feature_score
return scores
def _fast_permutation_importance(pipeline, X, y, objective, n_repeats=5, n_jobs=None, random_seed=None):
"""Calculate permutation importance faster by only computing the estimator features once.
Only used for pipelines that support this optimization.
"""
precomputed_features = _convert_woodwork_types_wrapper(pipeline.compute_estimator_features(X, y).to_dataframe())
if is_classification(pipeline.problem_type):
y = pipeline._encode_targets(y)
def scorer(pipeline, features, y, objective):
if objective.score_needs_proba:
preds = pipeline.estimator.predict_proba(features)
preds = _convert_woodwork_types_wrapper(preds.to_dataframe())
else:
preds = pipeline.estimator.predict(features)
preds = _convert_woodwork_types_wrapper(preds.to_series())
score = pipeline._score(X, y, preds, objective)
return score if objective.greater_is_better else -score
baseline_score = scorer(pipeline, precomputed_features, y, objective)
scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores_fast)(
pipeline, precomputed_features, y, objective, col_name, random_seed, n_repeats, scorer, baseline_score,
) for col_name in X.columns)
importances = baseline_score - np.array(scores)
return {'importances_mean': np.mean(importances, axis=1)}
def calculate_permutation_importance(pipeline, X, y, objective, n_repeats=5, n_jobs=None, random_seed=0):
"""Calculates permutation importance for features.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame): The input data used to score and compute permutation importance
y (ww.DataColumn, pd.Series): The target data
objective (str, ObjectiveBase): Objective to score on
n_repeats (int): Number of times to permute a feature. Defaults to 5.
        n_jobs (int or None): Level of parallelism used for pipelines.
            None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
        random_seed (int): Seed for the random number generator. Defaults to 0.
    Returns:
        pd.DataFrame: Mean feature importance scores, averaged over n_repeats shuffles.
"""
X = infer_feature_types(X)
y = infer_feature_types(y)
X = _convert_woodwork_types_wrapper(X.to_dataframe())
y = _convert_woodwork_types_wrapper(y.to_series())
objective = get_objective(objective, return_instance=True)
if not objective.is_defined_for_problem_type(pipeline.problem_type):
raise ValueError(f"Given objective '{objective.name}' cannot be used with '{pipeline.name}'")
if pipeline._supports_fast_permutation_importance:
perm_importance = _fast_permutation_importance(pipeline, X, y, objective, n_repeats=n_repeats, n_jobs=n_jobs,
random_seed=random_seed)
else:
def scorer(pipeline, X, y):
scores = pipeline.score(X, y, objectives=[objective])
return scores[objective.name] if objective.greater_is_better else -scores[objective.name]
perm_importance = sk_permutation_importance(pipeline, X, y, n_repeats=n_repeats, scoring=scorer, n_jobs=n_jobs,
random_state=random_seed)
mean_perm_importance = perm_importance["importances_mean"]
feature_names = list(X.columns)
mean_perm_importance = list(zip(feature_names, mean_perm_importance))
mean_perm_importance.sort(key=lambda x: x[1], reverse=True)
return pd.DataFrame(mean_perm_importance, columns=["feature", "importance"])
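# A hedged usage sketch for calculate_permutation_importance (added for clarity; not part
# of the module API). `fitted_pipeline`, `X` and `y` are placeholders the caller supplies;
# "Log Loss Binary" is only an example objective name and assumes a binary pipeline.
def _example_permutation_importance_usage(fitted_pipeline, X, y):
    importance_df = calculate_permutation_importance(fitted_pipeline, X, y,
                                                     objective="Log Loss Binary",
                                                     n_repeats=5, random_seed=0)
    # Columns are "feature" and "importance", sorted from most to least important.
    return importance_df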
def graph_permutation_importance(pipeline, X, y, objective, importance_threshold=0):
"""Generate a bar graph of the pipeline's permutation importance.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame): The input data used to score and compute permutation importance
y (ww.DataColumn, pd.Series): The target data
objective (str, ObjectiveBase): Objective to score on
importance_threshold (float, optional): If provided, graph features with a permutation importance whose absolute value is larger than importance_threshold. Defaults to zero.
Returns:
plotly.Figure, a bar graph showing features and their respective permutation importance.
"""
go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
perm_importance = calculate_permutation_importance(pipeline, X, y, objective)
perm_importance['importance'] = perm_importance['importance']
if importance_threshold < 0:
raise ValueError(f'Provided importance threshold of {importance_threshold} must be greater than or equal to 0')
# Remove features with close to zero importance
perm_importance = perm_importance[abs(perm_importance['importance']) >= importance_threshold]
# List is reversed to go from ascending order to descending order
perm_importance = perm_importance.iloc[::-1]
title = "Permutation Importance"
subtitle = "The relative importance of each input feature's "\
"overall influence on the pipelines' predictions, computed using "\
"the permutation importance algorithm."
data = [go.Bar(x=perm_importance['importance'],
y=perm_importance['feature'],
orientation='h'
)]
layout = {
'title': '{0}<br><sub>{1}</sub>'.format(title, subtitle),
'height': 800,
'xaxis_title': 'Permutation Importance',
'yaxis_title': 'Feature',
'yaxis': {
'type': 'category'
}
}
fig = go.Figure(data=data, layout=layout)
return fig
def binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Computes objective score as a function of potential binary classification
decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (BinaryClassificationPipeline obj): Fitted binary classification pipeline
X (ww.DataTable, pd.DataFrame): The input data used to compute objective score
y (ww.DataColumn, pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score
steps (int): Number of intervals to divide and calculate objective score at
Returns:
pd.DataFrame: DataFrame with thresholds and the corresponding objective score calculated at each threshold
"""
objective = get_objective(objective, return_instance=True)
if not objective.is_defined_for_problem_type(ProblemTypes.BINARY):
raise ValueError("`binary_objective_vs_threshold` can only be calculated for binary classification objectives")
if objective.score_needs_proba:
raise ValueError("Objective `score_needs_proba` must be False")
pipeline_tmp = copy.copy(pipeline)
thresholds = np.linspace(0, 1, steps + 1)
costs = []
for threshold in thresholds:
pipeline_tmp.threshold = threshold
scores = pipeline_tmp.score(X, y, [objective])
costs.append(scores[objective.name])
df = pd.DataFrame({"threshold": thresholds, "score": costs})
return df
def graph_binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Generates a plot graphing objective score vs. decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame): The input data used to score and compute scores
y (ww.DataColumn, pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score, shown on the y-axis of the graph
steps (int): Number of intervals to divide and calculate objective score at
Returns:
plotly.Figure representing the objective score vs. threshold graph generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
objective = get_objective(objective, return_instance=True)
df = binary_objective_vs_threshold(pipeline, X, y, objective, steps)
title = f'{objective.name} Scores vs. Thresholds'
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Threshold', 'range': _calculate_axis_range(df['threshold'])},
yaxis={'title': f"{objective.name} Scores vs. Binary Classification Decision Threshold", 'range': _calculate_axis_range(df['score'])})
data = []
data.append(_go.Scatter(x=df['threshold'],
y=df['score'],
line=dict(width=3)))
return _go.Figure(layout=layout, data=data)
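# A hedged sketch of how binary_objective_vs_threshold might be used to pick a decision
# threshold (added for clarity; not part of the module API). `fitted_binary_pipeline`,
# `X` and `y` are caller-supplied placeholders; "F1" is just an example of an objective
# that does not need probabilities and where higher scores are better.
def _example_objective_vs_threshold_usage(fitted_binary_pipeline, X, y):
    df = binary_objective_vs_threshold(fitted_binary_pipeline, X, y, objective="F1", steps=50)
    # df has one row per candidate threshold, with "threshold" and "score" columns.
    best_threshold = df.loc[df["score"].idxmax(), "threshold"]
    return df, best_threshold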
def partial_dependence(pipeline, X, features, percentiles=(0.05, 0.95), grid_resolution=100):
"""Calculates one or two-way partial dependence. If a single integer or
string is given for features, one-way partial dependence is calculated. If
a tuple of two integers or strings is given, two-way partial dependence
is calculated with the first feature in the y-axis and second feature in the
x-axis.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame, np.ndarray): The input data used to generate a grid of values
for feature where partial dependence will be calculated at
        features (int, string, tuple[int or string]): The target feature(s) for which to create the partial dependence plot.
If features is an int, it must be the index of the feature to use.
If features is a string, it must be a valid column name in X.
If features is a tuple of int/strings, it must contain valid column integers/names in X.
percentiles (tuple[float]): The lower and upper percentile used to create the extreme values for the grid.
Must be in [0, 1]. Defaults to (0.05, 0.95).
grid_resolution (int): Number of samples of feature(s) for partial dependence plot. If this value
is less than the maximum number of categories present in categorical data within X, it will be
set to the max number of categories + 1. Defaults to 100.
Returns:
pd.DataFrame: DataFrame with averaged predictions for all points in the grid averaged
over all samples of X and the values used to calculate those predictions.
In the one-way case: The dataframe will contain two columns, "feature_values" (grid points at which the
partial dependence was calculated) and "partial_dependence" (the partial dependence at that feature value).
For classification problems, there will be a third column called "class_label" (the class label for which
the partial dependence was calculated). For binary classification, the partial dependence is only calculated
for the "positive" class.
In the two-way case: The data frame will contain grid_resolution number of columns and rows where the
index and column headers are the sampled values of the first and second features, respectively, used to make
the partial dependence contour. The values of the data frame contain the partial dependence data for each
feature value pair.
Raises:
ValueError: if the user provides a tuple of not exactly two features.
ValueError: if the provided pipeline isn't fitted.
ValueError: if the provided pipeline is a Baseline pipeline.
"""
X = infer_feature_types(X)
# Dynamically set the grid resolution to the maximum number of categories
# in the categorical variables if there are more categories than resolution cells
X_cats = X.select("categorical")
if X_cats.shape[1] != 0:
max_num_cats = max(X_cats.describe().loc["nunique"])
grid_resolution = max([max_num_cats + 1, grid_resolution])
X = _convert_woodwork_types_wrapper(X.to_dataframe())
if isinstance(features, (list, tuple)):
if len(features) != 2:
raise ValueError("Too many features given to graph_partial_dependence. Only one or two-way partial "
"dependence is supported.")
if not (all([isinstance(x, str) for x in features]) or all([isinstance(x, int) for x in features])):
raise ValueError("Features provided must be a tuple entirely of integers or strings, not a mixture of both.")
if not pipeline._is_fitted:
raise ValueError("Pipeline to calculate partial dependence for must be fitted")
if pipeline.model_family == ModelFamily.BASELINE:
raise ValueError("Partial dependence plots are not supported for Baseline pipelines")
feature_list = []
if isinstance(features, int):
feature_list = X.iloc[:, features]
elif isinstance(features, str):
feature_list = X[features]
if len(feature_list) and feature_list.isnull().sum():
warnings.warn("There are null values in the features, which will cause NaN values in the partial dependence output. Fill in these values to remove the NaN values.", NullsInColumnWarning)
if len(feature_list) and feature_list.value_counts(normalize=True).values[0] + 0.01 > percentiles[1]:
val = feature_list.value_counts(normalize=True).index[0]
feature_name = features if isinstance(features, str) else X.columns[features]
raise ValueError(f"Feature '{feature_name}' is mostly one value, {val}, and cannot be used to compute partial dependence. Try raising the upper percentage value.")
wrapped = evalml.pipelines.components.utils.scikit_learn_wrapped_estimator(pipeline)
avg_pred, values = sk_partial_dependence(wrapped, X=X, features=features, percentiles=percentiles, grid_resolution=grid_resolution)
classes = None
if isinstance(pipeline, evalml.pipelines.BinaryClassificationPipeline):
classes = [pipeline.classes_[1]]
elif isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline):
classes = pipeline.classes_
if isinstance(features, (int, str)):
data = pd.DataFrame({"feature_values": np.tile(values[0], avg_pred.shape[0]),
"partial_dependence": np.concatenate([pred for pred in avg_pred])})
elif isinstance(features, (list, tuple)):
data = pd.DataFrame(avg_pred.reshape((-1, avg_pred.shape[-1])))
data.columns = values[1]
data.index = np.tile(values[0], avg_pred.shape[0])
if classes is not None:
data['class_label'] = np.repeat(classes, len(values[0]))
return data
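# A hedged sketch of one-way and two-way partial_dependence calls (added for clarity; not
# part of the module API). `fitted_pipeline` and `X` are caller-supplied placeholders and
# the column names "age" and "income" are hypothetical; they must exist in X.
def _example_partial_dependence_usage(fitted_pipeline, X):
    one_way = partial_dependence(fitted_pipeline, X, features="age", grid_resolution=20)
    two_way = partial_dependence(fitted_pipeline, X, features=("age", "income"), grid_resolution=20)
    return one_way, two_way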
def graph_partial_dependence(pipeline, X, features, class_label=None, grid_resolution=100):
"""Create an one-way or two-way partial dependence plot. Passing a single integer or
string as features will create a one-way partial dependence plot with the feature values
plotted against the partial dependence. Passing features a tuple of int/strings will create
a two-way partial dependence plot with a contour of feature[0] in the y-axis, feature[1]
in the x-axis and the partial dependence in the z-axis.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame, np.ndarray): The input data used to generate a grid of values
for feature where partial dependence will be calculated at
        features (int, string, tuple[int or string]): The target feature(s) for which to create the partial dependence plot.
If features is an int, it must be the index of the feature to use.
If features is a string, it must be a valid column name in X.
            If features is a tuple of int/strings, it must contain valid column integers/names in X.
class_label (string, optional): Name of class to plot for multiclass problems. If None, will plot
the partial dependence for each class. This argument does not change behavior for regression or binary
classification pipelines. For binary classification, the partial dependence for the positive label will
always be displayed. Defaults to None.
grid_resolution (int): Number of samples of feature(s) for partial dependence plot
Returns:
plotly.graph_objects.Figure: figure object containing the partial dependence data for plotting
Raises:
ValueError: if a graph is requested for a class name that isn't present in the pipeline
"""
if isinstance(features, (list, tuple)):
mode = "two-way"
elif isinstance(features, (int, str)):
mode = "one-way"
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
if isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline) and class_label is not None:
if class_label not in pipeline.classes_:
msg = f"Class {class_label} is not one of the classes the pipeline was fit on: {", ".join(list(pipeline.classes_))}"
raise ValueError(msg)
part_dep = partial_dependence(pipeline, X, features=features, grid_resolution=grid_resolution)
if mode == "two-way":
title = f"Partial Dependence of '{features[0]}' vs. '{features[1]}'"
layout = _go.Layout(title={'text': title},
xaxis={'title': f'{features[0]}'},
yaxis={'title': f'{features[1]}'},
showlegend=False)
elif mode == "one-way":
feature_name = str(features)
title = f"Partial Dependence of '{feature_name}'"
layout = _go.Layout(title={'text': title},
xaxis={'title': f'{feature_name}'},
yaxis={'title': 'Partial Dependence'},
showlegend=False)
if isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline):
class_labels = [class_label] if class_label is not None else pipeline.classes_
_subplots = import_or_raise("plotly.subplots", error_msg="Cannot find dependency plotly.graph_objects")
# If the user passes in a value for class_label, we want to create a 1 x 1 subplot or else there would
# be an empty column in the plot and it would look awkward
rows, cols = ((len(class_labels) + 1) // 2, 2) if len(class_labels) > 1 else (1, len(class_labels))
# Don't specify share_xaxis and share_yaxis so that we get tickmarks in each subplot
fig = _subplots.make_subplots(rows=rows, cols=cols, subplot_titles=class_labels)
for i, label in enumerate(class_labels):
label_df = part_dep.loc[part_dep.class_label == label]
if mode == "two-way":
x = label_df.index
y = np.array([col for col in label_df.columns if isinstance(col, (int, float))])
z = label_df.values
fig.add_trace(_go.Contour(x=x, y=y, z=z, name=label, coloraxis="coloraxis"),
row=(i + 2) // 2, col=(i % 2) + 1)
elif mode == "one-way":
x = label_df['feature_values']
y = label_df['partial_dependence']
fig.add_trace(_go.Scatter(x=x, y=y, line=dict(width=3), name=label),
row=(i + 2) // 2, col=(i % 2) + 1)
fig.update_layout(layout)
if mode == "two-way":
title = f'{features[0]}'
xrange = _calculate_axis_range(part_dep.index)
yrange = _calculate_axis_range(np.array([x for x in part_dep.columns if isinstance(x, (int, float))]))
fig.update_layout(coloraxis=dict(colorscale='Bluered_r'), showlegend=False)
elif mode == "one-way":
title = f'{feature_name}'
xrange = _calculate_axis_range(part_dep['feature_values'])
yrange = _calculate_axis_range(part_dep['partial_dependence'])
fig.update_xaxes(title=title, range=xrange)
fig.update_yaxes(range=yrange)
else:
if mode == "two-way":
trace = _go.Contour(x=part_dep.index,
y=part_dep.columns,
z=part_dep.values,
name="Partial Dependence")
elif mode == "one-way":
trace = _go.Scatter(x=part_dep['feature_values'],
y=part_dep['partial_dependence'],
name='Partial Dependence',
line=dict(width=3))
fig = _go.Figure(layout=layout, data=[trace])
return fig
def _calculate_axis_range(arr):
"""Helper method to help calculate the appropriate range for an axis based on the data to graph."""
max_value = arr.max()
min_value = arr.min()
margins = abs(max_value - min_value) * 0.05
return [min_value - margins, max_value + margins]
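# A tiny, runnable illustration of _calculate_axis_range (added for clarity; not part of
# the module API): for data spanning [0, 10] the helper pads both ends by 5% of the span,
# giving [-0.5, 10.5].
def _example_axis_range_usage():
    import numpy as np
    return _calculate_axis_range(np.array([0.0, 2.5, 10.0]))  # -> [-0.5, 10.5]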
def get_prediction_vs_actual_data(y_true, y_pred, outlier_threshold=None):
"""Combines y_true and y_pred into a single dataframe and adds a column for outliers. Used in `graph_prediction_vs_actual()`.
Arguments:
y_true (pd.Series, ww.DataColumn, or np.ndarray): The real target values of the data
y_pred (pd.Series, ww.DataColumn, or np.ndarray): The predicted values outputted by the regression model.
outlier_threshold (int, float): A positive threshold for what is considered an outlier value. This value is compared to the absolute difference
between each value of y_true and y_pred. Values within this threshold will be blue, otherwise they will be yellow.
Defaults to None
Returns:
pd.DataFrame with the following columns:
* `prediction`: Predicted values from regression model.
* `actual`: Real target values.
            * `outlier`: Color codes indicating whether the difference between each prediction and its actual value falls within the outlier threshold.
"""
if outlier_threshold and outlier_threshold <= 0:
raise ValueError(f"Threshold must be positive! Provided threshold is {outlier_threshold}")
y_true = infer_feature_types(y_true)
y_true = _convert_woodwork_types_wrapper(y_true.to_series())
y_pred = infer_feature_types(y_pred)
y_pred = _convert_woodwork_types_wrapper(y_pred.to_series())
predictions = y_pred.reset_index(drop=True)
actual = y_true.reset_index(drop=True)
data = pd.concat([pd.Series(predictions),
pd.Series(actual)], axis=1)
data.columns = ['prediction', 'actual']
if outlier_threshold:
data['outlier'] = np.where((abs(data['prediction'] - data['actual']) >= outlier_threshold), "#ffff00", "#0000ff")
else:
data['outlier'] = '#0000ff'
return data
def graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=None):
"""Generate a scatter plot comparing the true and predicted values. Used for regression plotting
Arguments:
y_true (ww.DataColumn, pd.Series): The real target values of the data
y_pred (ww.DataColumn, pd.Series): The predicted values outputted by the regression model.
outlier_threshold (int, float): A positive threshold for what is considered an outlier value. This value is compared to the absolute difference
between each value of y_true and y_pred. Values within this threshold will be blue, otherwise they will be yellow.
Defaults to None
Returns:
plotly.Figure representing the predicted vs. actual values graph
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
if outlier_threshold and outlier_threshold <= 0:
raise ValueError(f"Threshold must be positive! Provided threshold is {outlier_threshold}")
df = get_prediction_vs_actual_data(y_true, y_pred, outlier_threshold)
data = []
x_axis = _calculate_axis_range(df['prediction'])
y_axis = _calculate_axis_range(df['actual'])
x_y_line = [min(x_axis[0], y_axis[0]), max(x_axis[1], y_axis[1])]
data.append(_go.Scatter(x=x_y_line, y=x_y_line, name="y = x line", line_color='grey'))
title = 'Predicted vs Actual Values Scatter Plot'
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Prediction', 'range': x_y_line},
yaxis={'title': 'Actual', 'range': x_y_line})
for color, outlier_group in df.groupby('outlier'):
if outlier_threshold:
name = "< outlier_threshold" if color == "#0000ff" else ">= outlier_threshold"
else:
name = "Values"
data.append(_go.Scatter(x=outlier_group['prediction'],
y=outlier_group['actual'],
mode='markers',
marker=_go.scatter.Marker(color=color),
name=name))
return _go.Figure(layout=layout, data=data)
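# A minimal, illustrative sketch for get_prediction_vs_actual_data and
# graph_prediction_vs_actual (added for clarity; not part of the module API). It uses tiny
# hard-coded regression values and assumes plotly is installed.
def _example_prediction_vs_actual_usage():
    import pandas as pd
    y_true = pd.Series([10.0, 12.5, 9.0, 20.0])
    y_pred = pd.Series([11.0, 12.0, 15.0, 19.5])
    data = get_prediction_vs_actual_data(y_true, y_pred, outlier_threshold=2.0)
    fig = graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=2.0)
    return data, fig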
def _tree_parse(est, feature_names):
children_left = est.tree_.children_left
children_right = est.tree_.children_right
features = est.tree_.feature
thresholds = est.tree_.threshold
values = est.tree_.value
def recurse(i):
if children_left[i] == children_right[i]:
return {'Value': values[i]}
return OrderedDict({
'Feature': feature_names[features[i]],
'Threshold': thresholds[i],
'Value': values[i],
'Left_Child': recurse(children_left[i]),
'Right_Child': recurse(children_right[i])
})
return recurse(0)
def decision_tree_data_from_estimator(estimator):
"""Return data for a fitted tree in a restructured format
Arguments:
estimator (ComponentBase): A fitted DecisionTree-based estimator.
Returns:
OrderedDict: An OrderedDict of OrderedDicts describing a tree structure
"""
if not estimator.model_family == ModelFamily.DECISION_TREE:
raise ValueError("Tree structure reformatting is only supported for decision tree estimators")
if not estimator._is_fitted:
raise NotFittedError("This DecisionTree estimator is not fitted yet. Call 'fit' with appropriate arguments "
"before using this estimator.")
est = estimator._component_obj
feature_names = estimator.input_feature_names
return _tree_parse(est, feature_names)
def decision_tree_data_from_pipeline(pipeline_):
"""Return data for a fitted pipeline with in a restructured format
Arguments:
pipeline_ (PipelineBase): A pipeline with a DecisionTree-based estimator.
Returns:
OrderedDict: An OrderedDict of OrderedDicts describing a tree structure
"""
if not pipeline_.model_family == ModelFamily.DECISION_TREE:
raise ValueError("Tree structure reformatting is only supported for decision tree estimators")
if not pipeline_._is_fitted:
raise NotFittedError("The DecisionTree estimator associated with this pipeline is not fitted yet. Call 'fit' "
"with appropriate arguments before using this estimator.")
est = pipeline_.estimator._component_obj
feature_names = pipeline_.input_feature_names[pipeline_.estimator.name]
return _tree_parse(est, feature_names)
def visualize_decision_tree(estimator, max_depth=None, rotate=False, filled=False, filepath=None):
"""Generate an image visualizing the decision tree
Arguments:
estimator (ComponentBase): A fitted DecisionTree-based estimator.
max_depth (int, optional): The depth to which the tree should be displayed. If set to None (as by default),
tree is fully generated.
rotate (bool, optional): Orient tree left to right rather than top-down.
filled (bool, optional): Paint nodes to indicate majority class for classification, extremity of values for
regression, or purity of node for multi-output.
filepath (str, optional): Path to where the graph should be saved. If set to None (as by default), the graph
will not be saved.
Returns:
graphviz.Source: DOT object that can be directly displayed in Jupyter notebooks.
"""
if not estimator.model_family == ModelFamily.DECISION_TREE:
raise ValueError("Tree visualizations are only supported for decision tree estimators")
if max_depth and (not isinstance(max_depth, int) or not max_depth >= 0):
raise ValueError("Unknown value: '{}'. The parameter max_depth has to be a non-negative integer"
.format(max_depth))
if not estimator._is_fitted:
raise NotFittedError("This DecisionTree estimator is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
est = estimator._component_obj
graphviz = import_or_raise('graphviz', error_msg='Please install graphviz to visualize trees.')
graph_format = None
if filepath:
# Cast to str in case a Path object was passed in
filepath = str(filepath)
try:
f = open(filepath, 'w')
f.close()
except (IOError, FileNotFoundError):
raise ValueError(('Specified filepath is not writeable: {}'.format(filepath)))
path_and_name, graph_format = os.path.splitext(filepath)
if graph_format:
graph_format = graph_format[1:].lower() # ignore the dot
supported_filetypes = graphviz.backend.FORMATS
if graph_format not in supported_filetypes:
raise ValueError(("Unknown format '{}'. Make sure your format is one of the " +
"following: {}").format(graph_format, supported_filetypes))
else:
graph_format = 'pdf' # If the filepath has no extension default to pdf
dot_data = export_graphviz(decision_tree=est, max_depth=max_depth, rotate=rotate, filled=filled, feature_names=estimator.input_feature_names)
source_obj = graphviz.Source(source=dot_data, format=graph_format)
if filepath:
source_obj.render(filename=path_and_name, cleanup=True)
return source_obj
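# A hedged sketch for the decision tree helpers above (added for clarity; not part of the
# module API). `fitted_tree_estimator` is a caller-supplied, fitted DecisionTree-based
# evalml estimator; pass a `filepath` to visualize_decision_tree if an image file is wanted.
def _example_decision_tree_usage(fitted_tree_estimator):
    tree_data = decision_tree_data_from_estimator(fitted_tree_estimator)
    source = visualize_decision_tree(fitted_tree_estimator, max_depth=3, filled=True)
    return tree_data, source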
def get_prediction_vs_actual_over_time_data(pipeline, X, y, dates):
"""Get the data needed for the prediction_vs_actual_over_time plot.
Arguments:
pipeline (TimeSeriesRegressionPipeline): Fitted time series regression pipeline.
X (ww.DataTable, pd.DataFrame): Features used to generate new predictions.
y (ww.DataColumn, pd.Series): Target values to compare predictions against.
dates (ww.DataColumn, pd.Series): Dates corresponding to target values and predictions.
Returns:
pd.DataFrame
"""
dates = infer_feature_types(dates)
y = infer_feature_types(y)
prediction = pipeline.predict(X, y)
dates = _convert_woodwork_types_wrapper(dates.to_series())
y = _convert_woodwork_types_wrapper(y.to_series())
return pd.DataFrame({"dates": dates.reset_index(drop=True),
"target": y.reset_index(drop=True),
"prediction": prediction.reset_index(drop=True)})
def graph_prediction_vs_actual_over_time(pipeline, X, y, dates):
"""Plot the target values and predictions against time on the x-axis.
Arguments:
pipeline (TimeSeriesRegressionPipeline): Fitted time series regression pipeline.
X (ww.DataTable, pd.DataFrame): Features used to generate new predictions.
y (ww.DataColumn, pd.Series): Target values to compare predictions against.
dates (ww.DataColumn, pd.Series): Dates corresponding to target values and predictions.
Returns:
plotly.Figure: Showing the prediction vs actual over time.
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if pipeline.problem_type != ProblemTypes.TIME_SERIES_REGRESSION:
raise ValueError("graph_prediction_vs_actual_over_time only supports time series regression pipelines! "
f"Received {str(pipeline.problem_type)}.")
data = get_prediction_vs_actual_over_time_data(pipeline, X, y, dates)
data = [_go.Scatter(x=data["dates"], y=data["target"], mode='lines+markers', name="Target",
line=dict(color='#1f77b4')),
_go.Scatter(x=data["dates"], y=data["prediction"], mode='lines+markers', name='Prediction',
line=dict(color='#d62728'))]
# Let plotly pick the best date format.
layout = _go.Layout(title={'text': "Prediction vs Target over time"},
xaxis={'title': 'Time'},
yaxis={'title': 'Target Values and Predictions'})
return _go.Figure(data=data, layout=layout)
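# A hedged sketch for the time series helpers above (added for clarity; not part of the
# module API). `fitted_ts_pipeline`, `X`, `y` and `dates` are caller-supplied placeholders;
# the pipeline must be a fitted TimeSeriesRegressionPipeline.
def _example_prediction_vs_actual_over_time_usage(fitted_ts_pipeline, X, y, dates):
    data = get_prediction_vs_actual_over_time_data(fitted_ts_pipeline, X, y, dates)
    fig = graph_prediction_vs_actual_over_time(fitted_ts_pipeline, X, y, dates)
    return data, fig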
def get_linear_coefficients(estimator, features=None):
"""Returns a dataframe showing the features with the greatest predictive power for a linear model.
Arguments:
estimator (Estimator): Fitted linear model family estimator.
features (list[str]): List of feature names associated with the underlying data.
Returns:
        pd.Series: Coefficient values sorted in ascending order, with the intercept as the first entry.
"""
if not estimator.model_family == ModelFamily.LINEAR_MODEL:
raise ValueError("Linear coefficients are only available for linear family models")
if not estimator._is_fitted:
raise NotFittedError("This linear estimator is not fitted yet. Call 'fit' with appropriate arguments "
"before using this estimator.")
coef_ = estimator.feature_importance
coef_ = pd.Series(coef_, name='Coefficients', index=features)
coef_ = coef_.sort_values()
coef_ = pd.Series(estimator._component_obj.intercept_, index=['Intercept']).append(coef_)
return coef_
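# A hedged sketch for get_linear_coefficients (added for clarity; not part of the module
# API). `fitted_linear_estimator` is a caller-supplied, fitted linear-family evalml
# estimator and `feature_names` the matching list of input feature names.
def _example_linear_coefficients_usage(fitted_linear_estimator, feature_names):
    coefficients = get_linear_coefficients(fitted_linear_estimator, features=feature_names)
    # The intercept is the first entry; the remaining entries are sorted coefficients.
    return coefficients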
def t_sne(X, n_components=2, perplexity=30.0, learning_rate=200.0, metric='euclidean', **kwargs):
"""Get the transformed output after fitting X to the embedded space using t-SNE.
Arguments:
X (np.ndarray, ww.DataTable, pd.DataFrame): Data to be transformed. Must be numeric.
n_components (int, optional): Dimension of the embedded space.
perplexity (float, optional): Related to the number of nearest neighbors that is used in other manifold learning
algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between 5 and 50.
learning_rate (float, optional): Usually in the range [10.0, 1000.0]. If the cost function gets stuck in a bad
local minimum, increasing the learning rate may help.
metric (str, optional): The metric to use when calculating distance between instances in a feature array.
Returns:
np.ndarray (n_samples, n_components)
"""
if not isinstance(n_components, int) or not n_components > 0:
raise ValueError("The parameter n_components must be of type integer and greater than 0")
if not perplexity >= 0:
raise ValueError("The parameter perplexity must be non-negative")
X = infer_feature_types(X)
X = _convert_woodwork_types_wrapper(X.to_dataframe())
t_sne_ = TSNE(n_components=n_components, perplexity=perplexity, learning_rate=learning_rate, metric=metric, **kwargs)
X_new = t_sne_.fit_transform(X)
return X_new
def graph_t_sne(X, n_components=2, perplexity=30.0, learning_rate=200.0, metric='euclidean', marker_line_width=2, marker_size=7, **kwargs):
"""Plot high dimensional data into lower dimensional space using t-SNE .
Arguments:
X (np.ndarray, pd.DataFrame, ww.DataTable): Data to be transformed. Must be numeric.
n_components (int, optional): Dimension of the embedded space.
perplexity (float, optional): Related to the number of nearest neighbors that is used in other manifold learning
algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between 5 and 50.
learning_rate (float, optional): Usually in the range [10.0, 1000.0]. If the cost function gets stuck in a bad
local minimum, increasing the learning rate may help.
metric (str, optional): The metric to use when calculating distance between instances in a feature array.
marker_line_width (int, optional): Determines the line width of the marker boundary.
marker_size (int, optional): Determines the size of the marker.
Returns:
plotly.Figure representing the transformed data
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if not marker_line_width >= 0:
raise ValueError("The parameter marker_line_width must be non-negative")
if not marker_size >= 0:
raise ValueError("The parameter marker_size must be non-negative")
X_embedded = t_sne(X, n_components=n_components, perplexity=perplexity, learning_rate=learning_rate, metric=metric, **kwargs)
fig = _go.Figure()
fig.add_trace(_go.Scatter(
x=X_embedded[:, 0], y=X_embedded[:, 1],
mode='markers'
))
fig.update_traces(mode='markers', marker_line_width=marker_line_width, marker_size=marker_size)
fig.update_layout(title='t-SNE', yaxis_zeroline=False, xaxis_zeroline=False)
return fig
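# A minimal, illustrative sketch for t_sne/graph_t_sne (added for clarity; not part of the
# module API). It builds a small random numeric frame; real data would normally be larger,
# and scikit-learn requires the perplexity to be smaller than the number of samples.
def _example_t_sne_usage():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.normal(size=(50, 5)), columns=[f"f{i}" for i in range(5)])
    embedding = t_sne(X, n_components=2, perplexity=10.0)  # shape (50, 2)
    fig = graph_t_sne(X, n_components=2, perplexity=10.0)
    return embedding, fig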
|
import copy
import os
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
import woodwork as ww
from joblib import Parallel, delayed
from sklearn.exceptions import NotFittedError
from sklearn.inspection import partial_dependence as sk_partial_dependence
from sklearn.inspection import \
permutation_importance as sk_permutation_importance
from sklearn.manifold import TSNE
from sklearn.metrics import auc as sklearn_auc
from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix
from sklearn.metrics import \
precision_recall_curve as sklearn_precision_recall_curve
from sklearn.metrics import roc_curve as sklearn_roc_curve
from sklearn.preprocessing import LabelBinarizer
from sklearn.tree import export_graphviz
from sklearn.utils.multiclass import unique_labels
import evalml
from evalml.exceptions import NullsInColumnWarning
from evalml.model_family import ModelFamily
from evalml.objectives.utils import get_objective
from evalml.problem_types import ProblemTypes, is_classification
from evalml.utils import (
_convert_woodwork_types_wrapper,
import_or_raise,
infer_feature_types,
jupyter_check
)
def confusion_matrix(y_true, y_predicted, normalize_method='true'):
"""Confusion matrix for binary and multiclass classification.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
        y_predicted (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: Confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
y_true = infer_feature_types(y_true)
y_predicted = infer_feature_types(y_predicted)
y_true = _convert_woodwork_types_wrapper(y_true.to_series()).to_numpy()
y_predicted = _convert_woodwork_types_wrapper(y_predicted.to_series()).to_numpy()
labels = unique_labels(y_true, y_predicted)
conf_mat = sklearn_confusion_matrix(y_true, y_predicted)
conf_mat = pd.DataFrame(conf_mat, index=labels, columns=labels)
if normalize_method is not None:
return normalize_confusion_matrix(conf_mat, normalize_method=normalize_method)
return conf_mat
def normalize_confusion_matrix(conf_mat, normalize_method='true'):
"""Normalizes a confusion matrix.
Arguments:
conf_mat (ww.DataTable, pd.DataFrame or np.ndarray): Confusion matrix to normalize.
normalize_method ({'true', 'pred', 'all'}): Normalization method. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: normalized version of the input confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
conf_mat = infer_feature_types(conf_mat)
conf_mat = _convert_woodwork_types_wrapper(conf_mat.to_dataframe())
col_names = conf_mat.columns
conf_mat = conf_mat.to_numpy()
with warnings.catch_warnings(record=True) as w:
if normalize_method == 'true':
conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=1)[:, np.newaxis]
elif normalize_method == 'pred':
conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=0)
elif normalize_method == 'all':
conf_mat = conf_mat.astype('float') / conf_mat.sum().sum()
else:
raise ValueError('Invalid value provided for "normalize_method": {}'.format(normalize_method))
if w and "invalid value encountered in" in str(w[0].message):
raise ValueError("Sum of given axis is 0 and normalization is not possible. Please select another option.")
conf_mat = pd.DataFrame(conf_mat, index=col_names, columns=col_names)
return conf_mat
def graph_confusion_matrix(y_true, y_pred, normalize_method='true', title_addition=None):
"""Generate and display a confusion matrix plot.
If `normalize_method` is set, hover text will show raw count, otherwise hover text will show count normalized with method 'true'.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
y_pred (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
title_addition (str or None): if not None, append to plot title. Defaults to None.
Returns:
plotly.Figure representing the confusion matrix plot generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
_ff = import_or_raise("plotly.figure_factory", error_msg="Cannot find dependency plotly.figure_factory")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
conf_mat = confusion_matrix(y_true, y_pred, normalize_method=None)
conf_mat_normalized = confusion_matrix(y_true, y_pred, normalize_method=normalize_method or 'true')
labels = conf_mat.columns.tolist()
title = 'Confusion matrix{}{}'.format(
'' if title_addition is None else (' ' + title_addition),
'' if normalize_method is None else (', normalized using method "' + normalize_method + '"'))
z_data, custom_data = (conf_mat, conf_mat_normalized) if normalize_method is None else (conf_mat_normalized, conf_mat)
z_data = z_data.to_numpy()
z_text = [["{:.3f}".format(y) for y in x] for x in z_data]
primary_heading, secondary_heading = ('Raw', 'Normalized') if normalize_method is None else ('Normalized', 'Raw')
hover_text = '<br><b>' + primary_heading + ' Count</b>: %{z}<br><b>' + secondary_heading + ' Count</b>: %{customdata} <br>'
# the "<extra> tags at the end are necessary to remove unwanted trace info
hover_template = '<b>True</b>: %{y}<br><b>Predicted</b>: %{x}' + hover_text + '<extra></extra>'
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Predicted Label', 'type': 'category', 'tickvals': labels},
yaxis={'title': 'True Label', 'type': 'category', 'tickvals': labels})
fig = _ff.create_annotated_heatmap(z_data, x=labels, y=labels,
annotation_text=z_text,
customdata=custom_data,
hovertemplate=hover_template,
colorscale='Blues',
showscale=True)
fig.update_layout(layout)
# put xaxis text on bottom to not overlap with title
fig['layout']['xaxis'].update(side='bottom')
# plotly Heatmap y axis defaults to the reverse of what we want: https://community.plotly.com/t/heatmap-y-axis-is-reversed-by-default-going-against-standard-convention-for-matrices/32180
fig.update_yaxes(autorange="reversed")
return fig
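# A minimal, illustrative sketch for confusion_matrix/graph_confusion_matrix (added for
# clarity; not part of the module API). It uses tiny hard-coded labels and assumes plotly
# is installed.
def _example_confusion_matrix_usage():
    import pandas as pd
    y_true = pd.Series(["cat", "dog", "cat", "dog", "cat"])
    y_pred = pd.Series(["cat", "cat", "cat", "dog", "dog"])
    raw = confusion_matrix(y_true, y_pred, normalize_method=None)
    normalized = confusion_matrix(y_true, y_pred, normalize_method="true")
    fig = graph_confusion_matrix(y_true, y_pred)
    return raw, normalized, fig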
def precision_recall_curve(y_true, y_pred_proba):
"""
Given labels and binary classifier predicted probabilities, compute and return the data representing a precision-recall curve.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
Returns:
        dict: Dictionary containing metrics used to generate a precision-recall plot, with the following keys:
* `precision`: Precision values.
* `recall`: Recall values.
* `thresholds`: Threshold values used to produce the precision and recall.
            * `auc_score`: The area under the precision-recall curve.
"""
y_true = infer_feature_types(y_true)
y_pred_proba = infer_feature_types(y_pred_proba)
y_true = _convert_woodwork_types_wrapper(y_true.to_series())
y_pred_proba = _convert_woodwork_types_wrapper(y_pred_proba.to_series())
precision, recall, thresholds = sklearn_precision_recall_curve(y_true, y_pred_proba)
auc_score = sklearn_auc(recall, precision)
return {'precision': precision,
'recall': recall,
'thresholds': thresholds,
'auc_score': auc_score}
def graph_precision_recall_curve(y_true, y_pred_proba, title_addition=None):
"""Generate and display a precision-recall plot.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True binary labels.
y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
title_addition (str or None): If not None, append to plot title. Default None.
Returns:
plotly.Figure representing the precision-recall plot generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
precision_recall_curve_data = precision_recall_curve(y_true, y_pred_proba)
title = 'Precision-Recall{}'.format('' if title_addition is None else (' ' + title_addition))
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Recall', 'range': [-0.05, 1.05]},
yaxis={'title': 'Precision', 'range': [-0.05, 1.05]})
data = []
data.append(_go.Scatter(x=precision_recall_curve_data['recall'], y=precision_recall_curve_data['precision'],
name='Precision-Recall (AUC {:06f})'.format(precision_recall_curve_data['auc_score']),
line=dict(width=3)))
return _go.Figure(layout=layout, data=data)
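# A minimal, illustrative sketch for precision_recall_curve/graph_precision_recall_curve
# (added for clarity; not part of the module API). It uses tiny hard-coded binary data and
# assumes plotly is installed.
def _example_precision_recall_usage():
    import numpy as np
    y_true = np.array([0, 1, 1, 0, 1, 0])
    y_pred_proba = np.array([0.2, 0.7, 0.6, 0.4, 0.9, 0.1])
    pr_data = precision_recall_curve(y_true, y_pred_proba)
    fig = graph_precision_recall_curve(y_true, y_pred_proba)
    return pr_data, fig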
def roc_curve(y_true, y_pred_proba):
"""
Given labels and classifier predicted probabilities, compute and return the data representing a Receiver Operating Characteristic (ROC) curve. Works with binary or multiclass problems.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True labels.
y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied.
Returns:
list(dict): A list of dictionaries (with one for each class) is returned. Binary classification problems return a list with one dictionary.
Each dictionary contains metrics used to generate an ROC plot with the following keys:
            * `fpr_rates`: False positive rates.
            * `tpr_rates`: True positive rates.
            * `thresholds`: Threshold values used to produce each pair of true/false positive rates.
* `auc_score`: The area under the ROC curve.
"""
y_true = infer_feature_types(y_true)
y_pred_proba = infer_feature_types(y_pred_proba)
if isinstance(y_pred_proba, ww.DataTable):
y_pred_proba = _convert_woodwork_types_wrapper(y_pred_proba.to_dataframe()).to_numpy()
else:
y_pred_proba = _convert_woodwork_types_wrapper(y_pred_proba.to_series()).to_numpy()
y_true = _convert_woodwork_types_wrapper(y_true.to_series()).to_numpy()
if len(y_pred_proba.shape) == 1:
y_pred_proba = y_pred_proba.reshape(-1, 1)
if y_pred_proba.shape[1] == 2:
y_pred_proba = y_pred_proba[:, 1].reshape(-1, 1)
nan_indices = np.logical_or(pd.isna(y_true), np.isnan(y_pred_proba).any(axis=1))
y_true = y_true[~nan_indices]
y_pred_proba = y_pred_proba[~nan_indices]
lb = LabelBinarizer()
lb.fit(np.unique(y_true))
y_one_hot_true = lb.transform(y_true)
n_classes = y_one_hot_true.shape[1]
curve_data = []
for i in range(n_classes):
fpr_rates, tpr_rates, thresholds = sklearn_roc_curve(y_one_hot_true[:, i], y_pred_proba[:, i])
auc_score = sklearn_auc(fpr_rates, tpr_rates)
curve_data.append({'fpr_rates': fpr_rates,
'tpr_rates': tpr_rates,
'thresholds': thresholds,
'auc_score': auc_score})
return curve_data
def graph_roc_curve(y_true, y_pred_proba, custom_class_names=None, title_addition=None):
"""Generate and display a Receiver Operating Characteristic (ROC) plot for binary and multiclass classification problems.
Arguments:
y_true (ww.DataColumn, pd.Series or np.ndarray): True labels.
        y_pred_proba (ww.DataColumn, pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied. Note this should be a one-dimensional array with the predicted probability for the "true" label in the binary case.
        custom_class_names (list or None): If not None, custom labels for classes. Default None.
        title_addition (str or None): If not None, append to plot title. Default None.
Returns:
plotly.Figure representing the ROC plot generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
title = 'Receiver Operating Characteristic{}'.format('' if title_addition is None else (' ' + title_addition))
layout = _go.Layout(title={'text': title},
xaxis={'title': 'False Positive Rate', 'range': [-0.05, 1.05]},
yaxis={'title': 'True Positive Rate', 'range': [-0.05, 1.05]})
all_curve_data = roc_curve(y_true, y_pred_proba)
graph_data = []
n_classes = len(all_curve_data)
if custom_class_names and len(custom_class_names) != n_classes:
raise ValueError('Number of custom class names does not match number of classes')
for i in range(n_classes):
roc_curve_data = all_curve_data[i]
name = i + 1 if custom_class_names is None else custom_class_names[i]
graph_data.append(_go.Scatter(x=roc_curve_data['fpr_rates'], y=roc_curve_data['tpr_rates'],
hovertemplate="(False Postive Rate: %{x}, True Positive Rate: %{y})<br>" + "Threshold: %{text}",
name=f"Class {name} (AUC {roc_curve_data['auc_score']:.06f})",
text=roc_curve_data["thresholds"],
line=dict(width=3)))
graph_data.append(_go.Scatter(x=[0, 1], y=[0, 1],
name='Trivial Model (AUC 0.5)',
line=dict(dash='dash')))
return _go.Figure(layout=layout, data=graph_data)
def _calculate_permutation_scores_fast(pipeline, precomputed_features, y, objective, col_name,
random_seed, n_repeats, scorer, baseline_score):
"""Calculate the permutation score when `col_name` is permuted."""
random_state = np.random.RandomState(random_seed)
scores = np.zeros(n_repeats)
# If column is not in the features or provenance, assume the column was dropped
if col_name not in precomputed_features.columns and col_name not in pipeline._get_feature_provenance():
return scores + baseline_score
if col_name in precomputed_features.columns:
col_idx = precomputed_features.columns.get_loc(col_name)
else:
col_idx = [precomputed_features.columns.get_loc(col) for col in pipeline._get_feature_provenance()[col_name]]
# This is what sk_permutation_importance does. Useful for thread safety
X_permuted = precomputed_features.copy()
shuffling_idx = np.arange(precomputed_features.shape[0])
for n_round in range(n_repeats):
random_state.shuffle(shuffling_idx)
col = X_permuted.iloc[shuffling_idx, col_idx]
col.index = X_permuted.index
X_permuted.iloc[:, col_idx] = col
feature_score = scorer(pipeline, X_permuted, y, objective)
scores[n_round] = feature_score
return scores
def _fast_permutation_importance(pipeline, X, y, objective, n_repeats=5, n_jobs=None, random_seed=None):
"""Calculate permutation importance faster by only computing the estimator features once.
Only used for pipelines that support this optimization.
"""
precomputed_features = _convert_woodwork_types_wrapper(pipeline.compute_estimator_features(X, y).to_dataframe())
if is_classification(pipeline.problem_type):
y = pipeline._encode_targets(y)
def scorer(pipeline, features, y, objective):
if objective.score_needs_proba:
preds = pipeline.estimator.predict_proba(features)
preds = _convert_woodwork_types_wrapper(preds.to_dataframe())
else:
preds = pipeline.estimator.predict(features)
preds = _convert_woodwork_types_wrapper(preds.to_series())
score = pipeline._score(X, y, preds, objective)
return score if objective.greater_is_better else -score
baseline_score = scorer(pipeline, precomputed_features, y, objective)
scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores_fast)(
pipeline, precomputed_features, y, objective, col_name, random_seed, n_repeats, scorer, baseline_score,
) for col_name in X.columns)
importances = baseline_score - np.array(scores)
return {'importances_mean': np.mean(importances, axis=1)}
def calculate_permutation_importance(pipeline, X, y, objective, n_repeats=5, n_jobs=None, random_seed=0):
"""Calculates permutation importance for features.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame): The input data used to score and compute permutation importance
y (ww.DataColumn, pd.Series): The target data
objective (str, ObjectiveBase): Objective to score on
n_repeats (int): Number of times to permute a feature. Defaults to 5.
        n_jobs (int or None): Level of parallelism used for pipelines.
            None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
        random_seed (int): Seed for the random number generator. Defaults to 0.
    Returns:
        pd.DataFrame: Mean feature importance scores, averaged over n_repeats shuffles.
"""
X = infer_feature_types(X)
y = infer_feature_types(y)
X = _convert_woodwork_types_wrapper(X.to_dataframe())
y = _convert_woodwork_types_wrapper(y.to_series())
objective = get_objective(objective, return_instance=True)
if not objective.is_defined_for_problem_type(pipeline.problem_type):
raise ValueError(f"Given objective '{objective.name}' cannot be used with '{pipeline.name}'")
if pipeline._supports_fast_permutation_importance:
perm_importance = _fast_permutation_importance(pipeline, X, y, objective, n_repeats=n_repeats, n_jobs=n_jobs,
random_seed=random_seed)
else:
def scorer(pipeline, X, y):
scores = pipeline.score(X, y, objectives=[objective])
return scores[objective.name] if objective.greater_is_better else -scores[objective.name]
perm_importance = sk_permutation_importance(pipeline, X, y, n_repeats=n_repeats, scoring=scorer, n_jobs=n_jobs,
random_state=random_seed)
mean_perm_importance = perm_importance["importances_mean"]
feature_names = list(X.columns)
mean_perm_importance = list(zip(feature_names, mean_perm_importance))
mean_perm_importance.sort(key=lambda x: x[1], reverse=True)
return pd.DataFrame(mean_perm_importance, columns=["feature", "importance"])
def graph_permutation_importance(pipeline, X, y, objective, importance_threshold=0):
"""Generate a bar graph of the pipeline's permutation importance.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame): The input data used to score and compute permutation importance
y (ww.DataColumn, pd.Series): The target data
objective (str, ObjectiveBase): Objective to score on
importance_threshold (float, optional): If provided, graph features with a permutation importance whose absolute value is larger than importance_threshold. Defaults to zero.
Returns:
plotly.Figure, a bar graph showing features and their respective permutation importance.
"""
go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
perm_importance = calculate_permutation_importance(pipeline, X, y, objective)
perm_importance['importance'] = perm_importance['importance']
if importance_threshold < 0:
raise ValueError(f'Provided importance threshold of {importance_threshold} must be greater than or equal to 0')
# Remove features with close to zero importance
perm_importance = perm_importance[abs(perm_importance['importance']) >= importance_threshold]
# List is reversed to go from ascending order to descending order
perm_importance = perm_importance.iloc[::-1]
title = "Permutation Importance"
subtitle = "The relative importance of each input feature's "\
"overall influence on the pipelines' predictions, computed using "\
"the permutation importance algorithm."
data = [go.Bar(x=perm_importance['importance'],
y=perm_importance['feature'],
orientation='h'
)]
layout = {
'title': '{0}<br><sub>{1}</sub>'.format(title, subtitle),
'height': 800,
'xaxis_title': 'Permutation Importance',
'yaxis_title': 'Feature',
'yaxis': {
'type': 'category'
}
}
fig = go.Figure(data=data, layout=layout)
return fig
def binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Computes objective score as a function of potential binary classification
decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (BinaryClassificationPipeline obj): Fitted binary classification pipeline
X (ww.DataTable, pd.DataFrame): The input data used to compute objective score
y (ww.DataColumn, pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score
steps (int): Number of intervals to divide and calculate objective score at
Returns:
pd.DataFrame: DataFrame with thresholds and the corresponding objective score calculated at each threshold
"""
objective = get_objective(objective, return_instance=True)
if not objective.is_defined_for_problem_type(ProblemTypes.BINARY):
raise ValueError("`binary_objective_vs_threshold` can only be calculated for binary classification objectives")
if objective.score_needs_proba:
raise ValueError("Objective `score_needs_proba` must be False")
pipeline_tmp = copy.copy(pipeline)
thresholds = np.linspace(0, 1, steps + 1)
costs = []
for threshold in thresholds:
pipeline_tmp.threshold = threshold
scores = pipeline_tmp.score(X, y, [objective])
costs.append(scores[objective.name])
df = pd.DataFrame({"threshold": thresholds, "score": costs})
return df
def graph_binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Generates a plot graphing objective score vs. decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame): The input data used to score and compute scores
y (ww.DataColumn, pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score, shown on the y-axis of the graph
steps (int): Number of intervals to divide and calculate objective score at
Returns:
plotly.Figure representing the objective score vs. threshold graph generated
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
objective = get_objective(objective, return_instance=True)
df = binary_objective_vs_threshold(pipeline, X, y, objective, steps)
title = f'{objective.name} Scores vs. Thresholds'
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Threshold', 'range': _calculate_axis_range(df['threshold'])},
yaxis={'title': f"{objective.name} Scores vs. Binary Classification Decision Threshold", 'range': _calculate_axis_range(df['score'])})
data = []
data.append(_go.Scatter(x=df['threshold'],
y=df['score'],
line=dict(width=3)))
return _go.Figure(layout=layout, data=data)
def partial_dependence(pipeline, X, features, percentiles=(0.05, 0.95), grid_resolution=100):
"""Calculates one or two-way partial dependence. If a single integer or
string is given for features, one-way partial dependence is calculated. If
a tuple of two integers or strings is given, two-way partial dependence
is calculated with the first feature in the y-axis and second feature in the
x-axis.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame, np.ndarray): The input data used to generate a grid of values
for feature where partial dependence will be calculated at
        features (int, string, tuple[int or string]): The target feature(s) for which to create the partial dependence plot.
If features is an int, it must be the index of the feature to use.
If features is a string, it must be a valid column name in X.
If features is a tuple of int/strings, it must contain valid column integers/names in X.
percentiles (tuple[float]): The lower and upper percentile used to create the extreme values for the grid.
Must be in [0, 1]. Defaults to (0.05, 0.95).
grid_resolution (int): Number of samples of feature(s) for partial dependence plot. If this value
is less than the maximum number of categories present in categorical data within X, it will be
set to the max number of categories + 1. Defaults to 100.
Returns:
pd.DataFrame: DataFrame with averaged predictions for all points in the grid averaged
over all samples of X and the values used to calculate those predictions.
In the one-way case: The dataframe will contain two columns, "feature_values" (grid points at which the
partial dependence was calculated) and "partial_dependence" (the partial dependence at that feature value).
For classification problems, there will be a third column called "class_label" (the class label for which
the partial dependence was calculated). For binary classification, the partial dependence is only calculated
for the "positive" class.
In the two-way case: The data frame will contain grid_resolution number of columns and rows where the
index and column headers are the sampled values of the first and second features, respectively, used to make
the partial dependence contour. The values of the data frame contain the partial dependence data for each
feature value pair.
Raises:
ValueError: if the user provides a tuple of not exactly two features.
ValueError: if the provided pipeline isn't fitted.
ValueError: if the provided pipeline is a Baseline pipeline.
"""
X = infer_feature_types(X)
# Dynamically set the grid resolution to the maximum number of categories
# in the categorical variables if there are more categories than resolution cells
X_cats = X.select("categorical")
if X_cats.shape[1] != 0:
max_num_cats = max(X_cats.describe().loc["nunique"])
grid_resolution = max([max_num_cats + 1, grid_resolution])
X = _convert_woodwork_types_wrapper(X.to_dataframe())
if isinstance(features, (list, tuple)):
if len(features) != 2:
raise ValueError("Too many features given to graph_partial_dependence. Only one or two-way partial "
"dependence is supported.")
if not (all([isinstance(x, str) for x in features]) or all([isinstance(x, int) for x in features])):
raise ValueError("Features provided must be a tuple entirely of integers or strings, not a mixture of both.")
if not pipeline._is_fitted:
raise ValueError("Pipeline to calculate partial dependence for must be fitted")
if pipeline.model_family == ModelFamily.BASELINE:
raise ValueError("Partial dependence plots are not supported for Baseline pipelines")
feature_list = []
if isinstance(features, int):
feature_list = X.iloc[:, features]
elif isinstance(features, str):
feature_list = X[features]
if len(feature_list) and feature_list.isnull().sum():
warnings.warn("There are null values in the features, which will cause NaN values in the partial dependence output. Fill in these values to remove the NaN values.", NullsInColumnWarning)
if len(feature_list) and feature_list.value_counts(normalize=True).values[0] + 0.01 > percentiles[1]:
val = feature_list.value_counts(normalize=True).index[0]
feature_name = features if isinstance(features, str) else X.columns[features]
raise ValueError(f"Feature '{feature_name}' is mostly one value, {val}, and cannot be used to compute partial dependence. Try raising the upper percentage value.")
wrapped = evalml.pipelines.components.utils.scikit_learn_wrapped_estimator(pipeline)
avg_pred, values = sk_partial_dependence(wrapped, X=X, features=features, percentiles=percentiles, grid_resolution=grid_resolution)
classes = None
if isinstance(pipeline, evalml.pipelines.BinaryClassificationPipeline):
classes = [pipeline.classes_[1]]
elif isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline):
classes = pipeline.classes_
if isinstance(features, (int, str)):
data = pd.DataFrame({"feature_values": np.tile(values[0], avg_pred.shape[0]),
"partial_dependence": np.concatenate([pred for pred in avg_pred])})
elif isinstance(features, (list, tuple)):
data = pd.DataFrame(avg_pred.reshape((-1, avg_pred.shape[-1])))
data.columns = values[1]
data.index = np.tile(values[0], avg_pred.shape[0])
if classes is not None:
data['class_label'] = np.repeat(classes, len(values[0]))
return data
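# Hedged usage sketch (illustrative only): given a fitted, non-baseline pipeline `pipeline`
# and its input features `X`, where "age" and "fare" are hypothetical column names in X:
#
#     one_way = partial_dependence(pipeline, X, features="age", grid_resolution=50)
#     two_way = partial_dependence(pipeline, X, features=("age", "fare"), grid_resolution=50)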
def graph_partial_dependence(pipeline, X, features, class_label=None, grid_resolution=100):
"""Create an one-way or two-way partial dependence plot. Passing a single integer or
string as features will create a one-way partial dependence plot with the feature values
plotted against the partial dependence. Passing features a tuple of int/strings will create
a two-way partial dependence plot with a contour of feature[0] in the y-axis, feature[1]
in the x-axis and the partial dependence in the z-axis.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (ww.DataTable, pd.DataFrame, np.ndarray): The input data used to generate a grid of values
for feature where partial dependence will be calculated at
features (int, string, tuple[int or string]): The target feature(s) for which to create the partial dependence plot.
If features is an int, it must be the index of the feature to use.
If features is a string, it must be a valid column name in X.
If features is a tuple of ints/strings, it must contain valid column indices/names in X.
class_label (string, optional): Name of class to plot for multiclass problems. If None, will plot
the partial dependence for each class. This argument does not change behavior for regression or binary
classification pipelines. For binary classification, the partial dependence for the positive label will
always be displayed. Defaults to None.
grid_resolution (int): Number of samples of feature(s) for partial dependence plot
Returns:
plotly.graph_objects.Figure: figure object containing the partial dependence data for plotting
Raises:
ValueError: if a graph is requested for a class name that isn't present in the pipeline
"""
if isinstance(features, (list, tuple)):
mode = "two-way"
elif isinstance(features, (int, str)):
mode = "one-way"
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
if isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline) and class_label is not None:
if class_label not in pipeline.classes_:
msg = f"Class {class_label} is not one of the classes the pipeline was fit on: {', '.join(list(pipeline.classes_))}"
raise ValueError(msg)
part_dep = partial_dependence(pipeline, X, features=features, grid_resolution=grid_resolution)
if mode == "two-way":
title = f"Partial Dependence of '{features[0]}' vs. '{features[1]}'"
layout = _go.Layout(title={'text': title},
xaxis={'title': f'{features[0]}'},
yaxis={'title': f'{features[1]}'},
showlegend=False)
elif mode == "one-way":
feature_name = str(features)
title = f"Partial Dependence of '{feature_name}'"
layout = _go.Layout(title={'text': title},
xaxis={'title': f'{feature_name}'},
yaxis={'title': 'Partial Dependence'},
showlegend=False)
if isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline):
class_labels = [class_label] if class_label is not None else pipeline.classes_
_subplots = import_or_raise("plotly.subplots", error_msg="Cannot find dependency plotly.subplots")
# If the user passes in a value for class_label, we want to create a 1 x 1 subplot or else there would
# be an empty column in the plot and it would look awkward
rows, cols = ((len(class_labels) + 1) // 2, 2) if len(class_labels) > 1 else (1, len(class_labels))
# Don't specify share_xaxis and share_yaxis so that we get tickmarks in each subplot
fig = _subplots.make_subplots(rows=rows, cols=cols, subplot_titles=class_labels)
for i, label in enumerate(class_labels):
label_df = part_dep.loc[part_dep.class_label == label]
if mode == "two-way":
x = label_df.index
y = np.array([col for col in label_df.columns if isinstance(col, (int, float))])
z = label_df.values
fig.add_trace(_go.Contour(x=x, y=y, z=z, name=label, coloraxis="coloraxis"),
row=(i + 2) // 2, col=(i % 2) + 1)
elif mode == "one-way":
x = label_df['feature_values']
y = label_df['partial_dependence']
fig.add_trace(_go.Scatter(x=x, y=y, line=dict(width=3), name=label),
row=(i + 2) // 2, col=(i % 2) + 1)
fig.update_layout(layout)
if mode == "two-way":
title = f'{features[0]}'
xrange = _calculate_axis_range(part_dep.index)
yrange = _calculate_axis_range(np.array([x for x in part_dep.columns if isinstance(x, (int, float))]))
fig.update_layout(coloraxis=dict(colorscale='Bluered_r'), showlegend=False)
elif mode == "one-way":
title = f'{feature_name}'
xrange = _calculate_axis_range(part_dep['feature_values'])
yrange = _calculate_axis_range(part_dep['partial_dependence'])
fig.update_xaxes(title=title, range=xrange)
fig.update_yaxes(range=yrange)
else:
if mode == "two-way":
trace = _go.Contour(x=part_dep.index,
y=part_dep.columns,
z=part_dep.values,
name="Partial Dependence")
elif mode == "one-way":
trace = _go.Scatter(x=part_dep['feature_values'],
y=part_dep['partial_dependence'],
name='Partial Dependence',
line=dict(width=3))
fig = _go.Figure(layout=layout, data=[trace])
return fig
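# Hedged usage sketch (assumes the same fitted `pipeline` and hypothetical columns as above):
#
#     fig = graph_partial_dependence(pipeline, X, features="age")
#     fig_2d = graph_partial_dependence(pipeline, X, features=("age", "fare"))
#     fig.show()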
def _calculate_axis_range(arr):
"""Helper method to help calculate the appropriate range for an axis based on the data to graph."""
max_value = arr.max()
min_value = arr.min()
margins = abs(max_value - min_value) * 0.05
return [min_value - margins, max_value + margins]
def get_prediction_vs_actual_data(y_true, y_pred, outlier_threshold=None):
"""Combines y_true and y_pred into a single dataframe and adds a column for outliers. Used in `graph_prediction_vs_actual()`.
Arguments:
y_true (pd.Series, ww.DataColumn, or np.ndarray): The real target values of the data
y_pred (pd.Series, ww.DataColumn, or np.ndarray): The predicted values outputted by the regression model.
outlier_threshold (int, float): A positive threshold for what is considered an outlier value. This value is compared to the absolute difference
between each value of y_true and y_pred. Values within this threshold will be blue, otherwise they will be yellow.
Defaults to None
Returns:
pd.DataFrame with the following columns:
* `prediction`: Predicted values from regression model.
* `actual`: Real target values.
* `outlier`: Colors indicating which values are in the threshold for what is considered an outlier value.
"""
if outlier_threshold and outlier_threshold <= 0:
raise ValueError(f"Threshold must be positive! Provided threshold is {outlier_threshold}")
y_true = infer_feature_types(y_true)
y_true = _convert_woodwork_types_wrapper(y_true.to_series())
y_pred = infer_feature_types(y_pred)
y_pred = _convert_woodwork_types_wrapper(y_pred.to_series())
predictions = y_pred.reset_index(drop=True)
actual = y_true.reset_index(drop=True)
data = pd.concat([pd.Series(predictions),
pd.Series(actual)], axis=1)
data.columns = ['prediction', 'actual']
if outlier_threshold:
data['outlier'] = np.where((abs(data['prediction'] - data['actual']) >= outlier_threshold), "#ffff00", "#0000ff")
else:
data['outlier'] = '#0000ff'
return data
def graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=None):
"""Generate a scatter plot comparing the true and predicted values. Used for regression plotting
Arguments:
y_true (ww.DataColumn, pd.Series): The real target values of the data
y_pred (ww.DataColumn, pd.Series): The predicted values outputted by the regression model.
outlier_threshold (int, float): A positive threshold for what is considered an outlier value. This value is compared to the absolute difference
between each value of y_true and y_pred. Values within this threshold will be blue, otherwise they will be yellow.
Defaults to None
Returns:
plotly.Figure representing the predicted vs. actual values graph
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
if outlier_threshold and outlier_threshold <= 0:
raise ValueError(f"Threshold must be positive! Provided threshold is {outlier_threshold}")
df = get_prediction_vs_actual_data(y_true, y_pred, outlier_threshold)
data = []
x_axis = _calculate_axis_range(df['prediction'])
y_axis = _calculate_axis_range(df['actual'])
x_y_line = [min(x_axis[0], y_axis[0]), max(x_axis[1], y_axis[1])]
data.append(_go.Scatter(x=x_y_line, y=x_y_line, name="y = x line", line_color='grey'))
title = 'Predicted vs Actual Values Scatter Plot'
layout = _go.Layout(title={'text': title},
xaxis={'title': 'Prediction', 'range': x_y_line},
yaxis={'title': 'Actual', 'range': x_y_line})
for color, outlier_group in df.groupby('outlier'):
if outlier_threshold:
name = "< outlier_threshold" if color == "#0000ff" else ">= outlier_threshold"
else:
name = "Values"
data.append(_go.Scatter(x=outlier_group['prediction'],
y=outlier_group['actual'],
mode='markers',
marker=_go.scatter.Marker(color=color),
name=name))
return _go.Figure(layout=layout, data=data)
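# Hedged usage sketch (hypothetical `y_true` and `y_pred` series from a fitted regression
# pipeline; not part of the original module):
#
#     df = get_prediction_vs_actual_data(y_true, y_pred, outlier_threshold=10)
#     fig = graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=10)
#     fig.show()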
def _tree_parse(est, feature_names):
children_left = est.tree_.children_left
children_right = est.tree_.children_right
features = est.tree_.feature
thresholds = est.tree_.threshold
values = est.tree_.value
def recurse(i):
if children_left[i] == children_right[i]:
return {'Value': values[i]}
return OrderedDict({
'Feature': feature_names[features[i]],
'Threshold': thresholds[i],
'Value': values[i],
'Left_Child': recurse(children_left[i]),
'Right_Child': recurse(children_right[i])
})
return recurse(0)
def decision_tree_data_from_estimator(estimator):
"""Return data for a fitted tree in a restructured format
Arguments:
estimator (ComponentBase): A fitted DecisionTree-based estimator.
Returns:
OrderedDict: An OrderedDict of OrderedDicts describing a tree structure
"""
if not estimator.model_family == ModelFamily.DECISION_TREE:
raise ValueError("Tree structure reformatting is only supported for decision tree estimators")
if not estimator._is_fitted:
raise NotFittedError("This DecisionTree estimator is not fitted yet. Call 'fit' with appropriate arguments "
"before using this estimator.")
est = estimator._component_obj
feature_names = estimator.input_feature_names
return _tree_parse(est, feature_names)
def decision_tree_data_from_pipeline(pipeline_):
"""Return data for a fitted pipeline with in a restructured format
Arguments:
pipeline_ (PipelineBase): A pipeline with a DecisionTree-based estimator.
Returns:
OrderedDict: An OrderedDict of OrderedDicts describing a tree structure
"""
if not pipeline_.model_family == ModelFamily.DECISION_TREE:
raise ValueError("Tree structure reformatting is only supported for decision tree estimators")
if not pipeline_._is_fitted:
raise NotFittedError("The DecisionTree estimator associated with this pipeline is not fitted yet. Call 'fit' "
"with appropriate arguments before using this estimator.")
est = pipeline_.estimator._component_obj
feature_names = pipeline_.input_feature_names[pipeline_.estimator.name]
return _tree_parse(est, feature_names)
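# Hedged usage sketch: `dt_pipeline` is assumed to be a fitted pipeline whose estimator is
# decision-tree based. The nested OrderedDict mirrors the underlying sklearn tree_ object.
#
#     tree_dict = decision_tree_data_from_pipeline(dt_pipeline)
#     root_split_feature = tree_dict["Feature"]
#     left_subtree = tree_dict["Left_Child"]  # recurse into children the same way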
def visualize_decision_tree(estimator, max_depth=None, rotate=False, filled=False, filepath=None):
"""Generate an image visualizing the decision tree
Arguments:
estimator (ComponentBase): A fitted DecisionTree-based estimator.
max_depth (int, optional): The depth to which the tree should be displayed. If set to None (as by default),
tree is fully generated.
rotate (bool, optional): Orient tree left to right rather than top-down.
filled (bool, optional): Paint nodes to indicate majority class for classification, extremity of values for
regression, or purity of node for multi-output.
filepath (str, optional): Path to where the graph should be saved. If set to None (as by default), the graph
will not be saved.
Returns:
graphviz.Source: DOT object that can be directly displayed in Jupyter notebooks.
"""
if not estimator.model_family == ModelFamily.DECISION_TREE:
raise ValueError("Tree visualizations are only supported for decision tree estimators")
if max_depth and (not isinstance(max_depth, int) or not max_depth >= 0):
raise ValueError("Unknown value: '{}'. The parameter max_depth has to be a non-negative integer"
.format(max_depth))
if not estimator._is_fitted:
raise NotFittedError("This DecisionTree estimator is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
est = estimator._component_obj
graphviz = import_or_raise('graphviz', error_msg='Please install graphviz to visualize trees.')
graph_format = None
if filepath:
# Cast to str in case a Path object was passed in
filepath = str(filepath)
try:
f = open(filepath, 'w')
f.close()
except (IOError, FileNotFoundError):
raise ValueError(('Specified filepath is not writeable: {}'.format(filepath)))
path_and_name, graph_format = os.path.splitext(filepath)
if graph_format:
graph_format = graph_format[1:].lower() # ignore the dot
supported_filetypes = graphviz.backend.FORMATS
if graph_format not in supported_filetypes:
raise ValueError(("Unknown format '{}'. Make sure your format is one of the " +
"following: {}").format(graph_format, supported_filetypes))
else:
graph_format = 'pdf' # If the filepath has no extension default to pdf
dot_data = export_graphviz(decision_tree=est, max_depth=max_depth, rotate=rotate, filled=filled, feature_names=estimator.input_feature_names)
source_obj = graphviz.Source(source=dot_data, format=graph_format)
if filepath:
source_obj.render(filename=path_and_name, cleanup=True)
return source_obj
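# Hedged usage sketch (assumes graphviz is installed and `dt_estimator` is a fitted
# decision-tree estimator; "tree.png" is a hypothetical output path):
#
#     src = visualize_decision_tree(dt_estimator, max_depth=3, filled=True, filepath="tree.png")
#     src  # the returned graphviz.Source renders inline in a notebook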
def get_prediction_vs_actual_over_time_data(pipeline, X, y, dates):
"""Get the data needed for the prediction_vs_actual_over_time plot.
Arguments:
pipeline (TimeSeriesRegressionPipeline): Fitted time series regression pipeline.
X (ww.DataTable, pd.DataFrame): Features used to generate new predictions.
y (ww.DataColumn, pd.Series): Target values to compare predictions against.
dates (ww.DataColumn, pd.Series): Dates corresponding to target values and predictions.
Returns:
pd.DataFrame
"""
dates = infer_feature_types(dates)
y = infer_feature_types(y)
prediction = pipeline.predict(X, y)
dates = _convert_woodwork_types_wrapper(dates.to_series())
y = _convert_woodwork_types_wrapper(y.to_series())
return pd.DataFrame({"dates": dates.reset_index(drop=True),
"target": y.reset_index(drop=True),
"prediction": prediction.reset_index(drop=True)})
def graph_prediction_vs_actual_over_time(pipeline, X, y, dates):
"""Plot the target values and predictions against time on the x-axis.
Arguments:
pipeline (TimeSeriesRegressionPipeline): Fitted time series regression pipeline.
X (ww.DataTable, pd.DataFrame): Features used to generate new predictions.
y (ww.DataColumn, pd.Series): Target values to compare predictions against.
dates (ww.DataColumn, pd.Series): Dates corresponding to target values and predictions.
Returns:
plotly.Figure: Showing the prediction vs actual over time.
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if pipeline.problem_type != ProblemTypes.TIME_SERIES_REGRESSION:
raise ValueError("graph_prediction_vs_actual_over_time only supports time series regression pipelines! "
f"Received {str(pipeline.problem_type)}.")
data = get_prediction_vs_actual_over_time_data(pipeline, X, y, dates)
data = [_go.Scatter(x=data["dates"], y=data["target"], mode='lines+markers', name="Target",
line=dict(color='#1f77b4')),
_go.Scatter(x=data["dates"], y=data["prediction"], mode='lines+markers', name='Prediction',
line=dict(color='#d62728'))]
# Let plotly pick the best date format.
layout = _go.Layout(title={'text': "Prediction vs Target over time"},
xaxis={'title': 'Time'},
yaxis={'title': 'Target Values and Predictions'})
return _go.Figure(data=data, layout=layout)
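# Hedged usage sketch (hypothetical fitted TimeSeriesRegressionPipeline `ts_pipeline` with
# holdout features `X`, target `y`, and a matching date series `dates`):
#
#     fig = graph_prediction_vs_actual_over_time(ts_pipeline, X, y, dates)
#     fig.show()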
def get_linear_coefficients(estimator, features=None):
"""Returns a dataframe showing the features with the greatest predictive power for a linear model.
Arguments:
estimator (Estimator): Fitted linear model family estimator.
features (list[str]): List of feature names associated with the underlying data.
Returns:
pd.Series: Displaying the features by importance, with the intercept prepended.
"""
if not estimator.model_family == ModelFamily.LINEAR_MODEL:
raise ValueError("Linear coefficients are only available for linear family models")
if not estimator._is_fitted:
raise NotFittedError("This linear estimator is not fitted yet. Call 'fit' with appropriate arguments "
"before using this estimator.")
coef_ = estimator.feature_importance
coef_ = pd.Series(coef_, name='Coefficients', index=features)
coef_ = coef_.sort_values()
coef_ = pd.Series(estimator._component_obj.intercept_, index=['Intercept']).append(coef_)
return coef_
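# Hedged usage sketch (assumes `lin_estimator` is a fitted linear-family estimator and
# `feature_names` lists the columns it was trained on):
#
#     coefs = get_linear_coefficients(lin_estimator, features=feature_names)
#     coefs.head()  # intercept first, then coefficients sorted by value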
def t_sne(X, n_components=2, perplexity=30.0, learning_rate=200.0, metric='euclidean', **kwargs):
"""Get the transformed output after fitting X to the embedded space using t-SNE.
Arguments:
X (np.ndarray, ww.DataTable, pd.DataFrame): Data to be transformed. Must be numeric.
n_components (int, optional): Dimension of the embedded space.
perplexity (float, optional): Related to the number of nearest neighbors that is used in other manifold learning
algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between 5 and 50.
learning_rate (float, optional): Usually in the range [10.0, 1000.0]. If the cost function gets stuck in a bad
local minimum, increasing the learning rate may help.
metric (str, optional): The metric to use when calculating distance between instances in a feature array.
Returns:
np.ndarray (n_samples, n_components)
"""
if not isinstance(n_components, int) or not n_components > 0:
raise ValueError("The parameter n_components must be of type integer and greater than 0")
if not perplexity >= 0:
raise ValueError("The parameter perplexity must be non-negative")
X = infer_feature_types(X)
X = _convert_woodwork_types_wrapper(X.to_dataframe())
t_sne_ = TSNE(n_components=n_components, perplexity=perplexity, learning_rate=learning_rate, metric=metric, **kwargs)
X_new = t_sne_.fit_transform(X)
return X_new
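# Hedged usage sketch: a small random numeric matrix is enough to exercise t_sne; the
# expected output shape is (n_samples, n_components).
#
#     import numpy as np
#     X_demo = np.random.RandomState(0).rand(60, 8)
#     embedding = t_sne(X_demo, n_components=2, perplexity=15.0)
#     embedding.shape  # (60, 2)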
def graph_t_sne(X, n_components=2, perplexity=30.0, learning_rate=200.0, metric='euclidean', marker_line_width=2, marker_size=7, **kwargs):
"""Plot high dimensional data into lower dimensional space using t-SNE .
Arguments:
X (np.ndarray, pd.DataFrame, ww.DataTable): Data to be transformed. Must be numeric.
n_components (int, optional): Dimension of the embedded space.
perplexity (float, optional): Related to the number of nearest neighbors that is used in other manifold learning
algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between 5 and 50.
learning_rate (float, optional): Usually in the range [10.0, 1000.0]. If the cost function gets stuck in a bad
local minimum, increasing the learning rate may help.
metric (str, optional): The metric to use when calculating distance between instances in a feature array.
marker_line_width (int, optional): Determines the line width of the marker boundary.
marker_size (int, optional): Determines the size of the marker.
Returns:
plotly.Figure representing the transformed data
"""
_go = import_or_raise("plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects")
if not marker_line_width >= 0:
raise ValueError("The parameter marker_line_width must be non-negative")
if not marker_size >= 0:
raise ValueError("The parameter marker_size must be non-negative")
X_embedded = t_sne(X, n_components=n_components, perplexity=perplexity, learning_rate=learning_rate, metric=metric, **kwargs)
fig = _go.Figure()
fig.add_trace(_go.Scatter(
x=X_embedded[:, 0], y=X_embedded[:, 1],
mode='markers'
))
fig.update_traces(mode='markers', marker_line_width=marker_line_width, marker_size=marker_size)
fig.update_layout(title='t-SNE', yaxis_zeroline=False, xaxis_zeroline=False)
return fig
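# Hedged usage sketch (same hypothetical X_demo as above):
#
#     fig = graph_t_sne(X_demo, n_components=2, perplexity=15.0, marker_size=9)
#     fig.show()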
|
import xarray
from matplotlib.pyplot import figure
def timeprofile(iono: xarray.Dataset):
fig = figure(figsize=(16, 12))
axs = fig.subplots(3, 1, sharex=True).ravel()
fig.suptitle(
f"{str(iono.time[0].values)[:-13]} to "
f"{str(iono.time[-1].values)[:-13]}\n"
f"Glat, Glon: {iono.glat.item()}, {iono.glon.item()}"
)
ax = axs[0]
ax.plot(iono.time, iono["NmF2"], label="N$_m$F$_2$")
ax.plot(iono.time, iono["NmF1"], label="N$_m$F$_1$")
ax.plot(iono.time, iono["NmE"], label="N$_m$E")
ax.set_title("Maximum number densities vs. ionospheric layer")
ax.set_ylabel("(m$^{-3}$)")
ax.set_yscale("log")
ax.legend(loc="best")
ax = axs[1]
ax.plot(iono.time, iono["hmF2"], label="h$_m$F$_2$")
ax.plot(iono.time, iono["hmF1"], label="h$_m$F$_1$")
ax.plot(iono.time, iono["hmE"], label="h$_m$E")
ax.set_title("Height of maximum density vs. ionospheric layer")
ax.set_ylabel("(km)")
ax.set_ylim((90, None))
ax.legend(loc="best")
ax = axs[2]
ax.plot(iono.time, iono["foF2"], label="foF2")
ax.set_title("F2 layer plasma frequency")
ax.set_ylabel("(MHz)")
for a in axs.ravel():
a.grid(True)
# %%
fig = figure(figsize=(16, 12))
axs = fig.subplots(1, 1, sharex=True)
fig.suptitle(
f"{str(iono.time[0].values)[:-13]} to "
f"{str(iono.time[-1].values)[:-13]}\n"
f"Glat, Glon: {iono.glat.item()}, {iono.glon.item()}"
)
# %% Tec(time)
ax = axs
ax.plot(iono.time, iono["TEC"], label="TEC")
ax.set_ylabel("(m$^{-2}$)")
ax.set_title("Total Electron Content (TEC)")
# ax.set_yscale('log')
ax.legend(loc="best")
ax.grid(True)
# %% ion_drift(time)
# ax = axs[1]
# ax.plot(iono.time, iono["EqVertIonDrift"], label=r"V$_y$")
# ax.set_xlabel("time (UTC)")
# ax.set_ylabel("(m/s)")
# ax.legend(loc="best")
# for a in axs.ravel():
# a.grid(True)
# %% Ne(time)
fg = figure()
ax = fg.gca()
hi = ax.pcolormesh(iono.time, iono.alt_km, iono["ne"].values.T)
fg.colorbar(hi, ax=ax).set_label("[m$^{-3}$]")
ax.set_ylabel("altitude [km]")
ax.set_title("$N_e$ vs. altitude and time")
def altprofile(iono: xarray.Dataset):
fig = figure(figsize=(16, 6))
axs = fig.subplots(1, 2)
fig.suptitle(f"{str(iono.time[0].values)[:-13]}\n" f"Glat, Glon: {iono.glat.item()}, {iono.glon.item()}")
pn = axs[0]
pn.plot(iono["ne"], iono.alt_km, label="N$_e$")
# pn.set_title(iri2016Obj.title1)
pn.set_xlabel("Density (m$^{-3}$)")
pn.set_ylabel("Altitude (km)")
pn.set_xscale("log")
pn.legend(loc="best")
pn.grid(True)
pn = axs[1]
pn.plot(iono["Ti"], iono.alt_km, label="T$_i$")
pn.plot(iono["Te"], iono.alt_km, label="T$_e$")
# pn.set_title(iri2016Obj.title2)
pn.set_xlabel("Temperature (K)")
pn.set_ylabel("Altitude (km)")
pn.legend(loc="best")
pn.grid(True)
def latprofile(iono: xarray.Dataset):
fig = figure(figsize=(8, 12))
axs = fig.subplots(2, 1, sharex=True)
ax = axs[0]
ax.plot(iono["glat"], iono["NmF2"], label="N$_m$F$_2$")
ax.plot(iono["glat"], iono["NmF1"], label="N$_m$F$_1$")
ax.plot(iono["glat"], iono["NmE"], label="N$_m$E")
ax.set_title(str(iono.time[0].values)[:-13] + f' latitude {iono["glat"][[0, -1]].values}')
# ax.set_xlim(iono.lat[[0, -1]])
ax.set_xlabel(r"Geog. Lat. ($^\circ$)")
ax.set_ylabel("(m$^{-3}$)")
ax.set_yscale("log")
ax = axs[1]
ax.plot(iono["glat"], iono["hmF2"], label="h$_m$F$_2$")
ax.plot(iono["glat"], iono["hmF1"], label="h$_m$F$_1$")
ax.plot(iono["glat"], iono["hmE"], label="h$_m$E")
ax.set_xlim(iono["glat"][[0, -1]])
ax.set_title(str(iono.time[0].values)[:-13] + f' latitude {iono["glat"][[0, -1]].values}')
ax.set_xlabel(r"Geog. Lat. ($^\circ$)")
ax.set_ylabel("(km)")
for a in axs:
a.legend(loc="best")
a.grid(True)
|
import xarray
from matplotlib.pyplot import figure
def timeprofile(iono: xarray.Dataset):
fig = figure(figsize=(16, 12))
axs = fig.subplots(3, 1, sharex=True).ravel()
fig.suptitle(
f"{str(iono.time[0].values)[:-13]} to "
f"{str(iono.time[-1].values)[:-13]}\n"
f"Glat, Glon: {iono.glat.item()}, {iono.glon.item()}"
)
ax = axs[0]
ax.plot(iono.time, iono["NmF2"], label="N$_m$F$_2$")
ax.plot(iono.time, iono["NmF1"], label="N$_m$F$_1$")
ax.plot(iono.time, iono["NmE"], label="N$_m$E")
ax.set_title("Maximum number densities vs. ionospheric layer")
ax.set_ylabel("(m$^{-3}$)")
ax.set_yscale("log")
ax.legend(loc="best")
ax = axs[1]
ax.plot(iono.time, iono["hmF2"], label="h$_m$F$_2$")
ax.plot(iono.time, iono["hmF1"], label="h$_m$F$_1$")
ax.plot(iono.time, iono["hmE"], label="h$_m$E")
ax.set_title("Height of maximum density vs. ionospheric layer")
ax.set_ylabel("(km)")
ax.set_ylim((90, None))
ax.legend(loc="best")
ax = axs[2]
ax.plot(iono.time, iono["foF2"], label="foF2")
ax.set_title("F2 layer plasma frequency")
ax.set_ylabel("(MHz)")
for a in axs.ravel():
a.grid(True)
# %%
fig = figure(figsize=(16, 12))
axs = fig.subplots(1, 1, sharex=True)
fig.suptitle(
f"{str(iono.time[0].values)[:-13]} to "
f"{str(iono.time[-1].values)[:-13]}\n"
f"Glat, Glon: {iono.glat.item()}, {iono.glon.item()}"
)
# %% Tec(time)
ax = axs
ax.plot(iono.time, iono["TEC"], label="TEC")
ax.set_ylabel("(m$^{-2}$)")
ax.set_title("Total Electron Content (TEC)")
# ax.set_yscale('log')
ax.legend(loc="best")
ax.grid(True)
# %% ion_drift(time)
# ax = axs[1]
# ax.plot(iono.time, iono["EqVertIonDrift"], label=r"V$_y$")
# ax.set_xlabel("time (UTC)")
# ax.set_ylabel("(m/s)")
# ax.legend(loc="best")
# for a in axs.ravel():
# a.grid(True)
# %% Ne(time)
fg = figure()
ax = fg.gca()
hi = ax.pcolormesh(iono.time, iono.alt_km, iono["ne"].values.T)
fg.colorbar(hi, ax=ax).set_label("[m$^{-3}$]")
ax.set_ylabel("altitude [km]")
ax.set_title("$N_e$ vs. altitude and time")
def altprofile(iono: xarray.Dataset):
fig = figure(figsize=(16, 6))
axs = fig.subplots(1, 2)
fig.suptitle(f"{str(iono.time[0].values)[:-13]}\n" f"Glat, Glon: {iono.glat.item()}, {iono.glon.item()}")
pn = axs[0]
pn.plot(iono["ne"], iono.alt_km, label="N$_e$")
# pn.set_title(iri2016Obj.title1)
pn.set_xlabel("Density (m$^{-3}$)")
pn.set_ylabel("Altitude (km)")
pn.set_xscale("log")
pn.legend(loc="best")
pn.grid(True)
pn = axs[1]
pn.plot(iono["Ti"], iono.alt_km, label="T$_i$")
pn.plot(iono["Te"], iono.alt_km, label="T$_e$")
# pn.set_title(iri2016Obj.title2)
pn.set_xlabel("Temperature (K)")
pn.set_ylabel("Altitude (km)")
pn.legend(loc="best")
pn.grid(True)
def latprofile(iono: xarray.Dataset):
fig = figure(figsize=(8, 12))
axs = fig.subplots(2, 1, sharex=True)
ax = axs[0]
ax.plot(iono["glat"], iono["NmF2"], label="N$_m$F$_2$")
ax.plot(iono["glat"], iono["NmF1"], label="N$_m$F$_1$")
ax.plot(iono["glat"], iono["NmE"], label="N$_m$E")
ax.set_title(str(iono.time[0].values)[:-13] + f' latitude {iono["glat"][[0, -1]].values}')
# ax.set_xlim(iono.lat[[0, -1]])
ax.set_xlabel(r"Geog. Lat. ($^\circ$)")
ax.set_ylabel("(m$^{-3}$)")
ax.set_yscale("log")
ax = axs[1]
ax.plot(iono["glat"], iono["hmF2"], label="h$_m$F$_2$")
ax.plot(iono["glat"], iono["hmF1"], label="h$_m$F$_1$")
ax.plot(iono["glat"], iono["hmE"], label="h$_m$E")
ax.set_xlim(iono["glat"][[0, -1]])
ax.set_title(str(iono.time[0].values)[:-13] + f' latitude {iono["glat"][[0, -1]].values}')
ax.set_xlabel(r"Geog. Lat. ($^\circ$)")
ax.set_ylabel("(km)")
for a in axs:
a.legend(loc="best")
a.grid(True)
|
# %%
import time
import datetime
from scipy.stats import uniform
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RandomizedSearchCV, train_test_split
# Modeling Libraries
from sklearn.linear_model import LinearRegression
# Scoring Libraries
from sklearn.metrics import r2_score
from IPython.display import display
import xgboost as xgb
from traitlets.traitlets import Any, Dict
from pandas.core.frame import DataFrame
import pandas as pd
import numpy as np
from enum import Enum, auto
import warnings
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.figure_factory as ff
import plotly.express as px
warnings.filterwarnings('ignore')
%matplotlib inline
# %% [markdown]
'''
# Pandas settings
'''
# %%
pd.options.display.max_columns = None
pd.options.display.max_rows = 500
pd.options.display.width = None
pd.options.display.max_colwidth = 100
pd.options.display.precision = 3
# %% [markdown]
'''
# Modeling Utilities
'''
# %%
class DataMode(Enum):
ENCODED = auto()
ENCODED_DIM_RED = auto()
def split_data(X, y=None, test_fraction: float = 0.2, shuffle: bool = True, stratify=None):
return train_test_split(X, y, test_size=test_fraction, random_state=42, shuffle=shuffle, stratify=stratify) if y is not None else train_test_split(X, test_size=test_fraction, random_state=42, shuffle=shuffle, stratify=stratify)
def plot_error_patterns(error_data, **kwargs) -> None:
pass
def print_feature_importance():
pass
def print_equation():
pass
# %%
linear = 'algoname'
model_obj = 'model'
params_dict = 'params'
models = {
linear: {model_obj: LinearRegression(), params_dict: dict()}
}
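# Hedged sketch (not part of the original notebook): further estimators could be registered
# in the same structure, e.g. an XGBoost regressor with a small random-search space.
# Parameter names below follow the standard xgboost / scipy.stats APIs.
#
# models['xgboost'] = {
#     model_obj: xgb.XGBRegressor(objective='reg:squarederror'),
#     params_dict: {'n_estimators': [100, 300, 500],
#                   'max_depth': [3, 5, 7],
#                   'learning_rate': uniform(0.01, 0.3)},
# }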
# %% [markdown]
'''
### read data
'''
# %%
base_path = '../../data/'
encoded_train = pd.read_pickle(f"{base_path}encoded_train.pkl")
encoded_train_dim_red = pd.read_pickle(f"{base_path}encoded_train_dim_red.pkl")
target_col_name = 'y'
# %% [markdown]
'''
# Train test split
'''
# %%
X_train_encoded, X_test_encoded, y_train_encoded, y_test_encoded = split_data(encoded_train.drop(target_col_name, axis=1), encoded_train[[target_col_name]])
X_train_dim_red, X_test_dim_red, y_train_dim_red, y_test_dim_red = split_data(encoded_train_dim_red, encoded_train[[target_col_name]])
# %% [markdown]
'''
# Class Balancing - NA
'''
# %% [markdown]
'''
# Create model
'''
# %%
def prepare_error_data_and_plot(model, X, actual_y: pd.DataFrame, pred_y, mode, model_name, plot: bool = False) -> pd.DataFrame:
data_mode_str = "without PCA" if mode == DataMode.ENCODED else "with PCA"
error_df = actual_y.copy()
error_df["Pred"] = pred_y
# error_df["Res"] = error_df["y"] - error_df["Pred"]
# error_df.columns = ["Actual", "Predicted", "Residual"]
if plot:
print(f"Residual Analysis graphs for \"Train Data\" {model_name} {data_mode_str}")
plot_error_patterns(error_df)
return error_df
# %%
show_graphs = False
all_model_metrics = []
for model, model_config in models.items():
print(f"Starting for {model.title()}")
# With and without Dimensionality Reduction
for mode in [DataMode.ENCODED, DataMode.ENCODED_DIM_RED]:
X_train, X_test, y_train, y_test = (X_train_encoded, X_test_encoded, y_train_encoded, y_test_encoded) if mode == DataMode.ENCODED else (
X_train_dim_red, X_test_dim_red, y_train_dim_red, y_test_dim_red)
data_mode_str = "without PCA" if mode == DataMode.ENCODED else "with PCA"
# Hyper-parameter tuning
ra_s_cv = RandomizedSearchCV(model_config.get(model_obj), model_config.get(params_dict), random_state=0, n_jobs=-1, cv=3, verbose=3, return_train_score=True)
start_time = time.perf_counter()
ra_s_cv.fit(X_train, y_train)
end_time = time.perf_counter()
train_pred = ra_s_cv.predict(X_train)
test_pred = ra_s_cv.predict(X_test)
print("-"*50)
print(f"Best Estimator for {model} {data_mode_str} is {ra_s_cv.best_estimator_}\n")
print(f"Best Params for {model} {data_mode_str} are {ra_s_cv.best_params_}\n")
print(f"Cross validation Results for {model} {data_mode_str}\n")
display(pd.DataFrame(ra_s_cv.cv_results_))
print("-"*50)
# Plot evaluation matrix
prepare_error_data_and_plot(ra_s_cv, X_train, y_train, train_pred, mode, model, show_graphs)
prepare_error_data_and_plot(ra_s_cv, X_test, y_test, test_pred, mode, model, show_graphs)
# Record performance
all_model_metrics.append([f"{model.title()} {data_mode_str if mode == DataMode.ENCODED_DIM_RED else ''}", ra_s_cv.best_score_, ra_s_cv.score(X_train, y_train),
ra_s_cv.score(X_test, y_test.values),
# r2_score(y_train, train_pred), r2_score(y_test, test_pred), # Metrics on train and test
end_time-start_time])
print("="*50, "\n")
# %%
perf_frame = pd.DataFrame(all_model_metrics, columns=["Algo", "Best Training Score (CV)", "Train Score", "Test Score",
# "Train R2", "Test R2", # Col name for metrics on train and test
"Time Taken(Sec)"])
perf_frame["R2Diff"] = perf_frame["Train R2"] - perf_frame["Test R2"]
perf_frame.sort_values(by=["R2Diff", "Test R2", "Train R2"], ascending=[True, False, False]).style.format({
"Best Training Score (CV)": "{:.2f}",
"Train Score": "{:.2f}",
"Test Score": "{:.2f}",
# "Train R2": "{:.2f}",
# "Test R2": "{:.2f}"
}).hide_index()
# %% [markdown]
'''
# Best Model Selection
'''
# %% [markdown]
'''
# Tune the best model
'''
# %% [markdown]
'''
# Conclusion
'''
|
# %%
import time
import datetime
from scipy.stats import uniform
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RandomizedSearchCV, train_test_split
# Modeling Libraries
from sklearn.linear_model import LinearRegression
# Scoring Libraries
from sklearn.metrics import r2_score
from IPython.display import display
import xgboost as xgb
from traitlets.traitlets import Any, Dict
from pandas.core.frame import DataFrame
import pandas as pd
import numpy as np
from enum import Enum, auto
import warnings
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.figure_factory as ff
import plotly.express as px
warnings.filterwarnings('ignore')
%matplotlib inline
# %% [markdown]
'''
# Pandas settings
'''
# %%
pd.options.display.max_columns = None
pd.options.display.max_rows = 500
pd.options.display.width = None
pd.options.display.max_colwidth = 100
pd.options.display.precision = 3
# %% [markdown]
'''
# Modeling Utilities
'''
# %%
class DataMode(Enum):
ENCODED = auto()
ENCODED_DIM_RED = auto()
def split_data(X, y=None, test_fraction: float = 0.2, shuffle: bool = True, stratify=None):
return train_test_split(X, y, test_size=test_fraction, random_state=42, shuffle=shuffle, stratify=stratify) if y is not None else train_test_split(X, test_size=test_fraction, random_state=42, shuffle=shuffle, stratify=stratify)
def plot_error_patterns(error_data, **kwargs) -> None:
pass
def print_feature_importance():
pass
def print_equation():
pass
# %%
linear = 'algoname'
model_obj = 'model'
params_dict = 'params'
models = {
linear: {model_obj: LinearRegression(), params_dict: dict()}
}
# %% [markdown]
'''
### read data
'''
# %%
base_path = '../../data/'
encoded_train = pd.read_pickle(f"{base_path}encoded_train.pkl")
encoded_train_dim_red = pd.read_pickle(f"{base_path}encoded_train_dim_red.pkl")
target_col_name = 'y'
# %% [markdown]
'''
# Train test split
'''
# %%
X_train_encoded, X_test_encoded, y_train_encoded, y_test_encoded = split_data(encoded_train.drop(target_col_name, axis=1), encoded_train[[target_col_name]])
X_train_dim_red, X_test_dim_red, y_train_dim_red, y_test_dim_red = split_data(encoded_train_dim_red, encoded_train[[target_col_name]])
# %% [markdown]
'''
# Class Balancing - NA
'''
# %% [markdown]
'''
# Create model
'''
# %%
def prepare_error_data_and_plot(model, X, actual_y: pd.DataFrame, pred_y, mode, model_name, plot: bool = False) -> pd.DataFrame:
data_mode_str = "without PCA" if mode == DataMode.ENCODED else "with PCA"
error_df = actual_y.copy()
error_df["Pred"] = pred_y
# error_df["Res"] = error_df["y"] - error_df["Pred"]
# error_df.columns = ["Actual", "Predicted", "Residual"]
if plot:
print(f"Residual Analysis graphs for \"Train Data\" {model_name} {data_mode_str}")
plot_error_patterns(error_df)
return error_df
# %%
show_graphs = False
all_model_metrics = []
for model, model_config in models.items():
print(f"Starting for {model.title()}")
# With and without Dimensionality Reduction
for mode in [DataMode.ENCODED, DataMode.ENCODED_DIM_RED]:
X_train, X_test, y_train, y_test = (X_train_encoded, X_test_encoded, y_train_encoded, y_test_encoded) if mode == DataMode.ENCODED else (
X_train_dim_red, X_test_dim_red, y_train_dim_red, y_test_dim_red)
data_mode_str = "without PCA" if mode == DataMode.ENCODED else "with PCA"
# Hyper-parameter tuning
ra_s_cv = RandomizedSearchCV(model_config.get(model_obj), model_config.get(params_dict), random_state=0, n_jobs=-1, cv=3, verbose=3, return_train_score=True)
start_time = time.perf_counter()
ra_s_cv.fit(X_train, y_train)
end_time = time.perf_counter()
train_pred = ra_s_cv.predict(X_train)
test_pred = ra_s_cv.predict(X_test)
print("-"*50)
print(f"Best Estimator for {model} {data_mode_str} is {ra_s_cv.best_estimator_}\n")
print(f"Best Params for {model} {data_mode_str} are {ra_s_cv.best_params_}\n")
print(f"Cross validation Results for {model} {data_mode_str}\n")
display(pd.DataFrame(ra_s_cv.cv_results_))
print("-"*50)
# Plot evaluation matrix
prepare_error_data_and_plot(ra_s_cv, X_train, y_train, train_pred, mode, model, show_graphs)
prepare_error_data_and_plot(ra_s_cv, X_test, y_test, test_pred, mode, model, show_graphs)
# Record performance
all_model_metrics.append([f"{model.title()} {data_mode_str if mode == DataMode.ENCODED_DIM_RED else ''}", ra_s_cv.best_score_, ra_s_cv.score(X_train, y_train),
ra_s_cv.score(X_test, y_test.values),
# r2_score(y_train, train_pred), r2_score(y_test, test_pred), # Metrics on train and test
end_time-start_time])
print("="*50, "\n")
# %%
perf_frame = pd.DataFrame(all_model_metrics, columns=["Algo", "Best Training Score (CV)", "Train Score", "Test Score",
# "Train R2", "Test R2", # Col name for metrics on train and test
"Time Taken(Sec)"])
perf_frame["R2Diff"] = perf_frame["Train R2"] - perf_frame["Test R2"]
perf_frame.sort_values(by=["R2Diff", "Test R2", "Train R2"], ascending=[True, False, False]).style.format({
"Best Training Score (CV)": "{:.2f}",
"Train Score": "{:.2f}",
"Test Score": "{:.2f}",
# "Train R2": "{:.2f}",
# "Test R2": "{:.2f}"
}).hide_index()
# %% [markdown]
'''
# Best Model Selection
'''
# %% [markdown]
'''
# Tune the best model
'''
# %% [markdown]
'''
# Conclusion
'''
|
import traceback
import json
import os
import asyncio
import concurrent.futures
import datetime
import aiohttp
from aiohttp import web
import uvloop
import jinja2
import humanize
import aiohttp_jinja2
from gidgethub import aiohttp as gh_aiohttp, routing as gh_routing, sansio as gh_sansio
import batch
from .log import log
from .constants import BUCKET
from .github import Repo, FQBranch, WatchedBranch
with open(os.environ.get('HAIL_CI2_OAUTH_TOKEN', 'oauth-token/oauth-token'), 'r') as f:
oauth_token = f.read().strip()
uvloop.install()
watched_branches = [
WatchedBranch(index, FQBranch.from_short_str(bss), deployable)
for (index, [bss, deployable]) in enumerate(json.loads(os.environ.get('HAIL_WATCHED_BRANCHES')))
]
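# Hedged note (assumption, not from the original source): HAIL_WATCHED_BRANCHES is expected
# to hold a JSON list of [branch_short_str, deployable] pairs, where the short string is
# whatever FQBranch.from_short_str parses, e.g. something along the lines of:
#
#     export HAIL_WATCHED_BRANCHES='[["hail-is/hail:master", true]]'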
app = web.Application()
routes = web.RouteTableDef()
@routes.get('/')
@aiohttp_jinja2.template('index.html')
async def index(request): # pylint: disable=unused-argument
wb_configs = []
for i, wb in enumerate(watched_branches):
if wb.prs:
pr_configs = []
for pr in wb.prs.values():
pr_config = {
'number': pr.number,
'title': pr.title,
# FIXME generate links to the merge log
'batch_id': pr.batch.id if pr.batch and hasattr(pr.batch, 'id') else None,
'build_state': pr.build_state,
'review_state': pr.review_state,
'author': pr.author
}
pr_configs.append(pr_config)
else:
pr_configs = None
# FIXME recent deploy history
wb_config = {
'index': i,
'branch': wb.branch.short_str(),
'sha': wb.sha,
# FIXME generate links to the merge log
'deploy_batch_id': wb.deploy_batch.id if wb.deploy_batch and hasattr(wb.deploy_batch, 'id') else None,
'deploy_state': wb.deploy_state,
'repo': wb.branch.repo.short_str(),
'prs': pr_configs
}
wb_configs.append(wb_config)
return {
'watched_branches': wb_configs
}
@routes.get('/watched_branches/{watched_branch_index}/pr/{pr_number}')
@aiohttp_jinja2.template('pr.html')
async def get_pr(request):
watched_branch_index = int(request.match_info['watched_branch_index'])
pr_number = int(request.match_info['pr_number'])
if watched_branch_index < 0 or watched_branch_index >= len(watched_branches):
raise web.HTTPNotFound()
wb = watched_branches[watched_branch_index]
if not wb.prs or pr_number not in wb.prs:
raise web.HTTPNotFound()
pr = wb.prs[pr_number]
config = {}
config['number'] = pr.number
# FIXME
if pr.batch:
if hasattr(pr.batch, 'id'):
status = await pr.batch.status()
for j in status['jobs']:
if 'duration' in j and j['duration'] is not None:
j['duration'] = humanize.naturaldelta(datetime.timedelta(seconds=j['duration']))
attrs = j['attributes']
if 'link' in attrs:
attrs['link'] = attrs['link'].split(',')
config['batch'] = status
config['artifacts'] = f'{BUCKET}/build/{pr.batch.attributes["token"]}'
else:
config['exception'] = str(pr.batch.exception)
batch_client = request.app['batch_client']
batches = await batch_client.list_batches(
attributes={
'test': '1',
'pr': pr_number
})
batches = sorted(batches, key=lambda b: b.id, reverse=True)
config['history'] = [await b.status() for b in batches]
return config
@routes.get('/batches')
@aiohttp_jinja2.template('batches.html')
async def get_batches(request):
batch_client = request.app['batch_client']
batches = await batch_client.list_batches()
statuses = [await b.status() for b in batches]
return {
'batches': statuses
}
@routes.get('/batches/{batch_id}')
@aiohttp_jinja2.template('batch.html')
async def get_batch(request):
batch_id = int(request.match_info['batch_id'])
batch_client = request.app['batch_client']
b = await batch_client.get_batch(batch_id)
status = await b.status()
for j in status['jobs']:
if 'duration' in j and j['duration'] is not None:
j['duration'] = humanize.naturaldelta(datetime.timedelta(seconds=j['duration']))
return {
'batch': status
}
@routes.get('/jobs/{job_id}/log')
@aiohttp_jinja2.template('job_log.html')
async def get_job_log(request):
job_id = int(request.match_info['job_id'])
batch_client = request.app['batch_client']
job = await batch_client.get_job(job_id)
return {
'job_id': job_id,
'job_log': await job.log()
}
@routes.get('/healthcheck')
async def healthcheck(request): # pylint: disable=unused-argument
return web.Response(status=200)
gh_router = gh_routing.Router()
@gh_router.register('pull_request')
async def pull_request_callback(event):
gh_pr = event.data['pull_request']
number = gh_pr['number']
target_branch = FQBranch.from_gh_json(gh_pr['base'])
for wb in watched_branches:
if (wb.prs and number in wb.prs) or (wb.branch == target_branch):
await wb.notify_github_changed(event.app)
@gh_router.register('push')
async def push_callback(event):
data = event.data
ref = data['ref']
if ref.startswith('refs/heads/'):
branch_name = ref[len('refs/heads/'):]
branch = FQBranch(Repo.from_gh_json(data['repository']), branch_name)
for wb in watched_branches:
if wb.branch == branch or any(pr.branch == branch for pr in wb.prs.values()):
await wb.notify_github_changed(event.app)
@gh_router.register('pull_request_review')
async def pull_request_review_callback(event):
gh_pr = event.data['pull_request']
number = gh_pr['number']
for wb in watched_branches:
if number in wb.prs:
await wb.notify_github_changed(event.app)
async def github_callback_handler(request):
event = gh_sansio.Event.from_http(request.headers, await request.read())
event.app = request.app
await gh_router.dispatch(event)
@routes.post('/callback')
async def callback(request):
await asyncio.shield(github_callback_handler(request))
return web.Response(status=200)
async def batch_callback_handler(request):
params = await request.json()
log.info(f'batch callback {params}')
attrs = params.get('attributes')
if attrs:
target_branch = attrs.get('target_branch')
if target_branch:
for wb in watched_branches:
if wb.branch.short_str() == target_branch:
log.info(f'watched_branch {wb.branch.short_str()} notify batch changed')
wb.notify_batch_changed()
@routes.post('/batch_callack')
async def batch_callback(request):
await asyncio.shield(batch_callback_handler(request))
return web.Response(status=200)
async def update_loop(app):
while True:
try:
for wb in watched_branches:
log.info(f'updating {wb.branch.short_str()}')
await wb.update(app)
except concurrent.futures.CancelledError:
raise
except Exception as e: # pylint: disable=broad-except
log.error(f'{wb.branch.short_str()} update failed due to exception: {traceback.format_exc()}{e}')
await asyncio.sleep(300)
routes.static('/static', 'ci/static')
app.add_routes(routes)
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('ci/templates'))
async def on_startup(app):
app['client_session'] = aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=60))
app['github_client'] = gh_aiohttp.GitHubAPI(app['client_session'], 'ci2', oauth_token=oauth_token)
app['batch_client'] = batch.aioclient.BatchClient(app['client_session'], url=os.environ.get('BATCH_SERVER_URL'))
asyncio.ensure_future(update_loop(app))
app.on_startup.append(on_startup)
async def on_cleanup(app):
session = app['client_session']
await session.close()
app.on_cleanup.append(on_cleanup)
def run():
web.run_app(app, host='0.0.0.0', port=5000)
|
import traceback
import json
import os
import asyncio
import concurrent.futures
import datetime
import aiohttp
from aiohttp import web
import uvloop
import jinja2
import humanize
import aiohttp_jinja2
from gidgethub import aiohttp as gh_aiohttp, routing as gh_routing, sansio as gh_sansio
import batch
from .log import log
from .constants import BUCKET
from .github import Repo, FQBranch, WatchedBranch
with open(os.environ.get('HAIL_CI2_OAUTH_TOKEN', 'oauth-token/oauth-token'), 'r') as f:
oauth_token = f.read().strip()
uvloop.install()
watched_branches = [
WatchedBranch(index, FQBranch.from_short_str(bss), deployable)
for (index, [bss, deployable]) in enumerate(json.loads(os.environ.get('HAIL_WATCHED_BRANCHES')))
]
app = web.Application()
routes = web.RouteTableDef()
@routes.get('/')
@aiohttp_jinja2.template('index.html')
async def index(request): # pylint: disable=unused-argument
wb_configs = []
for i, wb in enumerate(watched_branches):
if wb.prs:
pr_configs = []
for pr in wb.prs.values():
pr_config = {
'number': pr.number,
'title': pr.title,
# FIXME generate links to the merge log
'batch_id': pr.batch.id if pr.batch and hasattr(pr.batch, 'id') else None,
'build_state': pr.build_state,
'review_state': pr.review_state,
'author': pr.author
}
pr_configs.append(pr_config)
else:
pr_configs = None
# FIXME recent deploy history
wb_config = {
'index': i,
'branch': wb.branch.short_str(),
'sha': wb.sha,
# FIXME generate links to the merge log
'deploy_batch_id': wb.deploy_batch.id if wb.deploy_batch and hasattr(wb.deploy_batch, 'id') else None,
'deploy_state': wb.deploy_state,
'repo': wb.branch.repo.short_str(),
'prs': pr_configs
}
wb_configs.append(wb_config)
return {
'watched_branches': wb_configs
}
@routes.get('/watched_branches/{watched_branch_index}/pr/{pr_number}')
@aiohttp_jinja2.template('pr.html')
async def get_pr(request):
watched_branch_index = int(request.match_info['watched_branch_index'])
pr_number = int(request.match_info['pr_number'])
if watched_branch_index < 0 or watched_branch_index >= len(watched_branches):
raise web.HTTPNotFound()
wb = watched_branches[watched_branch_index]
if not wb.prs or pr_number not in wb.prs:
raise web.HTTPNotFound()
pr = wb.prs[pr_number]
config = {}
config['number'] = pr.number
# FIXME
if pr.batch:
if hasattr(pr.batch, 'id'):
status = await pr.batch.status()
for j in status['jobs']:
if 'duration' in j and j['duration'] is not None:
j['duration'] = humanize.naturaldelta(datetime.timedelta(seconds=j['duration']))
attrs = j['attributes']
if 'link' in attrs:
attrs['link'] = attrs['link'].split(',')
config['batch'] = status
config['artifacts'] = f'{BUCKET}/build/{pr.batch.attributes["token"]}'
else:
config['exception'] = str(pr.batch.exception)
batch_client = request.app['batch_client']
batches = await batch_client.list_batches(
attributes={
'test': '1',
'pr': pr_number
})
batches = sorted(batches, key=lambda b: b.id, reverse=True)
config['history'] = [await b.status() for b in batches]
return config
@routes.get('/batches')
@aiohttp_jinja2.template('batches.html')
async def get_batches(request):
batch_client = request.app['batch_client']
batches = await batch_client.list_batches()
statuses = [await b.status() for b in batches]
return {
'batches': statuses
}
@routes.get('/batches/{batch_id}')
@aiohttp_jinja2.template('batch.html')
async def get_batch(request):
batch_id = int(request.match_info['batch_id'])
batch_client = request.app['batch_client']
b = await batch_client.get_batch(batch_id)
status = await b.status()
for j in status['jobs']:
if 'duration' in j and j['duration'] is not None:
j['duration'] = humanize.naturaldelta(datetime.timedelta(seconds=j['duration']))
return {
'batch': status
}
@routes.get('/jobs/{job_id}/log')
@aiohttp_jinja2.template('job_log.html')
async def get_job_log(request):
job_id = int(request.match_info['job_id'])
batch_client = request.app['batch_client']
job = await batch_client.get_job(job_id)
return {
'job_id': job_id,
'job_log': await job.log()
}
@routes.get('/healthcheck')
async def healthcheck(request): # pylint: disable=unused-argument
return web.Response(status=200)
gh_router = gh_routing.Router()
@gh_router.register('pull_request')
async def pull_request_callback(event):
gh_pr = event.data['pull_request']
number = gh_pr['number']
target_branch = FQBranch.from_gh_json(gh_pr['base'])
for wb in watched_branches:
if (wb.prs and number in wb.prs) or (wb.branch == target_branch):
await wb.notify_github_changed(event.app)
@gh_router.register('push')
async def push_callback(event):
data = event.data
ref = data['ref']
if ref.startswith('refs/heads/'):
branch_name = ref[len('refs/heads/'):]
branch = FQBranch(Repo.from_gh_json(data['repository']), branch_name)
for wb in watched_branches:
if wb.branch == branch or any(pr.branch == branch for pr in wb.prs.values()):
await wb.notify_github_changed(event.app)
@gh_router.register('pull_request_review')
async def pull_request_review_callback(event):
gh_pr = event.data['pull_request']
number = gh_pr['number']
for wb in watched_branches:
if number in wb.prs:
await wb.notify_github_changed(event.app)
async def github_callback_handler(request):
event = gh_sansio.Event.from_http(request.headers, await request.read())
event.app = request.app
await gh_router.dispatch(event)
@routes.post('/callback')
async def callback(request):
await asyncio.shield(github_callback_handler(request))
return web.Response(status=200)
async def batch_callback_handler(request):
params = await request.json()
log.info(f'batch callback {params}')
attrs = params.get('attributes')
if attrs:
target_branch = attrs.get('target_branch')
if target_branch:
for wb in watched_branches:
if wb.branch.short_str() == target_branch:
log.info(f'watched_branch {wb.branch.short_str()} notify batch changed')
wb.notify_batch_changed()
@routes.post('/batch_callack')
async def batch_callback(request):
await asyncio.shield(batch_callback_handler(request))
return web.Response(status=200)
async def update_loop(app):
while True:
try:
for wb in watched_branches:
log.info(f'updating {wb.branch.short_str()}')
await wb.update(app)
except concurrent.futures.CancelledError:
raise
except Exception as e: # pylint: disable=broad-except
log.error(f'{wb.branch.short_str()} update failed due to exception: {traceback.format_exc()}{e}')
await asyncio.sleep(300)
routes.static('/static', 'ci/static')
app.add_routes(routes)
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('ci/templates'))
async def on_startup(app):
app['client_session'] = aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=60))
app['github_client'] = gh_aiohttp.GitHubAPI(app['client_session'], 'ci2', oauth_token=oauth_token)
app['batch_client'] = batch.aioclient.BatchClient(app['client_session'], url=os.environ.get('BATCH_SERVER_URL'))
asyncio.ensure_future(update_loop(app))
app.on_startup.append(on_startup)
async def on_cleanup(app):
session = app['client_session']
await session.close()
app.on_cleanup.append(on_cleanup)
def run():
web.run_app(app, host='0.0.0.0', port=5000)
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
from dataclasses import dataclass
from typing import Generic, Optional, Sequence, Type, get_type_hints
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.target import (
AsyncField,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
Field,
FloatField,
IntField,
PrimitiveField,
RegisteredTargetTypes,
ScalarField,
SequenceField,
StringField,
StringOrStringSequenceField,
StringSequenceField,
Target,
)
from pants.engine.unions import UnionMembership
from pants.option.global_options import GlobalOptions
from pants.util.objects import get_docstring, get_docstring_summary, pretty_print_type_hint
class TargetTypesOptions(LineOriented, GoalSubsystem):
"""List all the registered target types, including custom plugin types."""
name = "target-types"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--details",
type=str,
metavar="<target_type>",
help="List all of the target type's registered fields.",
)
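# Hedged CLI sketch (illustrative; the exact invocation depends on the Pants setup, and
# `python_library` is just an example target type):
#
#     ./pants target-types
#     ./pants target-types --details=python_library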
class TargetTypes(Goal):
subsystem_cls = TargetTypesOptions
@dataclass(frozen=True)
class AbbreviatedTargetInfo:
alias: str
description: Optional[str]
v1_only: bool
@classmethod
def create(cls, target_type: Type[Target]) -> "AbbreviatedTargetInfo":
return cls(
alias=target_type.alias,
description=get_docstring_summary(target_type),
v1_only=target_type.v1_only,
)
def format_for_cli(self, console: Console, *, longest_target_alias: int) -> str:
chars_before_description = longest_target_alias + 2
alias = console.cyan(f"{self.alias}".ljust(chars_before_description))
if not self.description:
description = "<no description>"
else:
description_lines = textwrap.wrap(self.description, 80 - chars_before_description)
if len(description_lines) > 1:
description_lines = [
description_lines[0],
*(f"{" " * chars_before_description}{line}" for line in description_lines[1:]),
]
description = "\n".join(description_lines)
return f"{alias}{description}\n"
@dataclass(frozen=True)
class FieldInfo:
alias: str
description: Optional[str]
type_hint: str
required: bool
default: Optional[str]
v1_only: bool
@classmethod
def create(cls, field: Type[Field]) -> "FieldInfo":
# NB: It is very common (and encouraged) to subclass Fields to give custom behavior, e.g.
# `PythonSources` subclassing `Sources`. Here, we set `fallback_to_ancestors=True` so that
# we can still generate meaningful documentation for all these custom fields without
# requiring the Field author to rewrite the docstring.
#
        # However, if the original `Field` author did not define a docstring, then we
# would typically fall back to the docstring for `AsyncField`, `PrimitiveField`, or a
# helper class like `StringField`. This is a quirk of this heuristic and it's not
# intentional since these core `Field` types have documentation oriented to the custom
# `Field` author and not the end user filling in fields in a BUILD file target.
description = (
get_docstring(
field,
flatten=True,
fallback_to_ancestors=True,
ignored_ancestors={
*Field.mro(),
AsyncField,
PrimitiveField,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
FloatField,
Generic, # type: ignore[arg-type]
IntField,
ScalarField,
SequenceField,
StringField,
StringOrStringSequenceField,
StringSequenceField,
},
)
or ""
)
if issubclass(field, PrimitiveField):
raw_value_type = get_type_hints(field.compute_value)["raw_value"]
elif issubclass(field, AsyncField):
raw_value_type = get_type_hints(field.sanitize_raw_value)["raw_value"]
else:
raw_value_type = get_type_hints(field.__init__)["raw_value"]
type_hint = pretty_print_type_hint(raw_value_type)
# Check if the field only allows for certain choices.
if issubclass(field, StringField) and field.valid_choices is not None:
valid_choices = sorted(
field.valid_choices
if isinstance(field.valid_choices, tuple)
else (choice.value for choice in field.valid_choices)
)
type_hint = " | ".join([*(repr(c) for c in valid_choices), "None"])
if field.required:
# We hackily remove `None` as a valid option for the field when it's required. This
# greatly simplifies Field definitions because it means that they don't need to
# override the type hints for `PrimitiveField.compute_value()` and
# `AsyncField.sanitize_raw_value()` to indicate that `None` is an invalid type.
type_hint = type_hint.replace(" | None", "")
return cls(
alias=field.alias,
description=description,
type_hint=type_hint,
required=field.required,
default=repr(field.default) if not field.required else None,
v1_only=field.v1_only,
)
def format_for_cli(self, console: Console) -> str:
field_alias = console.magenta(f"{self.alias}")
indent = " "
required_or_default = "required" if self.required else f"default: {self.default}"
type_info = console.cyan(f"{indent}type: {self.type_hint}, {required_or_default}")
lines = [field_alias, type_info]
if self.description:
lines.extend(f"{indent}{line}" for line in textwrap.wrap(self.description, 80))
return "\n".join(f"{indent}{line}" for line in lines)
@dataclass(frozen=True)
class VerboseTargetInfo:
alias: str
description: Optional[str]
fields: Sequence[FieldInfo]
@classmethod
def create(
cls, target_type: Type[Target], *, union_membership: UnionMembership
) -> "VerboseTargetInfo":
return cls(
alias=target_type.alias,
description=get_docstring(target_type),
fields=[
FieldInfo.create(field)
for field in target_type.class_field_types(union_membership=union_membership)
],
)
def format_for_cli(self, console: Console, *, v1_disabled: bool) -> str:
output = [console.green(f"{self.alias}\n{"-" * len(self.alias)}\n")]
if self.description:
output.append(f"{self.description}\n")
output.extend(
[
"Valid fields:\n",
*sorted(
f"{field.format_for_cli(console)}\n"
for field in self.fields
if not field.alias.startswith("_") and (not v1_disabled or not field.v1_only)
),
]
)
return "\n".join(output).rstrip()
@goal_rule
def list_target_types(
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
target_types_options: TargetTypesOptions,
global_options: GlobalOptions,
console: Console,
) -> TargetTypes:
v1_disabled = not global_options.options.v1
with target_types_options.line_oriented(console) as print_stdout:
if target_types_options.values.details:
alias = target_types_options.values.details
target_type = registered_target_types.aliases_to_types.get(alias)
if target_type is None:
raise ValueError(
f"Unrecognized target type {repr(alias)}. All registered "
f"target types: {list(registered_target_types.aliases)}"
)
verbose_target_info = VerboseTargetInfo.create(
target_type, union_membership=union_membership
)
print_stdout("")
print_stdout(verbose_target_info.format_for_cli(console, v1_disabled=v1_disabled))
else:
title_text = "Target types"
title = console.green(f"{title_text}\n{"-" * len(title_text)}")
target_infos = [
AbbreviatedTargetInfo.create(target_type)
for target_type in registered_target_types.types
]
longest_target_alias = max(
len(target_type.alias) for target_type in registered_target_types.types
)
lines = [
f"\n{title}\n",
textwrap.fill(
"Use `./pants target-types --details=$target_type` to get detailed "
"information for a particular target type.",
80,
),
"\n",
*(
target_info.format_for_cli(console, longest_target_alias=longest_target_alias)
for target_info in target_infos
if not target_info.alias.startswith("_")
and (not v1_disabled or not target_info.v1_only)
),
]
print_stdout("\n".join(lines).rstrip())
return TargetTypes(exit_code=0)
def rules():
return [list_target_types]
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
from dataclasses import dataclass
from typing import Generic, Optional, Sequence, Type, get_type_hints
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.target import (
AsyncField,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
Field,
FloatField,
IntField,
PrimitiveField,
RegisteredTargetTypes,
ScalarField,
SequenceField,
StringField,
StringOrStringSequenceField,
StringSequenceField,
Target,
)
from pants.engine.unions import UnionMembership
from pants.option.global_options import GlobalOptions
from pants.util.objects import get_docstring, get_docstring_summary, pretty_print_type_hint
class TargetTypesOptions(LineOriented, GoalSubsystem):
"""List all the registered target types, including custom plugin types."""
name = "target-types"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--details",
type=str,
metavar="<target_type>",
help="List all of the target type's registered fields.",
)
class TargetTypes(Goal):
subsystem_cls = TargetTypesOptions
@dataclass(frozen=True)
class AbbreviatedTargetInfo:
alias: str
description: Optional[str]
v1_only: bool
@classmethod
def create(cls, target_type: Type[Target]) -> "AbbreviatedTargetInfo":
return cls(
alias=target_type.alias,
description=get_docstring_summary(target_type),
v1_only=target_type.v1_only,
)
def format_for_cli(self, console: Console, *, longest_target_alias: int) -> str:
chars_before_description = longest_target_alias + 2
alias = console.cyan(f"{self.alias}".ljust(chars_before_description))
if not self.description:
description = "<no description>"
else:
description_lines = textwrap.wrap(self.description, 80 - chars_before_description)
if len(description_lines) > 1:
description_lines = [
description_lines[0],
*(f"{' ' * chars_before_description}{line}" for line in description_lines[1:]),
]
description = "\n".join(description_lines)
return f"{alias}{description}\n"
@dataclass(frozen=True)
class FieldInfo:
alias: str
description: Optional[str]
type_hint: str
required: bool
default: Optional[str]
v1_only: bool
@classmethod
def create(cls, field: Type[Field]) -> "FieldInfo":
# NB: It is very common (and encouraged) to subclass Fields to give custom behavior, e.g.
# `PythonSources` subclassing `Sources`. Here, we set `fallback_to_ancestors=True` so that
# we can still generate meaningful documentation for all these custom fields without
# requiring the Field author to rewrite the docstring.
#
        # However, if the original `Field` author did not define a docstring, then we
# would typically fall back to the docstring for `AsyncField`, `PrimitiveField`, or a
# helper class like `StringField`. This is a quirk of this heuristic and it's not
# intentional since these core `Field` types have documentation oriented to the custom
# `Field` author and not the end user filling in fields in a BUILD file target.
description = (
get_docstring(
field,
flatten=True,
fallback_to_ancestors=True,
ignored_ancestors={
*Field.mro(),
AsyncField,
PrimitiveField,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
FloatField,
Generic, # type: ignore[arg-type]
IntField,
ScalarField,
SequenceField,
StringField,
StringOrStringSequenceField,
StringSequenceField,
},
)
or ""
)
if issubclass(field, PrimitiveField):
raw_value_type = get_type_hints(field.compute_value)["raw_value"]
elif issubclass(field, AsyncField):
raw_value_type = get_type_hints(field.sanitize_raw_value)["raw_value"]
else:
raw_value_type = get_type_hints(field.__init__)["raw_value"]
type_hint = pretty_print_type_hint(raw_value_type)
# Check if the field only allows for certain choices.
if issubclass(field, StringField) and field.valid_choices is not None:
valid_choices = sorted(
field.valid_choices
if isinstance(field.valid_choices, tuple)
else (choice.value for choice in field.valid_choices)
)
type_hint = " | ".join([*(repr(c) for c in valid_choices), "None"])
if field.required:
# We hackily remove `None` as a valid option for the field when it's required. This
# greatly simplifies Field definitions because it means that they don't need to
# override the type hints for `PrimitiveField.compute_value()` and
# `AsyncField.sanitize_raw_value()` to indicate that `None` is an invalid type.
type_hint = type_hint.replace(" | None", "")
return cls(
alias=field.alias,
description=description,
type_hint=type_hint,
required=field.required,
default=repr(field.default) if not field.required else None,
v1_only=field.v1_only,
)
def format_for_cli(self, console: Console) -> str:
field_alias = console.magenta(f"{self.alias}")
indent = " "
required_or_default = "required" if self.required else f"default: {self.default}"
type_info = console.cyan(f"{indent}type: {self.type_hint}, {required_or_default}")
lines = [field_alias, type_info]
if self.description:
lines.extend(f"{indent}{line}" for line in textwrap.wrap(self.description, 80))
return "\n".join(f"{indent}{line}" for line in lines)
@dataclass(frozen=True)
class VerboseTargetInfo:
alias: str
description: Optional[str]
fields: Sequence[FieldInfo]
@classmethod
def create(
cls, target_type: Type[Target], *, union_membership: UnionMembership
) -> "VerboseTargetInfo":
return cls(
alias=target_type.alias,
description=get_docstring(target_type),
fields=[
FieldInfo.create(field)
for field in target_type.class_field_types(union_membership=union_membership)
],
)
def format_for_cli(self, console: Console, *, v1_disabled: bool) -> str:
output = [console.green(f"{self.alias}\n{'-' * len(self.alias)}\n")]
if self.description:
output.append(f"{self.description}\n")
output.extend(
[
"Valid fields:\n",
*sorted(
f"{field.format_for_cli(console)}\n"
for field in self.fields
if not field.alias.startswith("_") and (not v1_disabled or not field.v1_only)
),
]
)
return "\n".join(output).rstrip()
@goal_rule
def list_target_types(
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
target_types_options: TargetTypesOptions,
global_options: GlobalOptions,
console: Console,
) -> TargetTypes:
v1_disabled = not global_options.options.v1
with target_types_options.line_oriented(console) as print_stdout:
if target_types_options.values.details:
alias = target_types_options.values.details
target_type = registered_target_types.aliases_to_types.get(alias)
if target_type is None:
raise ValueError(
f"Unrecognized target type {repr(alias)}. All registered "
f"target types: {list(registered_target_types.aliases)}"
)
verbose_target_info = VerboseTargetInfo.create(
target_type, union_membership=union_membership
)
print_stdout("")
print_stdout(verbose_target_info.format_for_cli(console, v1_disabled=v1_disabled))
else:
title_text = "Target types"
title = console.green(f"{title_text}\n{'-' * len(title_text)}")
target_infos = [
AbbreviatedTargetInfo.create(target_type)
for target_type in registered_target_types.types
]
longest_target_alias = max(
len(target_type.alias) for target_type in registered_target_types.types
)
lines = [
f"\n{title}\n",
textwrap.fill(
"Use `./pants target-types --details=$target_type` to get detailed "
"information for a particular target type.",
80,
),
"\n",
*(
target_info.format_for_cli(console, longest_target_alias=longest_target_alias)
for target_info in target_infos
if not target_info.alias.startswith("_")
and (not v1_disabled or not target_info.v1_only)
),
]
print_stdout("\n".join(lines).rstrip())
return TargetTypes(exit_code=0)
def rules():
return [list_target_types]
|
import tensorflow as tf
import os
import numpy as np
from data.preprocess import load_image
from models.encoder import CNN_Encoder
from models.decoder import RNN_Decoder
import pickle
from train import embedding_dim, units, vocab_size, img_name_train, img_name_val, max_length, cap_val
import matplotlib.pyplot as plt
from PIL import Image
from config import config
from pathlib import Path
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
num_steps = len(img_name_train)
BASE_DIR = os.path.join(config.base_dir, 'datasets')
tokenizer_path = os.path.join(BASE_DIR, 'tokenizer.pkl')
with open(tokenizer_path, 'rb') as f:
tokenizer = pickle.load(f)
attention_features_shape = 64
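# Caption generation for a single image: extract InceptionV3 features, then run the decoder
# step by step, sampling each next word from the predicted distribution (tf.random.categorical)
# until '<end>' or max_length is reached, recording the attention weights at every step.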
def evaluate(image):
image_model = tf.keras.applications.InceptionV3(
include_top=False, weights='imagenet')
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
attention_plot = np.zeros((max_length, attention_features_shape))
hidden = decoder.reset_state(batch_size=1)
temp_input = tf.expand_dims(load_image(image)[0], 0)
img_tensor_val = image_features_extract_model(
temp_input)
img_tensor_val = tf.reshape(
img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))
features = encoder(img_tensor_val)
dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
result = []
for i in range(max_length):
predictions, hidden, attention_weights = decoder(
dec_input, features, hidden)
attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()
predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy()
result.append(tokenizer.index_word[predicted_id])
if tokenizer.index_word[predicted_id] == '<end>':
return result, attention_plot
dec_input = tf.expand_dims([predicted_id], 0)
attention_plot = attention_plot[:len(result), :]
return result, attention_plot
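# Overlay the decoder's attention weights on the input image: one subplot per generated word,
# with the corresponding attention map resized to 8x8 and drawn semi-transparently on top.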
def plot_attention(image, result, attention_plot):
temp_image = np.array(Image.open(image))
fig = plt.figure(figsize=(10, 10))
len_result = len(result)
for l in range(len_result):
temp_att = np.resize(attention_plot[l], (8, 8))
        # Lay the word-level attention maps out on a square grid large enough for len_result subplots.
        grid_size = int(np.ceil(np.sqrt(len_result)))
        ax = fig.add_subplot(grid_size, grid_size, l+1)
ax.set_title(result[l])
img = ax.imshow(temp_image)
ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())
plt.tight_layout()
plt.show()
rid = np.random.randint(0, len(img_name_val))
image = f"{Path(config.base_dir,"datasets","images",img_name_val[rid])}"
real_caption = ' '.join([tokenizer.index_word[i]
for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print(img_name_val[rid])
print('Real Caption:', real_caption)
print('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
img = Image.open(image)
w, h = img.size
plt.text(0, h+50, 'Real Caption: {}\nPrediction Caption: {}'.format(real_caption, ' '.join(result)), fontsize=12)
plt.imshow(img)
plt.show()
|
import tensorflow as tf
import os
import numpy as np
from data.preprocess import load_image
from models.encoder import CNN_Encoder
from models.decoder import RNN_Decoder
import pickle
from train import embedding_dim, units, vocab_size, img_name_train, img_name_val, max_length, cap_val
import matplotlib.pyplot as plt
from PIL import Image
from config import config
from pathlib import Path
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
num_steps = len(img_name_train)
BASE_DIR = os.path.join(config.base_dir, 'datasets')
tokenizer_path = os.path.join(BASE_DIR, 'tokenizer.pkl')
with open(tokenizer_path, 'rb') as f:
tokenizer = pickle.load(f)
attention_features_shape = 64
def evaluate(image):
image_model = tf.keras.applications.InceptionV3(
include_top=False, weights='imagenet')
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
attention_plot = np.zeros((max_length, attention_features_shape))
hidden = decoder.reset_state(batch_size=1)
temp_input = tf.expand_dims(load_image(image)[0], 0)
img_tensor_val = image_features_extract_model(
temp_input)
img_tensor_val = tf.reshape(
img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))
features = encoder(img_tensor_val)
dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
result = []
for i in range(max_length):
predictions, hidden, attention_weights = decoder(
dec_input, features, hidden)
attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()
predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy()
result.append(tokenizer.index_word[predicted_id])
if tokenizer.index_word[predicted_id] == '<end>':
return result, attention_plot
dec_input = tf.expand_dims([predicted_id], 0)
attention_plot = attention_plot[:len(result), :]
return result, attention_plot
def plot_attention(image, result, attention_plot):
temp_image = np.array(Image.open(image))
fig = plt.figure(figsize=(10, 10))
len_result = len(result)
for l in range(len_result):
temp_att = np.resize(attention_plot[l], (8, 8))
        # Lay the word-level attention maps out on a square grid large enough for len_result subplots.
        grid_size = int(np.ceil(np.sqrt(len_result)))
        ax = fig.add_subplot(grid_size, grid_size, l+1)
ax.set_title(result[l])
img = ax.imshow(temp_image)
ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())
plt.tight_layout()
plt.show()
rid = np.random.randint(0, len(img_name_val))
image = f"{Path(config.base_dir,'datasets','images',img_name_val[rid])}"
real_caption = ' '.join([tokenizer.index_word[i]
for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print(img_name_val[rid])
print('Real Caption:', real_caption)
print('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
img = Image.open(image)
w, h = img.size
plt.text(0, h+50, 'Real Caption: {}\nPrediction Caption: {}'.format(real_caption, ' '.join(result)), fontsize=12)
plt.imshow(img)
plt.show()
|
from collections import OrderedDict
from entities.person import Person
# NOTE: `httpreq` (used in send_server) is assumed to be a project-local helper module
# exposing send_staff_activity(room_id, id, entry, exit); adjust this import if the
# actual module lives elsewhere.
import httpreq
class PersonTracker():
def __init__(self):
self.persons = OrderedDict()
self.persons_activities = []
def register(self, name, entry_time):
self.persons[name] = Person(name, entry_time, 0)
def mark_person_disappeared(self, name, exit_time):
if name in self.persons.keys():
self.persons_activities.append(Person(name, self.persons[name].entry_time, exit_time))
def print_persons_activity(self):
for person in self.persons_activities:
print(f'Staff Name:{person.name}, Entry Time={person.entry_time.strftime('%d %b %Y %H:%M:%S')}, '
f'Exit Time={person.exit_time.strftime('%d %b %Y %H:%M:%S')}')
def write_file(self):
with open("person_activities.txt", mode="w") as f:
sorted_by_name = sorted(self.persons_activities, key=lambda x: x.name)
for person in sorted_by_name:
f.write(f'Staff Name:{person.name}, Entry Time={person.entry_time.strftime('%d %b %Y %H:%M:%S')}, '
f'Exit Time={person.exit_time.strftime('%d %b %Y %H:%M:%S')}\n')
f.close()
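    # Report the most recent activity recorded for `name` to the backend via the `httpreq`
    # helper (assumed to expose send_staff_activity) and drop the person from the in-memory
    # registry.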
def send_server(self, name, room_id):
for persons_activity in reversed(self.persons_activities):
if persons_activity.name == name:
index = self.persons_activities.index(persons_activity)
entry = self.persons_activities[index].entry_time
exit = self.persons_activities[index].exit_time
id = name.split('_')[2]
httpreq.send_staff_activity(room_id,id,entry,exit)
del self.persons[name]
break
|
from collections import OrderedDict
from entities.person import Person
# NOTE: `httpreq` (used in send_server) is assumed to be a project-local helper module
# exposing send_staff_activity(room_id, id, entry, exit); adjust this import if the
# actual module lives elsewhere.
import httpreq
class PersonTracker():
def __init__(self):
self.persons = OrderedDict()
self.persons_activities = []
def register(self, name, entry_time):
self.persons[name] = Person(name, entry_time, 0)
def mark_person_disappeared(self, name, exit_time):
if name in self.persons.keys():
self.persons_activities.append(Person(name, self.persons[name].entry_time, exit_time))
def print_persons_activity(self):
for person in self.persons_activities:
print(f'Staff Name:{person.name}, Entry Time={person.entry_time.strftime("%d %b %Y %H:%M:%S")}, '
f'Exit Time={person.exit_time.strftime("%d %b %Y %H:%M:%S")}')
def write_file(self):
with open("person_activities.txt", mode="w") as f:
sorted_by_name = sorted(self.persons_activities, key=lambda x: x.name)
for person in sorted_by_name:
f.write(f'Staff Name:{person.name}, Entry Time={person.entry_time.strftime("%d %b %Y %H:%M:%S")}, '
f'Exit Time={person.exit_time.strftime("%d %b %Y %H:%M:%S")}\n')
f.close()
def send_server(self, name, room_id):
for persons_activity in reversed(self.persons_activities):
if persons_activity.name == name:
index = self.persons_activities.index(persons_activity)
entry = self.persons_activities[index].entry_time
exit = self.persons_activities[index].exit_time
id = name.split('_')[2]
httpreq.send_staff_activity(room_id,id,entry,exit)
del self.persons[name]
break
|
import multiprocessing
import queue
import random
import threading
import unittest
import requests
import time
from dateutil.parser import parse
from .fixtures import APITestCase
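# Exercises the Docker-compatible container endpoints; `self.uri()` and `self.podman_url`
# (provided by APITestCase) target the compat API served by the Podman service under test.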
class ContainerTestCase(APITestCase):
def test_list(self):
r = requests.get(self.uri("/containers/json"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
obj = r.json()
self.assertEqual(len(obj), 1)
def test_list_filters(self):
r = requests.get(
self.podman_url
+ "/v1.40/containers/json?filters%3D%7B%22status%22%3A%5B%22running%22%5D%7D"
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
containerAmnt = len(payload)
self.assertGreater(containerAmnt, 0)
def test_list_all(self):
r = requests.get(self.uri("/containers/json?all=true"))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
def test_inspect(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/json")))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
_ = parse(r.json()["Created"])
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={
"Cmd": ["top"],
"Image": "alpine:latest",
"Healthcheck": {
"Test": ["CMD", "pidof", "top"],
"Interval": 5000000000,
"Timeout": 2000000000,
"Retries": 3,
"StartPeriod": 5000000000,
},
},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
out = r.json()
self.assertIsNotNone(out["State"].get("Health"))
self.assertListEqual(["CMD", "pidof", "top"], out["Config"]["Healthcheck"]["Test"])
self.assertEqual(5000000000, out["Config"]["Healthcheck"]["Interval"])
self.assertEqual(2000000000, out["Config"]["Healthcheck"]["Timeout"])
self.assertEqual(3, out["Config"]["Healthcheck"]["Retries"])
self.assertEqual(5000000000, out["Config"]["Healthcheck"]["StartPeriod"])
r = requests.get(self.uri(f"/containers/{container_id}/json"))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
out = r.json()
hc = out["Config"]["Healthcheck"]["Test"]
self.assertListEqual(["CMD", "pidof", "top"], hc)
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
self.assertEqual(r.status_code, 200, r.text)
out = r.json()
state = out["State"]["Health"]
self.assertIsInstance(state, dict)
def test_stats(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/stats?stream=false")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertId(r.content)
r = requests.get(
self.uri(self.resolve_container("/containers/{}/stats?stream=false&one-shot=true"))
)
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertId(r.content)
def test_delete(self):
r = requests.delete(self.uri(self.resolve_container("/containers/{}?force=true")))
self.assertEqual(r.status_code, 200, r.text)
def test_stop(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_start(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_restart(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/restart")), timeout=5)
self.assertEqual(r.status_code, 204, r.text)
def test_resize(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/resize?h=43&w=80")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertEqual(r.text, "", r.text)
def test_attach(self):
self.skipTest("FIXME: Test timeouts")
r = requests.post(self.uri(self.resolve_container("/containers/{}/attach?logs=true")), timeout=5)
self.assertIn(r.status_code, (101, 500), r.text)
def test_logs(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/logs?stdout=true")))
self.assertEqual(r.status_code, 200, r.text)
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={"Cmd": ["top", "ls"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(
self.podman_url
+ f"/v1.40/containers/{payload["Id"]}/logs?follow=false&stdout=true&until=0"
)
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(
self.podman_url
+ f"/v1.40/containers/{payload["Id"]}/logs?follow=false&stdout=true&until=1"
)
self.assertEqual(r.status_code, 200, r.text)
def test_commit(self):
r = requests.post(self.uri(self.resolve_container("/commit?container={}")))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
obj = r.json()
self.assertIsInstance(obj, dict)
def test_prune(self):
name = f"Container_{random.getrandbits(160):x}"
r = requests.post(
self.podman_url + f"/v1.40/containers/create?name={name}",
json={
"Cmd": ["cp", "/etc/motd", "/motd.size_test"],
"Image": "alpine:latest",
"NetworkDisabled": True,
},
)
self.assertEqual(r.status_code, 201, r.text)
create = r.json()
r = requests.post(self.podman_url + f"/v1.40/containers/{create["Id"]}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(self.podman_url + f"/v1.40/containers/{create["Id"]}/wait")
self.assertEqual(r.status_code, 200, r.text)
wait = r.json()
self.assertEqual(wait["StatusCode"], 0, wait["Error"])
prune = requests.post(self.podman_url + "/v1.40/containers/prune")
self.assertEqual(prune.status_code, 200, prune.status_code)
prune_payload = prune.json()
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
self.assertIn(create["Id"], prune_payload["ContainersDeleted"])
# Delete any orphaned containers
r = requests.get(self.podman_url + "/v1.40/containers/json?all=true")
self.assertEqual(r.status_code, 200, r.text)
        for container in r.json():
            requests.delete(
                self.podman_url + f"/v1.40/containers/{container['Id']}?force=true"
            )
# Image prune here tied to containers freeing up
prune = requests.post(self.podman_url + "/v1.40/images/prune")
self.assertEqual(prune.status_code, 200, prune.text)
prune_payload = prune.json()
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
# FIXME need method to determine which image is going to be "pruned" to fix test
# TODO should handler be recursive when deleting images?
# self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"])
# FIXME (@vrothberg): I commented this line out during the `libimage` migration.
# It doesn't make sense to report anything to be deleted if the reclaimed space
# is zero. I think the test needs some rewrite.
# self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"])
def test_status(self):
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={"Cmd": ["top"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{'id':['{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertEqual(payload[0]["Status"], "Created")
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{'id':['{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/pause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{'id':['{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)"))
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/unpause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/stop")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{'id':['{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Exited"))
r = requests.delete(self.podman_url + f"/v1.40/containers/{container_id}")
self.assertEqual(r.status_code, 204, r.text)
def test_top_no_stream(self):
uri = self.uri(self.resolve_container("/containers/{}/top"))
q = queue.Queue()
def _impl(fifo):
fifo.put(requests.get(uri, params={"stream": False}, timeout=2))
top = threading.Thread(target=_impl, args=(q,))
top.start()
time.sleep(2)
self.assertFalse(top.is_alive(), f"GET {uri} failed to return in 2s")
qr = q.get(False)
self.assertEqual(qr.status_code, 200, qr.text)
qr.close()
top.join()
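    # Streaming variant: run the GET in a worker thread, let it stream for a few seconds,
    # then flip `stop_thread` (read through the closure) so the reader loop exits cleanly.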
def test_top_stream(self):
uri = self.uri(self.resolve_container("/containers/{}/top"))
q = queue.Queue()
stop_thread = False
def _impl(fifo, stop):
try:
with requests.get(uri, params={"stream": True, "delay": 1}, stream=True) as r:
r.raise_for_status()
fifo.put(r)
for buf in r.iter_lines(chunk_size=None):
if stop():
break
fifo.put(buf)
except Exception:
pass
top = threading.Thread(target=_impl, args=(q, (lambda: stop_thread)))
top.start()
time.sleep(4)
self.assertTrue(top.is_alive(), f"GET {uri} exited too soon")
stop_thread = True
for _ in range(10):
try:
qr = q.get_nowait()
if qr is not None:
self.assertEqual(qr.status_code, 200)
qr.close()
break
except queue.Empty:
pass
finally:
time.sleep(1)
else:
self.fail("Server failed to respond in 10s")
top.join()
if __name__ == "__main__":
unittest.main()
|
import multiprocessing
import queue
import random
import threading
import unittest
import requests
import time
from dateutil.parser import parse
from .fixtures import APITestCase
class ContainerTestCase(APITestCase):
def test_list(self):
r = requests.get(self.uri("/containers/json"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
obj = r.json()
self.assertEqual(len(obj), 1)
def test_list_filters(self):
r = requests.get(
self.podman_url
+ "/v1.40/containers/json?filters%3D%7B%22status%22%3A%5B%22running%22%5D%7D"
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
containerAmnt = len(payload)
self.assertGreater(containerAmnt, 0)
def test_list_all(self):
r = requests.get(self.uri("/containers/json?all=true"))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
def test_inspect(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/json")))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
_ = parse(r.json()["Created"])
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={
"Cmd": ["top"],
"Image": "alpine:latest",
"Healthcheck": {
"Test": ["CMD", "pidof", "top"],
"Interval": 5000000000,
"Timeout": 2000000000,
"Retries": 3,
"StartPeriod": 5000000000,
},
},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
out = r.json()
self.assertIsNotNone(out["State"].get("Health"))
self.assertListEqual(["CMD", "pidof", "top"], out["Config"]["Healthcheck"]["Test"])
self.assertEqual(5000000000, out["Config"]["Healthcheck"]["Interval"])
self.assertEqual(2000000000, out["Config"]["Healthcheck"]["Timeout"])
self.assertEqual(3, out["Config"]["Healthcheck"]["Retries"])
self.assertEqual(5000000000, out["Config"]["Healthcheck"]["StartPeriod"])
r = requests.get(self.uri(f"/containers/{container_id}/json"))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
out = r.json()
hc = out["Config"]["Healthcheck"]["Test"]
self.assertListEqual(["CMD", "pidof", "top"], hc)
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
self.assertEqual(r.status_code, 200, r.text)
out = r.json()
state = out["State"]["Health"]
self.assertIsInstance(state, dict)
def test_stats(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/stats?stream=false")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertId(r.content)
r = requests.get(
self.uri(self.resolve_container("/containers/{}/stats?stream=false&one-shot=true"))
)
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertId(r.content)
def test_delete(self):
r = requests.delete(self.uri(self.resolve_container("/containers/{}?force=true")))
self.assertEqual(r.status_code, 200, r.text)
def test_stop(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_start(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_restart(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/restart")), timeout=5)
self.assertEqual(r.status_code, 204, r.text)
def test_resize(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/resize?h=43&w=80")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertEqual(r.text, "", r.text)
def test_attach(self):
self.skipTest("FIXME: Test timeouts")
r = requests.post(self.uri(self.resolve_container("/containers/{}/attach?logs=true")), timeout=5)
self.assertIn(r.status_code, (101, 500), r.text)
def test_logs(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/logs?stdout=true")))
self.assertEqual(r.status_code, 200, r.text)
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={"Cmd": ["top", "ls"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(
self.podman_url
+ f"/v1.40/containers/{payload['Id']}/logs?follow=false&stdout=true&until=0"
)
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(
self.podman_url
+ f"/v1.40/containers/{payload['Id']}/logs?follow=false&stdout=true&until=1"
)
self.assertEqual(r.status_code, 200, r.text)
def test_commit(self):
r = requests.post(self.uri(self.resolve_container("/commit?container={}")))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
obj = r.json()
self.assertIsInstance(obj, dict)
def test_prune(self):
name = f"Container_{random.getrandbits(160):x}"
r = requests.post(
self.podman_url + f"/v1.40/containers/create?name={name}",
json={
"Cmd": ["cp", "/etc/motd", "/motd.size_test"],
"Image": "alpine:latest",
"NetworkDisabled": True,
},
)
self.assertEqual(r.status_code, 201, r.text)
create = r.json()
r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/wait")
self.assertEqual(r.status_code, 200, r.text)
wait = r.json()
self.assertEqual(wait["StatusCode"], 0, wait["Error"])
prune = requests.post(self.podman_url + "/v1.40/containers/prune")
self.assertEqual(prune.status_code, 200, prune.status_code)
prune_payload = prune.json()
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
self.assertIn(create["Id"], prune_payload["ContainersDeleted"])
# Delete any orphaned containers
r = requests.get(self.podman_url + "/v1.40/containers/json?all=true")
self.assertEqual(r.status_code, 200, r.text)
        for container in r.json():
            requests.delete(
                self.podman_url + f"/v1.40/containers/{container['Id']}?force=true"
            )
# Image prune here tied to containers freeing up
prune = requests.post(self.podman_url + "/v1.40/images/prune")
self.assertEqual(prune.status_code, 200, prune.text)
prune_payload = prune.json()
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
# FIXME need method to determine which image is going to be "pruned" to fix test
# TODO should handler be recursive when deleting images?
# self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"])
# FIXME (@vrothberg): I commented this line out during the `libimage` migration.
# It doesn't make sense to report anything to be deleted if the reclaimed space
# is zero. I think the test needs some rewrite.
# self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"])
def test_status(self):
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={"Cmd": ["top"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertEqual(payload[0]["Status"], "Created")
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/pause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)"))
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/unpause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/stop")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Exited"))
r = requests.delete(self.podman_url + f"/v1.40/containers/{container_id}")
self.assertEqual(r.status_code, 204, r.text)
def test_top_no_stream(self):
uri = self.uri(self.resolve_container("/containers/{}/top"))
q = queue.Queue()
def _impl(fifo):
fifo.put(requests.get(uri, params={"stream": False}, timeout=2))
top = threading.Thread(target=_impl, args=(q,))
top.start()
time.sleep(2)
self.assertFalse(top.is_alive(), f"GET {uri} failed to return in 2s")
qr = q.get(False)
self.assertEqual(qr.status_code, 200, qr.text)
qr.close()
top.join()
def test_top_stream(self):
uri = self.uri(self.resolve_container("/containers/{}/top"))
q = queue.Queue()
stop_thread = False
def _impl(fifo, stop):
try:
with requests.get(uri, params={"stream": True, "delay": 1}, stream=True) as r:
r.raise_for_status()
fifo.put(r)
for buf in r.iter_lines(chunk_size=None):
if stop():
break
fifo.put(buf)
except Exception:
pass
top = threading.Thread(target=_impl, args=(q, (lambda: stop_thread)))
top.start()
time.sleep(4)
self.assertTrue(top.is_alive(), f"GET {uri} exited too soon")
stop_thread = True
for _ in range(10):
try:
qr = q.get_nowait()
if qr is not None:
self.assertEqual(qr.status_code, 200)
qr.close()
break
except queue.Empty:
pass
finally:
time.sleep(1)
else:
self.fail("Server failed to respond in 10s")
top.join()
if __name__ == "__main__":
unittest.main()
|
import argparse
import spotipy
import secrets as user_secrets
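# `secrets` here is presumably a local secrets.py holding CLIENT_ID, CLIENT_SECRET and
# REDIRECT_URI (see the SpotifyOAuth call below), not the standard-library module.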
from spotipy.oauth2 import SpotifyOAuth
PERMISSIONS_SCOPE = 'user-library-read playlist-modify-public'
FILTERS_AND_ARGS = None
# Track stats
FILTERED, ADDED, SKIPPED = 0, 0, 0
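# Illustrative invocation (hypothetical values): keep liked songs that are energetic and
# under 140 BPM with:
#     python <this_script>.py -e 0.7 -mt 140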
def get_args():
parser = argparse.ArgumentParser(description='Creates a playlist for user.', add_help=True)
parser.add_argument('-p', '--playlist-id', type=str,
help='Specify a custom playlist ID, instead of using liked songs playlist.')
# Argument descriptions source:
# https://developer.spotify.com/documentation/web-api/reference/tracks/get-several-audio-features/
parser.add_argument('-a', '--min-acousticness', type=float,
help='Min. value. A confidence measure from 0.0 to 1.0 of whether the track is acoustic. '
'1.0 represents high confidence the track is acoustic.')
parser.add_argument('-ma', '--max-acousticness', type=float,
help='Max. value for acousticness.')
parser.add_argument('-d', '--min-danceability', type=float,
help='Min. value. Danceability describes how suitable a track is for dancing based on a '
'combination of musical elements including tempo, rhythm stability, beat strength, '
'and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable.')
    parser.add_argument('-md', '--max-danceability', type=float,
                        help='Max. value for danceability.')
parser.add_argument('-du', '--min-duration_ms', type=int,
help='Min. value. The duration of the track in milliseconds.')
parser.add_argument('-mdu', '--max-duration_ms', type=int,
help='Max. value for duration_ms.')
parser.add_argument('-e', '--min-energy', type=float,
help='Min. value. Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of '
'intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. '
'For example, death metal has high energy, while a Bach prelude scores low on the scale. '
'Perceptual features contributing to this attribute include dynamic range, '
'perceived loudness, timbre, onset rate, and general entropy.')
parser.add_argument('-me', '--max-energy', type=float,
help='Max. value for energy.')
parser.add_argument('-i', '--min-instrumentalness', type=float,
help='Min. value. Predicts whether a track contains no vocals. “Ooh” and “aah” sounds are treated as '
'instrumental in this context. Rap or spoken word tracks are clearly “vocal”. The closer the '
'instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content. '
'Values above 0.5 are intended to represent instrumental tracks, but confidence '
'is higher as the value approaches 1.0.')
parser.add_argument('-mi', '--max-instrumentalness', type=float,
help='Max. value for instrumentalness.')
parser.add_argument('-k', '--min-key', type=int,
help='Min. value. The key the track is in. Integers map to pitches using standard Pitch Class notation. '
'E.g. 0 = C, 1 = C/D, 2 = D, and so on.')
parser.add_argument('-mk', '--max-key', type=int,
help='Max. value for key.')
parser.add_argument('-li', '--min-liveness', type=float,
help='Min. value. Detects the presence of an audience in the recording. Higher liveness values represent '
'an increased probability that the track was performed live. A value above 0.8 provides strong '
'likelihood that the track is live.')
parser.add_argument('-mli', '--max-liveness', type=float,
help='Max. value for liveness.')
parser.add_argument('-lo', '--min-loudness', type=float,
help='Min. value. The overall loudness of a track in decibels (dB). '
'Loudness values are averaged across the entire track and are useful for comparing relative '
'loudness of tracks. Loudness is the quality of a sound that is the primary psychological '
'correlate of physical strength (amplitude). Values typical range between -60 and 0 db.')
parser.add_argument('-mlo', '--max-loudness', type=float,
help='Max. value for loudness.')
parser.add_argument('-s', '--min-speechiness', type=float,
help='Min. value. Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording '
'(e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks '
'that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain '
'both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most '
'likely represent music and other non-speech-like tracks.')
parser.add_argument('-ms', '--max-speechiness', type=float,
help='Max. value for speechiness.')
parser.add_argument('-t', '--min-tempo', type=float,
help='Min. value. The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, '
'tempo is the speed or pace of a given piece and derives directly from the average beat duration.')
parser.add_argument('-mt', '--max-tempo', type=float,
help='Max. value for tempo.')
parser.add_argument('-ts', '--min-time_signature', type=int,
help='Min. value. An estimated overall time signature of a track. The time signature (meter) is a notational '
'convention to specify how many beats are in each bar (or measure).')
parser.add_argument('-mts', '--max-time_signature', type=int,
help='Max. value for time_signature.')
parser.add_argument('-v', '--min-valence', type=float,
help='Min. value. A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. '
'Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low '
'valence sound more negative (e.g. sad, depressed, angry).')
parser.add_argument('-mv', '--max-valence', type=float,
help='Max. value for valence.')
return parser.parse_args()
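# A track passes when every supplied min_<feature>/max_<feature> bound holds for the
# corresponding value in its audio-features dict; each argparse dest (e.g. min_tempo) is
# split into the bound kind ("min"/"max") and the audio-feature name.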
def track_should_be_added(track_audio_features):
if not track_audio_features:
return False
for key, value in FILTERS_AND_ARGS.items():
        split_filter_key = key.split("_", 1)  # e.g. min_tempo; maxsplit=1 keeps feature keys like "duration_ms" intact.
try: # Get the actual audio feature value that was received.
actual_value = track_audio_features[split_filter_key[1]]
except KeyError:
return False # TODO: Maybe rather continue?
if split_filter_key[0] == "min":
if actual_value < value:
return False
elif split_filter_key[0] == "max":
if actual_value > value:
return False
return True
def filter_tracks_to_list(to_add, results):
global FILTERED, SKIPPED
for track_audio_features in results:
FILTERED += 1
if track_should_be_added(track_audio_features):
print(f"Adding: {track_audio_features["id"]}")
to_add.append(track_audio_features['id'])
else:
SKIPPED += 1
def request_audio_features(spotify_client, results):
to_request = [x['track']['id'] for x in results['items'] if x['track']['id'] is not None]
if to_request:
return spotify_client.audio_features(tracks=to_request)
return None
def main():
global FILTERED, ADDED, SKIPPED
try:
custom_playlist_id = FILTERS_AND_ARGS['playlist_id']
except KeyError:
custom_playlist_id = False
# TODO: If there would be more options that are not filters, this should be reworked.
if not FILTERS_AND_ARGS or len(FILTERS_AND_ARGS) == 1 and custom_playlist_id:
raise Exception("Usage of atleast one filter is required to generate the playlist.")
authorization = SpotifyOAuth(
scope=PERMISSIONS_SCOPE,
client_id=user_secrets.CLIENT_ID,
client_secret=user_secrets.CLIENT_SECRET,
redirect_uri=user_secrets.REDIRECT_URI,
open_browser=False
)
spotify_client = spotipy.Spotify(auth_manager=authorization)
current_user = spotify_client.me()
if not spotify_client or not current_user:
raise Exception("Failed to authorize app client or user.")
print(f"Authorized as: {current_user["display_name"]}")
used_flags = "".join(f"{filter}:{value}, " for filter, value in FILTERS_AND_ARGS.items())
print(f"Using audio feature flags: {used_flags[:-2]}")
created_playlist = spotify_client.user_playlist_create(
user=current_user['id'],
name="My filtered playlist",
description="Automatically generated with https://github.com/fuzzysearch404/SpotifyPlaylistScripts"
f" | Used flags: {used_flags[:-2]}."[:300] # Description char limit: 300
)
if not created_playlist:
raise Exception("Failed to create a playlist.")
print(f"Playlist created. ID:{created_playlist["id"]}")
if not custom_playlist_id:
print("No playlist ID provided - defaulting to saved (liked) tracks")
results = spotify_client.current_user_saved_tracks(limit=50)
else:
print(f"Using custom playlist. ID: {custom_playlist_id}")
results = spotify_client.playlist_items(custom_playlist_id, limit=50)
if not results:
raise Exception("Failed to load playlist or playlist has no songs.")
to_add = []
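    # Helper to flush the queued track ids into the newly created playlist; the loop below
    # flushes in batches of ~50, well under the per-request limit of the playlist-add endpoint.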
def add_tracks_to_spotify_playlist():
print(f"Sending a request to Spotify to add {len(to_add)} tracks.")
spotify_client.playlist_add_items(created_playlist['id'], to_add)
filter_tracks_to_list(to_add, request_audio_features(spotify_client, results))
while results['next']:
results = spotify_client.next(results)
filter_tracks_to_list(to_add, request_audio_features(spotify_client, results))
# Limit list of songs to be added at a time to about 50 from max 100.
if len(to_add) >= 50:
add_tracks_to_spotify_playlist()
ADDED += len(to_add)
to_add = []
if len(to_add) > 0:
add_tracks_to_spotify_playlist()
ADDED += len(to_add)
print("Done.")
print(f"Filtered: {FILTERED}, Added: {ADDED}, Skipped: {SKIPPED}")
if __name__ == '__main__':
args = get_args()
# Remove args where value is None.
FILTERS_AND_ARGS = dict([x for x in args.__dict__.items() if x[1] is not None])
main()
|
import argparse
import spotipy
import secrets as user_secrets
from spotipy.oauth2 import SpotifyOAuth
PERMISSIONS_SCOPE = 'user-library-read playlist-modify-public'
FILTERS_AND_ARGS = None
# Track stats
FILTERED, ADDED, SKIPPED = 0, 0, 0
def get_args():
parser = argparse.ArgumentParser(description='Creates a playlist for user.', add_help=True)
parser.add_argument('-p', '--playlist-id', type=str,
help='Specify a custom playlist ID, instead of using liked songs playlist.')
# Argument descriptions source:
# https://developer.spotify.com/documentation/web-api/reference/tracks/get-several-audio-features/
parser.add_argument('-a', '--min-acousticness', type=float,
help='Min. value. A confidence measure from 0.0 to 1.0 of whether the track is acoustic. '
'1.0 represents high confidence the track is acoustic.')
parser.add_argument('-ma', '--max-acousticness', type=float,
help='Max. value for acousticness.')
parser.add_argument('-d', '--min-danceability', type=float,
help='Min. value. Danceability describes how suitable a track is for dancing based on a '
'combination of musical elements including tempo, rhythm stability, beat strength, '
'and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable.')
    parser.add_argument('-md', '--max-danceability', type=float,
                        help='Max. value for danceability.')
parser.add_argument('-du', '--min-duration_ms', type=int,
help='Min. value. The duration of the track in milliseconds.')
parser.add_argument('-mdu', '--max-duration_ms', type=int,
help='Max. value for duration_ms.')
parser.add_argument('-e', '--min-energy', type=float,
help='Min. value. Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of '
'intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. '
'For example, death metal has high energy, while a Bach prelude scores low on the scale. '
'Perceptual features contributing to this attribute include dynamic range, '
'perceived loudness, timbre, onset rate, and general entropy.')
parser.add_argument('-me', '--max-energy', type=float,
help='Max. value for energy.')
parser.add_argument('-i', '--min-instrumentalness', type=float,
help='Min. value. Predicts whether a track contains no vocals. “Ooh” and “aah” sounds are treated as '
'instrumental in this context. Rap or spoken word tracks are clearly “vocal”. The closer the '
'instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content. '
'Values above 0.5 are intended to represent instrumental tracks, but confidence '
'is higher as the value approaches 1.0.')
parser.add_argument('-mi', '--max-instrumentalness', type=float,
help='Max. value for instrumentalness.')
parser.add_argument('-k', '--min-key', type=int,
help='Min. value. The key the track is in. Integers map to pitches using standard Pitch Class notation. '
'E.g. 0 = C, 1 = C/D, 2 = D, and so on.')
parser.add_argument('-mk', '--max-key', type=int,
help='Max. value for key.')
parser.add_argument('-li', '--min-liveness', type=float,
help='Min. value. Detects the presence of an audience in the recording. Higher liveness values represent '
'an increased probability that the track was performed live. A value above 0.8 provides strong '
'likelihood that the track is live.')
parser.add_argument('-mli', '--max-liveness', type=float,
help='Max. value for liveness.')
parser.add_argument('-lo', '--min-loudness', type=float,
help='Min. value. The overall loudness of a track in decibels (dB). '
'Loudness values are averaged across the entire track and are useful for comparing relative '
'loudness of tracks. Loudness is the quality of a sound that is the primary psychological '
'correlate of physical strength (amplitude). Values typical range between -60 and 0 db.')
parser.add_argument('-mlo', '--max-loudness', type=float,
help='Max. value for loudness.')
parser.add_argument('-s', '--min-speechiness', type=float,
help='Min. value. Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording '
'(e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks '
'that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain '
'both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most '
'likely represent music and other non-speech-like tracks.')
parser.add_argument('-ms', '--max-speechiness', type=float,
help='Max. value for speechiness.')
parser.add_argument('-t', '--min-tempo', type=float,
help='Min. value. The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, '
'tempo is the speed or pace of a given piece and derives directly from the average beat duration.')
parser.add_argument('-mt', '--max-tempo', type=float,
help='Max. value for tempo.')
parser.add_argument('-ts', '--min-time_signature', type=int,
help='Min. value. An estimated overall time signature of a track. The time signature (meter) is a notational '
'convention to specify how many beats are in each bar (or measure).')
parser.add_argument('-mts', '--max-time_signature', type=int,
help='Max. value for time_signature.')
parser.add_argument('-v', '--min-valence', type=float,
help='Min. value. A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. '
'Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low '
'valence sound more negative (e.g. sad, depressed, angry).')
parser.add_argument('-mv', '--max-valence', type=float,
help='Max. value for valence.')
return parser.parse_args()
def track_should_be_added(track_audio_features):
if not track_audio_features:
return False
for key, value in FILTERS_AND_ARGS.items():
        split_filter_key = key.split("_", 1)  # e.g. "min_tempo" -> ["min", "tempo"]; maxsplit keeps "duration_ms" intact.
try: # Get the actual audio feature value that was received.
actual_value = track_audio_features[split_filter_key[1]]
except KeyError:
return False # TODO: Maybe rather continue?
if split_filter_key[0] == "min":
if actual_value < value:
return False
elif split_filter_key[0] == "max":
if actual_value > value:
return False
return True
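# Hedged illustration (hypothetical values, not part of the original script): a
# parsed flag such as --min-tempo lands in FILTERS_AND_ARGS under the key
# "min_tempo"; the split yields ("min", "tempo"), so the track's "tempo" audio
# feature is checked against the lower bound, e.g.
#     FILTERS_AND_ARGS = {"min_tempo": 120.0, "max_energy": 0.8}
#     track_should_be_added({"id": "x", "tempo": 128.0, "energy": 0.5})  # -> True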
def filter_tracks_to_list(to_add, results):
    global FILTERED, SKIPPED
    if not results:  # request_audio_features may return None when a page has no usable track IDs.
        return
    for track_audio_features in results:
FILTERED += 1
if track_should_be_added(track_audio_features):
print(f"Adding: {track_audio_features['id']}")
to_add.append(track_audio_features['id'])
else:
SKIPPED += 1
def request_audio_features(spotify_client, results):
    to_request = [x['track']['id'] for x in results['items'] if x['track'] and x['track']['id'] is not None]
if to_request:
return spotify_client.audio_features(tracks=to_request)
return None
def main():
global FILTERED, ADDED, SKIPPED
try:
custom_playlist_id = FILTERS_AND_ARGS['playlist_id']
except KeyError:
custom_playlist_id = False
# TODO: If there would be more options that are not filters, this should be reworked.
    if not FILTERS_AND_ARGS or (len(FILTERS_AND_ARGS) == 1 and custom_playlist_id):
        raise Exception("Usage of at least one filter is required to generate the playlist.")
authorization = SpotifyOAuth(
scope=PERMISSIONS_SCOPE,
client_id=user_secrets.CLIENT_ID,
client_secret=user_secrets.CLIENT_SECRET,
redirect_uri=user_secrets.REDIRECT_URI,
open_browser=False
)
spotify_client = spotipy.Spotify(auth_manager=authorization)
current_user = spotify_client.me()
if not spotify_client or not current_user:
raise Exception("Failed to authorize app client or user.")
print(f"Authorized as: {current_user['display_name']}")
    used_flags = "".join(f"{flag}:{value}, " for flag, value in FILTERS_AND_ARGS.items())
print(f"Using audio feature flags: {used_flags[:-2]}")
created_playlist = spotify_client.user_playlist_create(
user=current_user['id'],
name="My filtered playlist",
description="Automatically generated with https://github.com/fuzzysearch404/SpotifyPlaylistScripts"
f" | Used flags: {used_flags[:-2]}."[:300] # Description char limit: 300
)
if not created_playlist:
raise Exception("Failed to create a playlist.")
print(f"Playlist created. ID:{created_playlist['id']}")
if not custom_playlist_id:
print("No playlist ID provided - defaulting to saved (liked) tracks")
results = spotify_client.current_user_saved_tracks(limit=50)
else:
print(f"Using custom playlist. ID: {custom_playlist_id}")
results = spotify_client.playlist_items(custom_playlist_id, limit=50)
if not results:
raise Exception("Failed to load playlist or playlist has no songs.")
to_add = []
def add_tracks_to_spotify_playlist():
print(f"Sending a request to Spotify to add {len(to_add)} tracks.")
spotify_client.playlist_add_items(created_playlist['id'], to_add)
filter_tracks_to_list(to_add, request_audio_features(spotify_client, results))
while results['next']:
results = spotify_client.next(results)
filter_tracks_to_list(to_add, request_audio_features(spotify_client, results))
        # Send in batches of about 50 tracks (the add-items endpoint accepts at most 100 per request).
if len(to_add) >= 50:
add_tracks_to_spotify_playlist()
ADDED += len(to_add)
to_add = []
if len(to_add) > 0:
add_tracks_to_spotify_playlist()
ADDED += len(to_add)
print("Done.")
print(f"Filtered: {FILTERED}, Added: {ADDED}, Skipped: {SKIPPED}")
if __name__ == '__main__':
args = get_args()
# Remove args where value is None.
    FILTERS_AND_ARGS = {key: value for key, value in vars(args).items() if value is not None}
main()
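# Hedged example invocation (the script filename is an assumption; the flags map
# to the argparse options defined above):
#     python filter_playlist.py --min-tempo 120 --max-tempo 140 --min-energy 0.6
# This builds a new playlist from the user's saved tracks containing only songs
# with a tempo between 120 and 140 BPM and an energy of at least 0.6.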
|
import sys
sys.path.append("../shared")
sys.path.append("")
import logging
import numpy as np
import df_utils
import file_utils
import time
import pandas as pd
from plotly.subplots import make_subplots
from plotly.offline import plot
import plotly.graph_objects as go
from sklearn.linear_model import LinearRegression
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
logger = logging.getLogger(__name__)
NONE = 0
LONG = 1
SHORT = -1
DEBUG = False
DEVIATION_POW = 4
if DEBUG:
pass
"""
███████╗████████╗ █████╗ ████████╗██╗███████╗████████╗██╗ ██████╗███████╗
██╔════╝╚══██╔══╝██╔══██╗╚══██╔══╝██║██╔════╝╚══██╔══╝██║██╔════╝██╔════╝
███████╗ ██║ ███████║ ██║ ██║███████╗ ██║ ██║██║ ███████╗
╚════██║ ██║ ██╔══██║ ██║ ██║╚════██║ ██║ ██║██║ ╚════██║
███████║ ██║ ██║ ██║ ██║ ██║███████║ ██║ ██║╚██████╗███████║
╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚══════╝ ╚═╝ ╚═╝ ╚═════╝╚══════╝
"""
def _sortino_ratio(returns: pd.Series) -> float:
    """Return the Sortino ratio for a given series of returns.
References:
- https://en.wikipedia.org/wiki/Sortino_ratio
"""
_target_returns = 0
_risk_free_rate = 0
downside_returns = returns.copy()
downside_returns[returns < _target_returns] = returns ** 2
expected_return = np.mean(returns)
downside_std = np.sqrt(np.std(downside_returns))
return (expected_return - _risk_free_rate + 1E-9) / (downside_std + 1E-9)
def _sortino_ratio_v2(returns) -> float:
    """Return the Sortino ratio for a given series of returns.
References:
- https://en.wikipedia.org/wiki/Sortino_ratio
"""
_target_returns = 0
_risk_free_rate = 0
#downside_returns = np.copy(returns)
downside_returns = np.where(returns < _target_returns, returns ** 2, returns)
expected_return = np.mean(returns)
downside_std = np.sqrt(np.std(downside_returns))
return (expected_return - _risk_free_rate + 1E-9) / (downside_std + 1E-9)
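# Hedged usage sketch (toy numbers, not part of the original module); the helper
# below is illustrative only and is never called at import time.
def _example_sortino_usage():
    # np.where squares only the negative returns before the downside deviation
    # is taken, so losses are penalised while gains pass through unchanged.
    toy_returns = np.array([0.02, -0.01, 0.03, -0.02])
    return _sortino_ratio_v2(toy_returns)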
def _sharpe_ratio(returns: pd.Series) -> float:
    """Return the Sharpe ratio for a given series of returns.
    References:
        - https://en.wikipedia.org/wiki/Sharpe_ratio
    """
    _risk_free_rate = 0
    return (np.mean(returns) - _risk_free_rate + 1E-9) / (np.std(returns) + 1E-9)
def _sharpe_ratio_v2(returns) -> float:
    """Return the Sharpe ratio for a given series of returns.
    References:
        - https://en.wikipedia.org/wiki/Sharpe_ratio
    """
    _risk_free_rate = 0
    return (np.mean(returns) - _risk_free_rate + 1E-9) / (np.std(returns) + 1E-9)
def linreg(values, forward=1):
    regression_model = LinearRegression()
    index = np.arange(len(values))
    pred = np.arange(len(values) + forward)
    row = np.array(values)
regression_model.fit(index.reshape(-1, 1), row.reshape(-1, 1))
val = regression_model.predict(pred.reshape(-1, 1))[-1]
return val.item()
def linreg_np(values, forward=1):
    regression_model = LinearRegression()
    index = np.arange(len(values))
    pred = np.arange(len(values) + forward)
    row = values
regression_model.fit(index.reshape(-1, 1), row.reshape(-1, 1))
val = regression_model.predict(pred.reshape(-1, 1))[-1]
return val.item()
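# Hedged usage sketch (assumed toy data): the regression is fit against the
# series index and extrapolated `forward` steps past the last observation.
# Defined only for illustration; never called at import time.
def _example_linreg_usage():
    values = np.array([1.0, 2.0, 3.0, 4.0])
    return linreg_np(values, forward=1)  # roughly 5.0 for a perfectly linear series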
"""
████████╗██████╗ █████╗ ██████╗ ███████╗
╚══██╔══╝██╔══██╗██╔══██╗██╔══██╗██╔════╝
██║ ██████╔╝███████║██║ ██║█████╗
██║ ██╔══██╗██╔══██║██║ ██║██╔══╝
██║ ██║ ██║██║ ██║██████╔╝███████╗
╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═════╝ ╚══════╝
"""
class Trade():
def __init__(self, id, position, open_time, open_rate, open_fee, open_fee_pct, i):
self.id = id
self.position = position
self.open_i = i
self.close_i = 0
self.open_time = open_time
self.open_rate = open_rate
self.open_fee = open_fee
self.open_fee_pct = open_fee_pct
self.close_time = None
self.close_rate = None
self.close_fee = None
self.profit_pct = None
self.profit_pip = None
self.duration = None
self.duration_candles = 0
def close(self, close_time, close_rate, close_fee, close_fee_pct, pip_multiplier, i):
self.close_time = close_time
self.close_rate = close_rate
self.close_fee = close_fee
self.close_fee_pct = close_fee_pct
self.close_i = i
open_fee = (self.open_rate * self.open_fee_pct) + (self.open_fee)
close_fee = (self.close_rate * self.close_fee_pct) + (self.close_fee)
if self.position == LONG:
self.profit_pct = ((self.close_rate - close_fee) / (self.open_rate + open_fee)) - 1
self.profit_pip = ((self.close_rate - close_fee) - (self.open_rate + open_fee))
        elif self.position == SHORT:
            # Apply the close-leg fee to the close rate, mirroring the LONG branch.
            self.profit_pct = ((self.open_rate - open_fee) / (self.close_rate + close_fee)) - 1
            self.profit_pip = ((self.open_rate - open_fee) - (self.close_rate + close_fee))
self.profit_pip = int(self.profit_pip * pip_multiplier)
try:
self.duration = int((self.close_time - self.open_time).total_seconds() / 60)
except:
pass
self.duration_candles = self.close_i - self.open_i
def to_dict(self):
return {
"id": self.id,
"position": self.position,
"open_time": self.open_time,
"open_rate": self.open_rate,
"open_fee": self.open_fee,
"close_time": self.close_time,
"close_rate": self.close_rate,
"close_fee": self.close_fee,
"profit_pct": self.profit_pct,
"profit_pip": self.profit_pip,
"duration": self.duration,
"duration_candles": self.duration_candles,
"open_i": self.open_i,
"close_i": self.close_i
}
class BacktestSimpleResult(Base):
__tablename__ = 'results'
id = Column(Integer, primary_key=True, autoincrement=True)
pair_id = Column(Integer)
seed = Column(String)
statement = Column(String)
trades = Column(Integer)
cum_profit = Column(Float)
profit_drawdown = Column(Float)
equity_drawdown = Column(Float)
profit_deviation = Column(Float)
profit_deviation_max = Column(Float)
equity_deviation = Column(Float)
equity_deviation_max = Column(Float)
negative_equity = Column(Float)
trade_duration_mean = Column(Float)
trade_duration_median = Column(Float)
trade_duration_variance = Column(Float)
win_rate = Column(Float)
cnt_wins = Column(Float)
cnt_losses = Column(Float)
sharpe_ratio = Column(Float)
sortino_ratio = Column(Float)
lr1 = Column(Float)
lr2 = Column(Float)
lr3 = Column(Float)
lrk1 = Column(Float)
lrk2 = Column(Float)
lrk3 = Column(Float)
buy_sell_signal_ratio = Column(Float)
score = Column(Float)
def __init__(self):
self.pair_id = 0
self.trades = 0
self.cum_profit = 0
self.profit_drawdown = 0
self.equity_drawdown = 0
self.profit_deviation = 0
self.profit_deviation_max = 0
self.equity_deviation = 0
self.equity_deviation_max = 0
self.negative_equity = 0
self.trade_duration_mean = 0
self.trade_duration_median = 0
self.trade_duration_variance = 0
self.win_rate = 0
self.cnt_wins = 0
self.cnt_losses = 0
self.sharpe_ratio = 0
self.sortino_ratio = 0
self.lr1 = 0
self.lr2 = 0
self.lr3 = 0
self.lrk1 = 0
self.lrk2 = 0
self.lrk3 = 0
self.buy_sell_signal_ratio = 0
self.score = 0
class SimpleBacktest():
def __init__(self, df, spread=0, fee_pct=0, ping_pong=False, pip_multiplier=1):
self.spread = spread
self.fee_pct = fee_pct
self.ping_pong = ping_pong
self.pip_multiplier = pip_multiplier
self.df = df
tmp = df[['date', 'open', 'high', 'low', 'close', 'buy', 'sell', 'sbuy', 'ssell']].copy()
self.date = tmp.columns.get_loc("date")
self.open = tmp.columns.get_loc("open")
self.high = tmp.columns.get_loc("high")
self.low = tmp.columns.get_loc("low")
self.close = tmp.columns.get_loc("close")
self.buy = tmp.columns.get_loc("buy")
self.sell = tmp.columns.get_loc("sell")
self.sbuy = tmp.columns.get_loc("sbuy")
self.ssell = tmp.columns.get_loc("ssell")
self.numpy_df = tmp.to_numpy()
self.numpy_df[:, self.buy] = 0
self.numpy_df[:, self.sell] = 0
self.numpy_df[:, self.sbuy] = 0
self.numpy_df[:, self.ssell] = 0
self.stats = BacktestSimpleResult()
self.reset()
def reset(self):
# INIT
self.i = 0
self.trades = []
self.cur_trade = None
self.row = None
self.std_duration = []
self.std_duration_candles = []
        # Profit in % of each trade
        self.std_returns = []
        # Timestamps for plotting
        self.std_df_time = []
        # Total profit at each candle
        self.std_df_profit = []
        # UPNL at each candle
        self.std_df_upnl = []
        # Equity at each candle
        self.std_df_equity = []
self.stats = BacktestSimpleResult()
# COPY
self.numpy_df[:, self.buy] = 0
self.numpy_df[:, self.sell] = 0
self.numpy_df[:, self.sbuy] = 0
self.numpy_df[:, self.ssell] = 0
def _open(self, position):
self.cur_trade = Trade(
id=len(self.trades),
position=position,
open_time=self.row[self.date],
open_rate=self.row[self.open],
open_fee=self.spread,
open_fee_pct=self.fee_pct,
i=self.i
)
self.trades.append(self.cur_trade)
def _close(self):
self.cur_trade.close(
close_time=self.row[self.date],
close_rate=self.row[self.open],
close_fee=self.spread,
close_fee_pct=self.fee_pct,
pip_multiplier=self.pip_multiplier,
i=self.i
)
self.stats.trades += 1
self.stats.cum_profit = self.stats.cum_profit + self.cur_trade.profit_pct
#self.stats.cum_profit_pip = self.stats.cum_profit_pip + self.cur_trade.profit_pip
if self.cur_trade.profit_pct > 0:
self.stats.cnt_wins += 1
else:
self.stats.cnt_losses += 1
self.std_returns.append(self.cur_trade.profit_pct)
self.std_duration.append(self.cur_trade.duration)
self.std_duration_candles.append(self.cur_trade.duration_candles)
self.cur_trade = None
def _get_upnl(self, trade, close_rate):
open_fee = (trade.open_rate * trade.open_fee_pct) + (trade.open_fee)
close_fee = (close_rate * trade.open_fee_pct) + (trade.open_fee)
if trade.position == LONG:
profit_pct = ((close_rate - close_fee) / (trade.open_rate + open_fee)) - 1
        elif trade.position == SHORT:
            profit_pct = ((trade.open_rate - open_fee) / (close_rate + close_fee)) - 1
return profit_pct
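    # Hedged worked example (toy numbers): for a LONG trade opened at 100.0 with
    # spread 0 and fee_pct 0.001, a current rate of 101.0 gives
    #     open_fee  = 100.0 * 0.001 = 0.1
    #     close_fee = 101.0 * 0.001 = 0.101
    #     upnl      = (101.0 - 0.101) / (100.0 + 0.1) - 1 ≈ 0.0080 (about 0.8%)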
def backtest(self, np_buy: np.ndarray, np_sell: np.ndarray, start: int=None, end: int=None, np_sbuy: np.ndarray=None, np_ssell: np.ndarray=None):
#date = self.date
#open = self.open
#high = self.high
#low = self.low
#close = self.close
buy = self.buy
sell = self.sell
sbuy = self.sbuy
ssell = self.ssell
df = self.numpy_df
df[:, buy] = np_buy[:]
df[:, sell] = np_sell[:]
if self.ping_pong:
df[:, sbuy] = np_buy[:]
df[:, ssell] = np_sell[:]
else:
if np_sbuy is not None:
df[:, sbuy] = np_sbuy[:]
else:
df[:, sbuy] = 0
if np_ssell is not None:
df[:, ssell] = np_ssell[:]
else:
df[:, ssell] = 0
# Buy/Sell Ratio
buys = np.sum(np_buy)
sells = np.sum(np_sell)
if buys > 1 and sells > 1:
if buys > sells:
self.stats.buy_sell_signal_ratio = sells / buys
else:
self.stats.buy_sell_signal_ratio = buys / sells
else:
self.stats = None
return False
length = df.shape[0]
if start is None:
start = 0
if end is None:
end = length - 1
last_upnl = 0
equity = 0
for i in range(start, end):
rrow = df[i]
self.row = rrow
self.i = i
new_equity = equity
upnl = 0
if i > 0:
                lrow = df[i - 1]  # use the previous row so signals are automatically shifted by one candle
                # No open trade -> open one
if self.cur_trade is None:
# Open
if lrow[buy] == 1:
self._open(LONG)
elif lrow[ssell] == 1:
self._open(SHORT)
                # Open trade -> close it, or close and open a new one
else:
                    # Get UPNL for the equity curve
upnl = self._get_upnl(self.cur_trade, self.row[self.open])
new_equity = equity + upnl
                    if lrow[sell] == 1 and lrow[buy] == 1:
                        raise Exception("Cannot buy long and sell long at the same time!")
                    if lrow[ssell] == 1 and lrow[sbuy] == 1:
                        raise Exception("Cannot sell short and buy short at the same time!")
                    if lrow[ssell] == 1 and lrow[buy] == 1:
                        raise Exception("Cannot sell short and buy long at the same time!")
                    # Close signal
if (lrow[sell] == 1 or lrow[ssell] == 1) and self.cur_trade.position == LONG:
self._close()
equity = self.stats.cum_profit
elif (lrow[buy] == 1 or lrow[sbuy] == 1) and self.cur_trade.position == SHORT:
self._close()
equity = self.stats.cum_profit
if self.cur_trade is None:
if lrow[buy] == 1:
self._open(LONG)
if lrow[ssell] == 1:
self._open(SHORT)
self.std_df_time.append(self.row[self.date])
self.std_df_profit.append(self.stats.cum_profit)
self.std_df_upnl.append(upnl)
self.std_df_equity.append(new_equity)
if self.cur_trade is not None:
self.i += 1
rrow = df[i]
self.row = rrow
self._close()
new_equity = self.stats.cum_profit
upnl = 0
        # Prepare statistics
self.stats.trades_cnt = len(self.trades)
if (self.stats.trades_cnt < 3) or (self.stats.cum_profit < 0):
            logger.debug("No trades or no profit!")
self.stats = None
return False
if self.row is not None:
self.std_df_time.append(self.row[self.date])
self.std_df_profit.append(self.stats.cum_profit)
self.std_df_upnl.append(upnl)
self.std_df_equity.append(new_equity)
assert(len(self.std_df_equity) == len(np_buy))
self.get_stats_np()
return True
"""
█████╗ ██╗ ██╗███████╗██╗ ██╗███████╗██████╗ ████████╗███████╗███╗ ██╗
██╔══██╗██║ ██║██╔════╝██║ ██║██╔════╝██╔══██╗╚══██╔══╝██╔════╝████╗ ██║
███████║██║ ██║███████╗██║ █╗ ██║█████╗ ██████╔╝ ██║ █████╗ ██╔██╗ ██║
██╔══██║██║ ██║╚════██║██║███╗██║██╔══╝ ██╔══██╗ ██║ ██╔══╝ ██║╚██╗██║
██║ ██║╚██████╔╝███████║╚███╔███╔╝███████╗██║ ██║ ██║ ███████╗██║ ╚████║
╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚══╝╚══╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝
"""
def get_stats_np(self):
if self.stats.cnt_losses <= 0:
self.stats.cnt_losses = 1
if self.stats.cnt_wins <= 0:
self.stats.cnt_wins = 1
self.stats.win_rate = self.stats.cnt_wins / self.stats.cnt_losses
profit = np.array(self.std_df_profit)
equity = np.array(self.std_df_equity)
upnl = np.array(self.std_df_upnl)
equity_norm = df_utils.normalize_np(equity)
profit_norm = df_utils.normalize_np(profit)
#upnl_norm = df_utils.normalize_np(upnl)
self.stats.equity = equity_norm
self.stats.profit = profit_norm
# Profit Drawdown PCT
min_right = np.minimum.accumulate(profit_norm[::-1])[::-1]
self.stats.profit_drawdown = np.max(np.abs(profit_norm - min_right))
# Equity Drawdown PCT
min_right = np.minimum.accumulate(equity_norm[::-1])[::-1]
self.stats.equity_drawdown = np.max(np.abs(equity_norm - min_right))
# Profit Deviation PCT
step = 1 / len(profit_norm)
perfekte_gewinnkurve = np.full(len(profit_norm), step).cumsum()
deviation = np.abs(profit_norm - perfekte_gewinnkurve)
self.stats.profit_deviation = np.sum(deviation) / len(profit_norm)
self.stats.profit_deviation_max = np.max(deviation)
# Equity Deviation PCT
step = 1 / len(equity_norm)
perfekte_gewinnkurve = np.full(len(equity_norm), step).cumsum()
deviation = np.abs(equity_norm - perfekte_gewinnkurve)
self.stats.equity_deviation = np.sum(deviation) / len(equity_norm)
self.stats.equity_deviation_max = np.max(deviation)
# Avg, Median Trade Duration
duration_candles = np.array(self.std_duration_candles)
self.stats.trade_duration_mean = np.mean(duration_candles)
self.stats.trade_duration_median = np.median(duration_candles)
self.stats.trade_duration_variance = np.var(duration_candles, ddof=1)
# Negative Equity
self.stats.negative_equity = np.sum(np.power(1 + np.abs(np.where(upnl > 0, 0, upnl)), 6))
# Sortino
returns = np.array(self.std_returns)
ratio = _sortino_ratio_v2(returns)
if ratio > 100:
ratio = -1
self.stats.sortino_ratio = ratio
# Sharpe
ratio = _sharpe_ratio_v2(returns)
if ratio > 100:
ratio = -1
self.stats.sharpe_ratio = ratio
# Linreg
y = profit_norm
length = len(y)
l1 = int(length) - 1
l2 = int(length / 2)
l3 = int(length / 3)
self.stats.lr1 = 1
self.stats.lr2 = 1
self.stats.lr3 = 1
self.stats.lrk3 = 1
self.stats.lrk2 = 1
self.stats.lrk1 = 1
"""
self.stats.lr1 = linreg_np(y[-l1:])
self.stats.lr2 = linreg_np(y[-l2:])
self.stats.lr3 = linreg_np(y[-l3:])
self.stats.lrk3 = linreg_np(y[-l3:], 2) - linreg_np(y[-l3:], 1)
self.stats.lrk2 = linreg_np(y[-l2:], 2) - linreg_np(y[-l2:], 1)
self.stats.lrk1 = linreg_np(y[-l1:], 2) - linreg_np(y[-l1:], 1)
"""
"""
██████╗ ██╗ ██████╗ ████████╗████████╗██╗███╗ ██╗ ██████╗
██╔══██╗██║ ██╔═══██╗╚══██╔══╝╚══██╔══╝██║████╗ ██║██╔════╝
██████╔╝██║ ██║ ██║ ██║ ██║ ██║██╔██╗ ██║██║ ███╗
██╔═══╝ ██║ ██║ ██║ ██║ ██║ ██║██║╚██╗██║██║ ██║
██║ ███████╗╚██████╔╝ ██║ ██║ ██║██║ ╚████║╚██████╔╝
╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═════╝
"""
    def plot_plotly(self, name, path, indicators=None):
        self.indicators = indicators if indicators is not None else []
fig = self.generate_candlestick_graph(
pair=name,
data=self.df
)
trades = self.get_trades()
fig = self.plot_trades(fig, trades)
fig = self.plot_profit(fig)
fig.update_layout(showlegend=True)
fig.update_yaxes(automargin=True)
fig.update_xaxes(automargin=True)
fig.update_layout(
autosize=True,
margin=go.layout.Margin(
l=0,
r=0,
b=0,
t=30,
pad=0
)
)
plot(fig, filename=path, auto_open=False)
def get_trades(self):
self.df_trades = pd.DataFrame.from_records([s.to_dict() for s in self.trades])
if len(self.df_trades) > 0:
self.df_trades["profit_pct"] = self.df_trades["profit_pct"] * 100
self.profit = self.df_trades["profit_pct"].sum()
else:
self.profit = -1
return self.df_trades
def generate_candlestick_graph(self, pair: str, data: pd.DataFrame) -> go.Figure:
# Define the graph
fig = make_subplots(
rows=2,
cols=1,
shared_xaxes=True,
row_width=[1, 1],
vertical_spacing=0,
)
fig['layout'].update(title=pair)
fig['layout']['yaxis1'].update(title='Price')
fig['layout']['yaxis2'].update(title='Balance')
#fig['layout']['yaxis3'].update(title='Other')
fig['layout']['xaxis']['rangeslider'].update(visible=False)
#fig['layout']['xaxis'].update(type=False)
if len(data.index) > 1024:
# Common information
candles = go.Candlestick(
x=data.date,
open=data.open,
high=data.high,
low=data.low,
close=data.close,
name='Price'
#text=data.status
)
else:
candles = go.Scatter(
x=data.date,
y=data.close,
name='Price',
fillcolor="black"
)
fig.add_trace(candles, 1, 1)
for indi in self.indicators:
if indi in data.columns:
candles = go.Scatter(
x=data.date,
y=data[indi],
name=indi,
fillcolor="blue"
)
fig.add_trace(candles, 1, 1)
op = 0.5
size = 11
width = 2
if 'buy' in data.columns:
df_buy = data[data['buy'] == 1]
if len(df_buy) > 0:
buys = go.Scatter(
x=df_buy.date,
y=df_buy.close,
mode='markers',
name='buy',
opacity=op,
marker=dict(
symbol='triangle-up-open',
size=size,
line=dict(width=width),
color='green',
)
)
fig.add_trace(buys, 1, 1)
if 'sell' in data.columns:
df_sell = data[data['sell'] == 1]
if len(df_sell) > 0:
sells = go.Scatter(
x=df_sell.date,
y=df_sell.close,
mode='markers',
name='sell',
opacity=op,
marker=dict(
symbol='circle-open',
size=size,
line=dict(width=width),
color='red',
)
)
fig.add_trace(sells, 1, 1)
if 'sbuy' in data.columns:
df_buy = data[data['sbuy'] == 1]
if len(df_buy) > 0:
buys = go.Scatter(
x=df_buy.date,
y=df_buy.close,
mode='markers',
name='sbuy',
opacity=op,
marker=dict(
symbol='circle-open',
size=size,
line=dict(width=width),
color='cyan',
)
)
fig.add_trace(buys, 1, 1)
if 'ssell' in data.columns:
df_sell = data[data['ssell'] == 1]
if len(df_sell) > 0:
sells = go.Scatter(
x=df_sell.date,
y=df_sell.close,
mode='markers',
name='ssell',
opacity=op,
marker=dict(
symbol='triangle-down-open',
size=size,
line=dict(width=width),
color='orange',
)
)
fig.add_trace(sells, 1, 1)
return fig
def plot_profit(self, fig) -> make_subplots:
profit = go.Scatter(
x=self.std_df_time,
y=self.std_df_profit,
name='Cum Profit'
)
fig.add_trace(profit, 2, 1)
profit = go.Scatter(
x=self.std_df_time,
y=self.std_df_equity,
name='Equity'
)
fig.add_trace(profit, 2, 1)
profit = go.Scatter(
x=self.std_df_time,
y=self.std_df_upnl,
name='UPNL'
)
fig.add_trace(profit, 2, 1)
return fig
def plot_trades(self, fig, trades: pd.DataFrame) -> make_subplots:
# Trades can be empty
if trades is not None and len(trades) > 0:
longs = trades[trades["position"] == 1]
shorts = trades[trades["position"] == -1]
def tmp(df, name):
if len(df.index) > 0:
color_entry = df.apply(lambda row: 'green' if row['position'] == 1 else 'orange', axis=1)
color_exit = df.apply(lambda row: 'red' if row['position'] == 1 else 'cyan', axis=1)
shape = df.apply(lambda row: 'square-open' if row['position'] == 1 else 'diamond-open', axis=1)
trade_buys = go.Scatter(
x=df["open_time"],
y=df["open_rate"],
mode='markers',
name=name + " OPEN",
marker=dict(
symbol=shape,
size=11,
line=dict(width=2),
color=color_entry
)
)
                    desc = df.apply(lambda row: f"{round(row['profit_pct'], 3)}%,"
                                                f"{row['duration']}min",
                                    axis=1)
trade_sells = go.Scatter(
x=df["close_time"],
y=df["close_rate"],
text=desc,
mode='markers',
name=name + " CLOSE",
marker=dict(
symbol=shape,
size=11,
line=dict(width=2),
color=color_exit
)
)
fig.add_trace(trade_buys, 1, 1)
fig.add_trace(trade_sells, 1, 1)
tmp(longs, "LONG")
tmp(shorts, "SHORT")
else:
logger.warning("No trades found.")
return fig
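# Hedged usage sketch (the DataFrame columns and signal arrays are assumptions
# about the caller's data; this module expects date/open/high/low/close and
# buy/sell/sbuy/ssell columns, as read in __init__):
#
#     bt = SimpleBacktest(df, spread=0.0001, fee_pct=0.0, pip_multiplier=10000)
#     ok = bt.backtest(np_buy=df["buy"].to_numpy(), np_sell=df["sell"].to_numpy())
#     if ok:
#         print(bt.stats.cum_profit, bt.stats.sortino_ratio)
#         bt.plot_plotly("EURUSD", "backtest.html")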
|
import json
import os
import time
import datetime
import pytz
from xml.sax.saxutils import escape
from wsgiref.handlers import format_date_time
import cachetools.func
import diskcache
import requests
import podgen
from flask import abort
from flask import render_template
from flask import make_response
from flask import request
from server.parser.npr import NprParser
from server import app
CACHE_TIMEOUT = 300
# Application needs to be restarted if feeds.json changes
@cachetools.func.lru_cache
def get_feeds():
    with open(os.path.join(app.root_path, 'feeds.json')) as f:
        return json.load(f)
@cachetools.func.lru_cache
def get_feed_name(id_):
feeds = get_feeds()
if id_ in feeds:
return id_
if id_.isdigit():
for key, value in feeds.items():
if int(id_) == value['id']:
return key
raise ValueError
# One request per minute per URL. If there is a bug we don't want to kill the remote server.
@cachetools.func.ttl_cache(maxsize=128, ttl=60)
def get_url(url):
return requests.get(url, timeout=5)
def generate_rss(text, ts, name, meta):
if meta['parser'] == 'npr':
publication_time = meta.get('publication_time', None)
if publication_time:
data = NprParser(text, name, publication_time=publication_time)
else:
data = NprParser(text, name)
else:
        raise ValueError(f"unknown parser type {meta['parser']}")
episodes = []
for item in data.episodes:
e = podgen.Episode()
e.id = escape(item.id)
e.title = escape(item.title)
e.media = podgen.Media(item.media_url, item.media_size, duration=datetime.timedelta(seconds=item.media_duration))
e.publication_date = item.publication_date
e.link = item.link
episodes.append(e)
if 'title' in meta:
title = meta['title']
else:
title = getattr(data, 'title')
if 'author' in meta:
author = meta['author']
else:
author = getattr(data, 'author', None)
if 'image' in meta:
image = meta['image']
else:
image = getattr(data, 'image', None)
if 'description' in meta:
description = meta['description']
else:
        description = f"Auto-generated by podfeed. Data sourced from {meta['url']}. Report issues to https://github.com/pyther/podfeed/issues"
category = meta.get('category', None)
url = meta.get('url', None)
podcast = podgen.Podcast()
podcast.name = escape(title)
podcast.description = escape(description)
if url:
podcast.website = url
if category:
podcast.category = podgen.Category(category[0], category[1])
podcast.language = "en-US"
if author:
podcast.authors = [podgen.Person(author, None)]
if image:
podcast.image = image
podcast.explicit = False
podcast.last_updated = pytz.utc.localize(datetime.datetime.utcfromtimestamp(ts))
podcast.generator = "pyther/podfeed"
podcast.episodes = episodes
podcast.publication_date = False
return podcast.rss_str()
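# Hedged illustration of a feeds.json entry (field values are hypothetical; only
# the keys read above -- id, parser, url, and the optional overrides -- are
# implied by the code):
#
#     "morning-news": {
#         "id": 1,
#         "parser": "npr",
#         "url": "https://example.org/rss/podcast.xml",
#         "title": "Morning News",
#         "category": ["News", "Daily News"],
#         "publication_time": "07:00"
#     }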
# Memoize the generated response for one second; after that the disk cache is checked again
@cachetools.func.ttl_cache(maxsize=128, ttl=1)
def feed(name):
meta = get_feeds()[name]
# Load Cache
cache = diskcache.Cache('/tmp/podfeed')
    # cache is expired if the item doesn't exist
if name not in cache:
# Update cache
try:
req = get_url(meta['url'])
except Exception as e:
app.logger.error(f"connection error: {e}")
abort(503, 'remote server unavailable')
else:
if req.ok:
cache.set(name, req.text, expire=CACHE_TIMEOUT)
app.logger.info(f"cache updated for {name}")
else:
                app.logger.error(f"status code {req.status_code} from {meta['url']}")
abort(503, 'request to remote server was unsuccessful')
# Get expiration time
text, expire_time = cache.get(name, expire_time=True)
expire_time = int(expire_time)
cache.close()
# Generate RSS XML
rss = generate_rss(text, expire_time, name, meta)
app.logger.info(f"generated rss for {name}")
response = make_response(rss)
response.headers['Content-Type'] = 'application/xml'
if expire_time:
max_age = int(expire_time - time.time())
response.headers['Expires'] = format_date_time(expire_time)
response.headers['Cache-Control'] = f"public, max-age={max_age}, stale-if-error=43200"
return response
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', feeds=get_feeds())
@app.route('/podcast/<_id>')
@app.route('/podcast/<_id>.xml')
def podcast(_id):
try:
name = get_feed_name(_id)
except ValueError:
abort(404)
response = feed(name)
return response
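# Hedged usage example (host, port, and feed id are assumptions): with the Flask
# app running, a generated feed can be fetched by name or numeric id, e.g.
#     curl http://localhost:5000/podcast/morning-news.xml
#     curl http://localhost:5000/podcast/1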
|
import json
import logging
import math
import os
import random
import warnings
from multiprocessing import cpu_count
from pathlib import Path
import numpy as np
from tqdm.auto import tqdm, trange
import pandas as pd
import torch
from simpletransformers.config.global_args import global_args
from simpletransformers.seq2seq.seq2seq_utils import Seq2SeqDataset, SimpleSummarizationDataset
from tensorboardX import SummaryWriter
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, EncoderDecoderModel, EncoderDecoderConfig, get_linear_schedule_with_warmup
from transformers import (
AutoModel,
AutoTokenizer,
AutoConfig,
BertTokenizer,
BertModel,
BertForMaskedLM,
BertConfig,
CamembertConfig,
CamembertModel,
CamembertTokenizer,
DistilBertConfig,
DistilBertModel,
DistilBertTokenizer,
ElectraConfig,
ElectraModel,
ElectraTokenizer,
LongformerConfig,
LongformerModel,
LongformerTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
BartForConditionalGeneration,
BartTokenizer,
BartConfig,
MarianMTModel,
MarianTokenizer,
MarianConfig,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"auto": (AutoConfig, AutoModel, AutoTokenizer),
"bart": (BartConfig, BartForConditionalGeneration, BartTokenizer),
"bert": (BertConfig, BertModel, BertTokenizer),
"camembert": (CamembertConfig, CamembertModel, CamembertTokenizer),
"distilbert": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
"longformer": (LongformerConfig, LongformerModel, LongformerTokenizer),
"marian": (MarianConfig, MarianMTModel, MarianTokenizer),
"roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
}
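# Hedged usage sketch for the class defined below (the model identifiers are
# example Hugging Face names, not requirements of this module):
#
#     model = Seq2SeqModel(
#         encoder_decoder_type="bart",
#         encoder_decoder_name="facebook/bart-large",
#         use_cuda=False,
#     )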
class Seq2SeqModel:
def __init__(
self,
encoder_type=None,
encoder_name=None,
decoder_name=None,
encoder_decoder_type=None,
encoder_decoder_name=None,
config=None,
args=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a Seq2SeqModel.
Args:
encoder_type (optional): The type of model to use as the encoder.
encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
Must be the same "size" as the encoder model (base/base, large/large, etc.)
encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)
encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART model.
config (optional): A configuration file to build an EncoderDecoderModel.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
if not config:
# if not ((encoder_name and decoder_name) or encoder_decoder_name) and not encoder_type:
if not ((encoder_name and decoder_name) or encoder_decoder_name):
                raise ValueError(
                    "You must specify a Seq2Seq config \t OR \t"
                    "encoder_type, encoder_name, and decoder_name \t OR \t"
                    "encoder_type and encoder_decoder_name"
                )
elif not (encoder_type or encoder_decoder_type):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name \t OR \t"
"encoder_type and encoder_decoder_name"
)
if args and "manual_seed" in args:
random.seed(args["manual_seed"])
np.random.seed(args["manual_seed"])
torch.manual_seed(args["manual_seed"])
if "n_gpu" in args and args["n_gpu"] > 0:
torch.cuda.manual_seed_all(args["manual_seed"])
self.args = {
"dataset_class": None,
"do_sample": False,
"max_steps": -1,
"evaluate_generated_text": False,
"num_beams": 1,
"max_length": 20,
"repetition_penalty": 1.0,
"length_penalty": 2.0,
"early_stopping": True,
}
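# Added note: the generation-related defaults above are only a base layer; they are overridden
# first by the library-wide global_args below and then by any user-supplied `args` dict, so the
# effective value of e.g. "max_length" is whatever survives the last update.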
self.args.update(global_args)
saved_model_args = None
try:
saved_model_args = self._load_model_args(encoder_decoder_name)
if saved_model_args:
self.args.update(saved_model_args)
except TypeError:
logger.info(f"Failed to load saved args from {encoder_decoder_name}. This may be normal.")
if args:
self.args.update(args)
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
"Make sure CUDA is available or set `use_cuda=False`."
)
else:
self.device = "cpu"
self.results = {}
if not use_cuda:
self.args["fp16"] = False
# config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config)
if encoder_decoder_type:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_decoder_type]
else:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_type]
if encoder_decoder_type in ["bart", "marian"]:
self.model = model_class.from_pretrained(encoder_decoder_name)
if encoder_decoder_type == "bart":
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
elif encoder_decoder_type == "marian":
if "base_marian_model_name" in self.args:
self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args["base_marian_model_name"])
else:
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
self.decoder_tokenizer = self.encoder_tokenizer
self.config = self.model.config
else:
if encoder_decoder_name:
# self.model = EncoderDecoderModel.from_pretrained(encoder_decoder_name)
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
os.path.join(encoder_decoder_name, "encoder"), os.path.join(encoder_decoder_name, "decoder")
)
self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
else:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_name, decoder_name, config=config
)
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
self.encoder_config = self.model.config.encoder
self.decoder_config = self.model.config.decoder
if self.args["wandb_project"] and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args["wandb_project"] = None
if encoder_decoder_name:
self.args["model_name"] = encoder_decoder_name
# Checking if we are loading from a saved model or using a pre-trained model
if not saved_model_args and encoder_decoder_type == "marian":
# Need to store base pre-trained model name to get the tokenizer when loading a saved model
self.args["base_marian_model_name"] = encoder_decoder_name
elif encoder_name and decoder_name:
self.args["model_name"] = encoder_name + "-" + decoder_name
else:
self.args["model_name"] = "encoder-decoder"
if encoder_decoder_type:
self.args["model_type"] = encoder_decoder_type
elif encoder_type:
self.args["model_type"] = encoder_type + "-bert"
else:
self.args["model_type"] = "encoder-decoder"
def train_model(
self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update(args)
# if self.args["silent"]:
# show_running_loss = False
if self.args["evaluate_during_training"] and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args["output_dir"]
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args["overwrite_output_dir"]:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args['overwrite_output_dir'] = True to overcome.".format(output_dir)
)
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_data=eval_data,
verbose=verbose,
**kwargs,
)
self._save_model(self.args["output_dir"], model=self.model)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.encoder_tokenizer.save_pretrained(output_dir)
# self.decoder_tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args["model_name"], output_dir))
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args["tensorboard_dir"])
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args["train_batch_size"])
if args["max_steps"] > 0:
t_total = args["max_steps"]
args["num_train_epochs"] = (
args["max_steps"] // (len(train_dataloader) // args["gradient_accumulation_steps"]) + 1
)
else:
t_total = len(train_dataloader) // args["gradient_accumulation_steps"] * args["num_train_epochs"]
no_decay = ["bias", "LayerNorm.weight"]
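# Added note: parameters whose names contain any substring in no_decay (biases and LayerNorm
# weights) go into a second group with no weight decay, the usual transformer fine-tuning
# convention; all other parameters receive args["weight_decay"].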
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args["weight_decay"],
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]},
]
warmup_steps = math.ceil(t_total * args["warmup_ratio"])
args["warmup_steps"] = warmup_steps if args["warmup_steps"] == 0 else args["warmup_steps"]
# TODO: Use custom optimizer like with BertSum?
optimizer = AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], eps=args["adam_epsilon"])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args["warmup_steps"], num_training_steps=t_total
)
if (
args["model_name"]
and os.path.isfile(os.path.join(args["model_name"], "optimizer.pt"))
and os.path.isfile(os.path.join(args["model_name"], "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args["model_name"], "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args["model_name"], "scheduler.pt")))
if args["fp16"]:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args["fp16_opt_level"])
if args["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
logger.info(" Training started")
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args["num_train_epochs"]), desc="Epoch", disable=args["silent"], mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args["model_name"] and os.path.exists(args["model_name"]):
try:
# set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args["model_name"].split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args["gradient_accumulation_steps"])
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args["gradient_accumulation_steps"]
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args["evaluate_during_training"]:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args["wandb_project"]:
wandb.init(project=args["wandb_project"], config={**args}, **args["wandb_kwargs"])
wandb.watch(self.model)
model.train()
for current_epoch in train_iterator:
if epochs_trained > 0:
epochs_trained -= 1
continue
# epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(tqdm(train_dataloader, desc="Current iteration", disable=args["silent"])):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args["n_gpu"] > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
print("\rRunning loss: %f" % loss, end="")
if args["gradient_accumulation_steps"] > 1:
loss = loss / args["gradient_accumulation_steps"]
if args["fp16"]:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# torch.nn.utils.clip_grad_norm_(
# amp.master_params(optimizer), args["max_grad_norm"]
# )
else:
loss.backward()
# torch.nn.utils.clip_grad_norm_(
# model.parameters(), args["max_grad_norm"]
# )
tr_loss += loss.item()
if (step + 1) % args["gradient_accumulation_steps"] == 0:
if args["fp16"]:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args["max_grad_norm"])
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args["max_grad_norm"])
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args["logging_steps"] > 0 and global_step % args["logging_steps"] == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args["logging_steps"], global_step)
logging_loss = tr_loss
if args["wandb_project"]:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_lr()[0],
"global_step": global_step,
}
)
if args["save_steps"] > 0 and global_step % args["save_steps"] == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"] and (
args["evaluate_during_training_steps"] > 0
and global_step % args["evaluate_during_training_steps"] == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results = self.eval_model(
eval_data,
verbose=verbose and args["evaluate_during_training_verbose"],
silent=args["evaluate_during_training_silent"],
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args["save_eval_checkpoints"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False,
)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if (
results[args["early_stopping_metric"]] - best_eval_metric
< args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args["early_stopping_metric"]}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args["early_stopping_patience"]}")
else:
if verbose:
logger.info(
f" Patience of {args["early_stopping_patience"]} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if (
results[args["early_stopping_metric"]] - best_eval_metric
> args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args["early_stopping_metric"]}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args["early_stopping_patience"]}")
else:
if verbose:
logger.info(
f" Patience of {args["early_stopping_patience"]} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args["save_model_every_epoch"] or args["evaluate_during_training"]:
os.makedirs(output_dir_current, exist_ok=True)
if args["save_model_every_epoch"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"]:
results = self.eval_model(
eval_data,
verbose=verbose and args["evaluate_during_training_verbose"],
silent=args["evaluate_during_training_silent"],
**kwargs,
)
if args["save_eval_checkpoints"]:
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if results[args["early_stopping_metric"]] - best_eval_metric < args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args["early_stopping_metric"]}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args["early_stopping_patience"]}")
else:
if verbose:
logger.info(f" Patience of {args["early_stopping_patience"]} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args["early_stopping_metric"]] - best_eval_metric > args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args["early_stopping_metric"]}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args["early_stopping_patience"]}")
else:
if verbose:
logger.info(f" Patience of {args["early_stopping_patience"]} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
return global_step, tr_loss / global_step
def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args["output_dir"]
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if self.args["evaluate_generated_text"]:
to_predict = eval_data["input_text"].tolist()
preds = self.predict(to_predict)
result = self.compute_metrics(eval_data["target_text"].tolist(), preds, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args["eval_batch_size"])
if args["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=args["silent"] or silent):
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
results["eval_loss"] = eval_loss
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def predict(self, to_predict):
"""
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.
Returns:
preds: A python list of the generated sequences.
""" # noqa: ignore flake8"
self._move_model_to_device()
all_outputs = []
# Batching
for batch in [
to_predict[i : i + self.args["eval_batch_size"]]
for i in range(0, len(to_predict), self.args["eval_batch_size"])
]:
if self.args["model_type"] == "marian":
input_ids = self.encoder_tokenizer.prepare_translation_batch(
batch, max_length=self.args["max_seq_length"], pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
else:
input_ids = self.encoder_tokenizer.batch_encode_plus(
batch, max_length=self.args["max_seq_length"], pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
input_ids = input_ids.to(self.device)
if self.args["model_type"] in ["bart", "marian"]:
outputs = self.model.generate(
input_ids=input_ids,
num_beams=self.args["num_beams"],
max_length=self.args["max_length"],
length_penalty=self.args["length_penalty"],
early_stopping=self.args["early_stopping"],
repetition_penalty=self.args["repetition_penalty"],
do_sample=self.args["do_sample"],
)
else:
outputs = self.model.generate(
input_ids=input_ids,
decoder_start_token_id=self.model.config.decoder.pad_token_id,
num_beams=self.args["num_beams"],
max_length=self.args["max_length"],
length_penalty=self.args["length_penalty"],
early_stopping=self.args["early_stopping"],
repetition_penalty=self.args["repetition_penalty"],
do_sample=self.args["do_sample"],
)
all_outputs.extend(outputs)
return [
self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for output_id in all_outputs
]
def compute_metrics(self, labels, preds, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
labels: List of target sequences
preds: List of model generated outputs
**kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
result: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
assert len(labels) == len(preds)
results = {}
for metric, func in kwargs.items():
results[metric] = func(labels, preds)
return results
def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
"""
Creates a Seq2SeqDataset (or SimpleSummarizationDataset for BART/Marian models) from data.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
encoder_tokenizer = self.encoder_tokenizer
decoder_tokenizer = self.decoder_tokenizer
args = self.args
if not no_cache:
no_cache = args["no_cache"]
os.makedirs(self.args["cache_dir"], exist_ok=True)
mode = "dev" if evaluate else "train"
if args["dataset_class"]:
CustomDataset = args["dataset_class"]
return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
else:
if args["model_type"] in ["bart", "marian"]:
return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
else:
return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode,)
def _create_training_progress_scores(self, **kwargs):
extra_metrics = {key: [] for key in kwargs}
training_progress_scores = {
"global_step": [],
"eval_loss": [],
"train_loss": [],
**extra_metrics,
}
return training_progress_scores
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args["output_dir"]
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model into {output_dir}")
if model and not self.args["no_save"]:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
self._save_model_args(output_dir)
if self.args["model_type"] in ["bart", "marian"]:
os.makedirs(os.path.join(output_dir), exist_ok=True)
model_to_save.save_pretrained(output_dir)
self.config.save_pretrained(output_dir)
if self.args["model_type"] == "bart":
self.encoder_tokenizer.save_pretrained(output_dir)
else:
os.makedirs(os.path.join(output_dir, "encoder"), exist_ok=True)
os.makedirs(os.path.join(output_dir, "decoder"), exist_ok=True)
self.encoder_config.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_config.save_pretrained(os.path.join(output_dir, "decoder"))
model_to_save = (
self.model.encoder.module if hasattr(self.model.encoder, "module") else self.model.encoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "encoder"))
model_to_save = (
self.model.decoder.module if hasattr(self.model.decoder, "module") else self.model.decoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "decoder"))
self.encoder_tokenizer.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_tokenizer.save_pretrained(os.path.join(output_dir, "decoder"))
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args["save_optimizer_and_scheduler"]:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch):
device = self.device
if self.args["model_type"] in ["bart", "marian"]:
pad_token_id = self.encoder_tokenizer.pad_token_id
source_ids, source_mask, y = batch["source_ids"], batch["source_mask"], batch["target_ids"]
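# Added explanatory comment: the target sequence y is shifted for teacher forcing —
# y[:, :-1] feeds the decoder while y[:, 1:] becomes the labels — and padded label positions
# are set to -100 so the cross-entropy loss ignores them (PyTorch's default ignore_index).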
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone()
lm_labels[y[:, 1:] == pad_token_id] = -100
inputs = {
"input_ids": source_ids.to(device),
"attention_mask": source_mask.to(device),
"decoder_input_ids": y_ids.to(device),
"lm_labels": lm_labels.to(device),
}
else:
lm_labels = batch[1]
lm_labels_masked = lm_labels.clone()
lm_labels_masked[lm_labels_masked == self.decoder_tokenizer.pad_token_id] = -100
inputs = {
"input_ids": batch[0].to(device),
"decoder_input_ids": lm_labels.to(device),
"lm_labels": lm_labels_masked.to(device),
}
return inputs
def _save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "model_args.json"), "w") as f:
json.dump(self.args, f)
def _load_model_args(self, input_dir):
model_args_file = os.path.join(input_dir, "model_args.json")
if os.path.isfile(model_args_file):
with open(model_args_file, "r") as f:
model_args = json.load(f)
return model_args
|
import json
import logging
import math
import os
import random
import warnings
from multiprocessing import cpu_count
from pathlib import Path
import numpy as np
from tqdm.auto import tqdm, trange
import pandas as pd
import torch
from simpletransformers.config.global_args import global_args
from simpletransformers.seq2seq.seq2seq_utils import Seq2SeqDataset, SimpleSummarizationDataset
from tensorboardX import SummaryWriter
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, EncoderDecoderModel, EncoderDecoderConfig, get_linear_schedule_with_warmup
from transformers import (
AutoModel,
AutoTokenizer,
AutoConfig,
BertTokenizer,
BertModel,
BertForMaskedLM,
BertConfig,
CamembertConfig,
CamembertModel,
CamembertTokenizer,
DistilBertConfig,
DistilBertModel,
DistilBertTokenizer,
ElectraConfig,
ElectraModel,
ElectraTokenizer,
LongformerConfig,
LongformerModel,
LongformerTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
BartForConditionalGeneration,
BartTokenizer,
BartConfig,
MarianMTModel,
MarianTokenizer,
MarianConfig,
)
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"auto": (AutoConfig, AutoModel, AutoTokenizer),
"bart": (BartConfig, BartForConditionalGeneration, BartTokenizer),
"bert": (BertConfig, BertModel, BertTokenizer),
"camembert": (CamembertConfig, CamembertModel, CamembertTokenizer),
"distilbert": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
"longformer": (LongformerConfig, LongformerModel, LongformerTokenizer),
"marian": (MarianConfig, MarianMTModel, MarianTokenizer),
"roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
}
class Seq2SeqModel:
def __init__(
self,
encoder_type=None,
encoder_name=None,
decoder_name=None,
encoder_decoder_type=None,
encoder_decoder_name=None,
config=None,
args=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a Seq2SeqModel.
Args:
encoder_type (optional): The type of model to use as the encoder.
encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
Must be the same "size" as the encoder model (base/base, large/large, etc.)
encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)
encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART model.
config (optional): A configuration file to build an EncoderDecoderModel.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
if not config:
# if not ((encoder_name and decoder_name) or encoder_decoder_name) and not encoder_type:
if not ((encoder_name and decoder_name) or encoder_decoder_name):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name OR \t \t"
"encoder_type and encoder_decoder_name"
)
elif not (encoder_type or encoder_decoder_type):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name \t OR \t"
"encoder_type and encoder_decoder_name"
)
if args and "manual_seed" in args:
random.seed(args["manual_seed"])
np.random.seed(args["manual_seed"])
torch.manual_seed(args["manual_seed"])
if "n_gpu" in args and args["n_gpu"] > 0:
torch.cuda.manual_seed_all(args["manual_seed"])
self.args = {
"dataset_class": None,
"do_sample": False,
"max_steps": -1,
"evaluate_generated_text": False,
"num_beams": 1,
"max_length": 20,
"repetition_penalty": 1.0,
"length_penalty": 2.0,
"early_stopping": True,
}
self.args.update(global_args)
saved_model_args = None
try:
saved_model_args = self._load_model_args(encoder_decoder_name)
if saved_model_args:
self.args.update(saved_model_args)
except TypeError:
logger.info(f"Failed to load saved args from {encoder_decoder_name}. This may be normal.")
if args:
self.args.update(args)
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
"Make sure CUDA is available or set `use_cuda=False`."
)
else:
self.device = "cpu"
self.results = {}
if not use_cuda:
self.args["fp16"] = False
# config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config)
if encoder_decoder_type:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_decoder_type]
else:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_type]
if encoder_decoder_type in ["bart", "marian"]:
self.model = model_class.from_pretrained(encoder_decoder_name)
if encoder_decoder_type == "bart":
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
elif encoder_decoder_type == "marian":
if "base_marian_model_name" in self.args:
self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args["base_marian_model_name"])
else:
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
self.decoder_tokenizer = self.encoder_tokenizer
self.config = self.model.config
else:
if encoder_decoder_name:
# self.model = EncoderDecoderModel.from_pretrained(encoder_decoder_name)
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
os.path.join(encoder_decoder_name, "encoder"), os.path.join(encoder_decoder_name, "decoder")
)
self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
else:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_name, decoder_name, config=config
)
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
self.encoder_config = self.model.config.encoder
self.decoder_config = self.model.config.decoder
if self.args["wandb_project"] and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args["wandb_project"] = None
if encoder_decoder_name:
self.args["model_name"] = encoder_decoder_name
# Checking if we are loading from a saved model or using a pre-trained model
if not saved_model_args and encoder_decoder_type == "marian":
# Need to store base pre-trained model name to get the tokenizer when loading a saved model
self.args["base_marian_model_name"] = encoder_decoder_name
elif encoder_name and decoder_name:
self.args["model_name"] = encoder_name + "-" + decoder_name
else:
self.args["model_name"] = "encoder-decoder"
if encoder_decoder_type:
self.args["model_type"] = encoder_decoder_type
elif encoder_type:
self.args["model_type"] = encoder_type + "-bert"
else:
self.args["model_type"] = "encoder-decoder"
def train_model(
self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update(args)
# if self.args["silent"]:
# show_running_loss = False
if self.args["evaluate_during_training"] and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args["output_dir"]
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args["overwrite_output_dir"]:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args['overwrite_output_dir'] = True to overcome.".format(output_dir)
)
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_data=eval_data,
verbose=verbose,
**kwargs,
)
self._save_model(self.args["output_dir"], model=self.model)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.encoder_tokenizer.save_pretrained(output_dir)
# self.decoder_tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args["model_name"], output_dir))
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args["tensorboard_dir"])
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args["train_batch_size"])
if args["max_steps"] > 0:
t_total = args["max_steps"]
args["num_train_epochs"] = (
args["max_steps"] // (len(train_dataloader) // args["gradient_accumulation_steps"]) + 1
)
else:
t_total = len(train_dataloader) // args["gradient_accumulation_steps"] * args["num_train_epochs"]
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args["weight_decay"],
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]},
]
warmup_steps = math.ceil(t_total * args["warmup_ratio"])
args["warmup_steps"] = warmup_steps if args["warmup_steps"] == 0 else args["warmup_steps"]
# TODO: Use custom optimizer like with BertSum?
optimizer = AdamW(optimizer_grouped_parameters, lr=args["learning_rate"], eps=args["adam_epsilon"])
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args["warmup_steps"], num_training_steps=t_total
)
if (
args["model_name"]
and os.path.isfile(os.path.join(args["model_name"], "optimizer.pt"))
and os.path.isfile(os.path.join(args["model_name"], "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args["model_name"], "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args["model_name"], "scheduler.pt")))
if args["fp16"]:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args["fp16_opt_level"])
if args["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
logger.info(" Training started")
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args["num_train_epochs"]), desc="Epoch", disable=args["silent"], mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args["model_name"] and os.path.exists(args["model_name"]):
try:
# set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args["model_name"].split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args["gradient_accumulation_steps"])
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args["gradient_accumulation_steps"]
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args["evaluate_during_training"]:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args["wandb_project"]:
wandb.init(project=args["wandb_project"], config={**args}, **args["wandb_kwargs"])
wandb.watch(self.model)
model.train()
for current_epoch in train_iterator:
if epochs_trained > 0:
epochs_trained -= 1
continue
# epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(tqdm(train_dataloader, desc="Current iteration", disable=args["silent"])):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args["n_gpu"] > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
print("\rRunning loss: %f" % loss, end="")
if args["gradient_accumulation_steps"] > 1:
loss = loss / args["gradient_accumulation_steps"]
if args["fp16"]:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# torch.nn.utils.clip_grad_norm_(
# amp.master_params(optimizer), args["max_grad_norm"]
# )
else:
loss.backward()
# torch.nn.utils.clip_grad_norm_(
# model.parameters(), args["max_grad_norm"]
# )
tr_loss += loss.item()
if (step + 1) % args["gradient_accumulation_steps"] == 0:
if args["fp16"]:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args["max_grad_norm"])
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args["max_grad_norm"])
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args["logging_steps"] > 0 and global_step % args["logging_steps"] == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args["logging_steps"], global_step)
logging_loss = tr_loss
if args["wandb_project"]:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_lr()[0],
"global_step": global_step,
}
)
if args["save_steps"] > 0 and global_step % args["save_steps"] == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"] and (
args["evaluate_during_training_steps"] > 0
and global_step % args["evaluate_during_training_steps"] == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results = self.eval_model(
eval_data,
verbose=verbose and args["evaluate_during_training_verbose"],
silent=args["evaluate_during_training_silent"],
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args["save_eval_checkpoints"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False,
)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if (
results[args["early_stopping_metric"]] - best_eval_metric
< args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(
f" Patience of {args['early_stopping_patience']} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if (
results[args["early_stopping_metric"]] - best_eval_metric
> args["early_stopping_delta"]
):
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(
f" Patience of {args['early_stopping_patience']} steps reached"
)
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args["save_model_every_epoch"] or args["evaluate_during_training"]:
os.makedirs(output_dir_current, exist_ok=True)
if args["save_model_every_epoch"]:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args["evaluate_during_training"]:
results = self.eval_model(
eval_data,
verbose=verbose and args["evaluate_during_training_verbose"],
silent=args["evaluate_during_training_silent"],
**kwargs,
)
if args["save_eval_checkpoints"]:
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args["output_dir"], "training_progress_scores.csv"), index=False)
if args["wandb_project"]:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(args["best_model_dir"], optimizer, scheduler, model=model, results=results)
if best_eval_metric and args["early_stopping_metric_minimize"]:
if results[args["early_stopping_metric"]] - best_eval_metric < args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(f" Patience of {args['early_stopping_patience']} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args["early_stopping_metric"]] - best_eval_metric > args["early_stopping_delta"]:
best_eval_metric = results[args["early_stopping_metric"]]
if args["save_best_model"]:
self._save_model(
args["best_model_dir"], optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args["use_early_stopping"] and args["early_stopping_consider_epochs"]:
if early_stopping_counter < args["early_stopping_patience"]:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args['early_stopping_metric']}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args['early_stopping_patience']}")
else:
if verbose:
logger.info(f" Patience of {args['early_stopping_patience']} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
return global_step, tr_loss / global_step
def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target sequence
output_dir: The directory where model files will be saved. If not given, self.args['output_dir'] will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args["output_dir"]
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if self.args["evaluate_generated_text"]:
to_predict = eval_data["input_text"].tolist()
preds = self.predict(to_predict)
result = self.compute_metrics(eval_data["target_text"].tolist(), preds, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args["eval_batch_size"])
if args["n_gpu"] > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=args["silent"] or silent):
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
results["eval_loss"] = eval_loss
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def predict(self, to_predict):
"""
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.
Returns:
preds: A python list of the generated sequences.
""" # noqa: ignore flake8"
self._move_model_to_device()
all_outputs = []
# Batching
for batch in [
to_predict[i : i + self.args["eval_batch_size"]]
for i in range(0, len(to_predict), self.args["eval_batch_size"])
]:
if self.args["model_type"] == "marian":
input_ids = self.encoder_tokenizer.prepare_translation_batch(
batch, max_length=self.args["max_seq_length"], pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
else:
input_ids = self.encoder_tokenizer.batch_encode_plus(
batch, max_length=self.args["max_seq_length"], pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
input_ids = input_ids.to(self.device)
if self.args["model_type"] in ["bart", "marian"]:
outputs = self.model.generate(
input_ids=input_ids,
num_beams=self.args["num_beams"],
max_length=self.args["max_length"],
length_penalty=self.args["length_penalty"],
early_stopping=self.args["early_stopping"],
repetition_penalty=self.args["repetition_penalty"],
do_sample=self.args["do_sample"],
)
else:
outputs = self.model.generate(
input_ids=input_ids,
decoder_start_token_id=self.model.config.decoder.pad_token_id,
num_beams=self.args["num_beams"],
max_length=self.args["max_length"],
length_penalty=self.args["length_penalty"],
early_stopping=self.args["early_stopping"],
repetition_penalty=self.args["repetition_penalty"],
do_sample=self.args["do_sample"],
)
all_outputs.extend(outputs)
return [
self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for output_id in all_outputs
]
def compute_metrics(self, labels, preds, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
labels: List of target sequences
preds: List of model generated outputs
**kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
result: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
assert len(labels) == len(preds)
results = {}
for metric, func in kwargs.items():
results[metric] = func(labels, preds)
return results
def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
"""
Creates a Seq2SeqDataset (or SimpleSummarizationDataset for BART/Marian models) from data.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
encoder_tokenizer = self.encoder_tokenizer
decoder_tokenizer = self.decoder_tokenizer
args = self.args
if not no_cache:
no_cache = args["no_cache"]
os.makedirs(self.args["cache_dir"], exist_ok=True)
mode = "dev" if evaluate else "train"
if args["dataset_class"]:
CustomDataset = args["dataset_class"]
return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
else:
if args["model_type"] in ["bart", "marian"]:
return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
else:
return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode,)
def _create_training_progress_scores(self, **kwargs):
extra_metrics = {key: [] for key in kwargs}
training_progress_scores = {
"global_step": [],
"eval_loss": [],
"train_loss": [],
**extra_metrics,
}
return training_progress_scores
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args["output_dir"]
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model into {output_dir}")
if model and not self.args["no_save"]:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
self._save_model_args(output_dir)
if self.args["model_type"] in ["bart", "marian"]:
os.makedirs(os.path.join(output_dir), exist_ok=True)
model_to_save.save_pretrained(output_dir)
self.config.save_pretrained(output_dir)
if self.args["model_type"] == "bart":
self.encoder_tokenizer.save_pretrained(output_dir)
else:
os.makedirs(os.path.join(output_dir, "encoder"), exist_ok=True)
os.makedirs(os.path.join(output_dir, "decoder"), exist_ok=True)
self.encoder_config.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_config.save_pretrained(os.path.join(output_dir, "decoder"))
model_to_save = (
self.model.encoder.module if hasattr(self.model.encoder, "module") else self.model.encoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "encoder"))
model_to_save = (
self.model.decoder.module if hasattr(self.model.decoder, "module") else self.model.decoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "decoder"))
self.encoder_tokenizer.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_tokenizer.save_pretrained(os.path.join(output_dir, "decoder"))
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args["save_optimizer_and_scheduler"]:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
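    # For the split encoder/decoder case, _save_model() produces roughly this layout
    # (assuming output_dir="outputs/"; exact file names depend on the transformers version):
    #
    #   outputs/
    #     encoder/   config.json, model weights, tokenizer files
    #     decoder/   config.json, model weights, tokenizer files
    #     model_args.json, training_args.bin, optimizer.pt, scheduler.pt, eval_results.txt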
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch):
device = self.device
if self.args["model_type"] in ["bart", "marian"]:
pad_token_id = self.encoder_tokenizer.pad_token_id
source_ids, source_mask, y = batch["source_ids"], batch["source_mask"], batch["target_ids"]
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone()
lm_labels[y[:, 1:] == pad_token_id] = -100
inputs = {
"input_ids": source_ids.to(device),
"attention_mask": source_mask.to(device),
"decoder_input_ids": y_ids.to(device),
"lm_labels": lm_labels.to(device),
}
else:
lm_labels = batch[1]
lm_labels_masked = lm_labels.clone()
lm_labels_masked[lm_labels_masked == self.decoder_tokenizer.pad_token_id] = -100
inputs = {
"input_ids": batch[0].to(device),
"decoder_input_ids": lm_labels.to(device),
"lm_labels": lm_labels_masked.to(device),
}
return inputs
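    # The BART/Marian branch above implements teacher forcing. With a toy padded target
    # y = [BOS, w1, w2, EOS, PAD]:
    #   decoder_input_ids = y[:, :-1] -> [BOS, w1, w2, EOS]
    #   lm_labels         = y[:, 1:]  -> [w1, w2, EOS, PAD], with PAD replaced by -100
    # so padding positions are ignored by the cross-entropy loss.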
def _save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "model_args.json"), "w") as f:
json.dump(self.args, f)
def _load_model_args(self, input_dir):
model_args_file = os.path.join(input_dir, "model_args.json")
if os.path.isfile(model_args_file):
with open(model_args_file, "r") as f:
model_args = json.load(f)
return model_args
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python37_app]
from base64 import urlsafe_b64encode
import datetime
import json
from urllib.parse import urlparse, parse_qs
from secrets import SystemRandom
from operator import itemgetter
import os
import time
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from flask import Flask, request, redirect, jsonify, abort
from google.cloud import datastore
import google.oauth2.credentials
import googleapiclient.discovery
import httplib2
import google_auth_httplib2
import pytz
import gapi
from config import *
def store_endpoint_id(endpoint_id, timestamp):
client = datastore.Client()
key = client.key('endpoint_id', endpoint_id)
entity = datastore.Entity(key)
entity['endpoint_id'] = endpoint_id
entity['timestamp'] = timestamp
client.put(entity)
def local_time(udt):
tzone = pytz.timezone(TIMEZONE)
ldt = udt.astimezone(tzone)
return ldt.strftime('%-I:%M%p')
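# local_time() renders a UTC datetime in the configured TIMEZONE; for example, assuming
# TIMEZONE = 'America/New_York', 2020-03-01 17:30 UTC is rendered as '12:30PM'.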
def draw_meetings(gmail, meetings):
for meet in meetings.values():
output = '<html><body>'
        output += 'Here are some details about your recent Google Meet call:<br><br>'
if meet.get('calendar_event'):
summary = meet["calendar_event"].get("summary", "<No title>")
htmlLink = meet["calendar_event"].get("htmlLink", "")
output += f'Calendar Event: <a href="{htmlLink}">{summary}</a><br>'
        output += f'Meeting code: <a href="https://meet.google.com/{meet["meeting_code"]}">{meet["meeting_code"]}</a><br>'
        output += f'Time: {local_time(meet["start_time"])} - {local_time(meet["end_time"])} {TIMEZONE}<br>'
        output += f'{len(meet["attendees"])} attendees:<br><ul>'
rcpts = []
if TO_ORGANIZER:
rcpts.append(meet['organizer_email'])
endpoint_ids = []
for attendee, times in meet['attendees'].items():
for atime in times:
if atime.get('identifier_type') == 'email_address':
if TO_ATTENDEES:
rcpts.append(attendee)
attendee = f'<a href="mailto:{attendee}'>{atime.get('display_name')}</a>'
break
output += f'<li>{attendee}:'
for atime in sorted(times, key=itemgetter('joined_time')):
                output += f' {local_time(atime["joined_time"])} - {local_time(atime["left_time"])}'
                if SHOW_DEVICE_TYPE and 'device_type' in atime:
                    output += f' {atime["device_type"]}'
                if SHOW_IP and 'ip_address' in atime:
                    output += f', {atime["ip_address"]}'
                if SHOW_LOCATION and 'location_region' in atime:
                    output += f', {atime["location_region"]}'
                if SHOW_LOCATION and 'location_country' in atime:
                    output += f', {atime["location_country"]}'
endpoint_ids.append(atime['endpoint_id'])
output += '</li>'
output += '</ul></body></html>'
if meet.get('calendar_event'):
subject = SUBJECT_FOR_CALENDAR_MEETINGS.format(
event_summary=meet['calendar_event'].get('summary', '<no title>'),
meeting_code=meet['meeting_code'])
else:
subject = SUBJECT_FOR_MEETINGS.format(meeting_code=meet['meeting_code'])
ref_domain = os.environ.get('GAE_APPLICATION', 'unknown-meet-report-instance.appspot.com')
if ref_domain.find('~') != -1:
ref_domain = ref_domain.split('~')[1]
ref_domain += '.appspot.com'
        references = f'<{meet["meeting_code"]}@{ref_domain}>'
send_email(gmail, rcpts, subject, output, references)
timestamp = datetime.datetime.utcnow()
for id in endpoint_ids:
store_endpoint_id(id, timestamp)
def send_email(gmail, rcpts, subject, body, references=None):
msg = MIMEMultipart("alternative")
msg.attach(MIMEText(body, 'html'))
msg['Subject'] = subject
if FROM_HEADER:
msg['From'] = FROM_HEADER
msg['Precedence'] = 'bulk'
msg['Return-Path'] = '<>'
if rcpts:
msg['To'] = ', '.join(rcpts)
if BCC_ADDRESS:
msg['Bcc'] = BCC_ADDRESS
if references:
msg['References'] = references
encoded_email = urlsafe_b64encode(msg.as_bytes()).decode()
api_body = {'raw': encoded_email}
gmail.users().messages().send(userId='me', body=api_body).execute()
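# The Gmail API expects the complete RFC 2822 message, base64url-encoded, in the 'raw'
# field of the request body, which is why the MIME message is serialized with as_bytes()
# and wrapped in urlsafe_b64encode() above instead of being sent as separate fields.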
def fetch_all_endpoint_ids():
endpoint_ids = []
client = datastore.Client()
query = client.query(kind='endpoint_id')
query_iter = query.fetch()
for entity in query_iter:
endpoint_ids.append(entity.get('endpoint_id'))
return list(set(endpoint_ids))
str_params = ['device_type', 'display_name', 'endpoint_id',
'identifier_type', 'ip_address', 'location_country',
'location_region',]
bool_params = ['is_external', ]
def parse_report(report, cal, ignore_endpoint_ids=[]):
'''Takes a Meet Activity Report and parses into something we can chew'''
meetings = {}
now = datetime.datetime.utcnow()
defer_if_event_after = now - datetime.timedelta(minutes=DEFER_IF_EVENT_SOONER_THAN_MINUTES)
for meeting in report:
left_time = meeting['id']['time'][:-1]
left_time = datetime.datetime.fromisoformat(left_time)
for event in meeting.get('events', []):
left_event = {'left_time': left_time}
is_meet = True
conference_id = identifier = meeting_code = calendar_event_id = organizer_email = None
for param in event.get('parameters', []):
name = param.get('name', 'NOTSET')
if name == 'product_type' and param.get('value').lower() != 'meet':
is_meet = False
break
elif name == 'conference_id':
conference_id = param.get('value')
elif name == 'meeting_code':
meeting_code = param.get('value')
meeting_code = meeting_code.lower()
if len(meeting_code) == 10:
meeting_code = f'{meeting_code[:3]}-{meeting_code[3:7]}-{meeting_code[7:]}'
elif name == 'calendar_event_id':
calendar_event_id = param.get('value')
elif name == 'organizer_email':
organizer_email = param.get('value')
elif name == 'identifier':
identifier = param.get('value')
elif name in str_params:
left_event[name] = param.get('value')
elif name in bool_params:
left_event[name] = bool(param.get('boolValue'))
elif name == 'duration_seconds':
left_event[name] = int(param.get('intValue'))
left_event['joined_time'] = left_event['left_time'] - datetime.timedelta(seconds=left_event[name])
if not is_meet:
print(f'skipping non meet meeting {conference_id}')
continue
if not conference_id:
print(f'skipping end_call with no conference_id: {event}')
continue
if left_event.get('endpoint_id') in ignore_endpoint_ids:
                print(f'skipping ignored endpoint {left_event["endpoint_id"]}')
continue
if not identifier: # anonymous web user
identifier = left_event.get('display_name', 'No Name Set')
if not left_event.get('duration_seconds'): # unset or 0
left_event['duration_seconds'] = 0
left_event['joined_time'] = left_event['left_time']
if conference_id in meetings:
if meeting_code and not meetings[conference_id]['meeting_code']:
meetings[conference_id]['meeting_code'] = meeting_code
if calendar_event_id and not meetings[conference_id]['calendar_event_id']:
meetings[conference_id]['calendar_event_id'] = calendar_event_id
if organizer_email and not meetings[conference_id]['organizer_email']:
meetings[conference_id]['organizer_email'] = organizer_email
if identifier in meetings[conference_id]['attendees']:
meetings[conference_id]['attendees'][identifier].append(left_event)
else:
meetings[conference_id]['attendees'][identifier] = [left_event, ]
if left_event['left_time'] > meetings[conference_id]['end_time']:
meetings[conference_id]['end_time'] = left_event['left_time']
if left_event['joined_time'] < meetings[conference_id]['start_time']:
meetings[conference_id]['start_time'] = left_event['joined_time']
else:
meetings[conference_id] = {'meeting_code': meeting_code,
'calendar_event_id': calendar_event_id,
'organizer_email': organizer_email,
'start_time': left_event.get('joined_time', now),
'end_time': left_event.get('left_time', now),
'attendees': {identifier: [left_event]}}
organized_meetings = {}
print(f'len meetings = {len(meetings)}')
for meeting, val in meetings.items():
if val['end_time'] > defer_if_event_after:
print('deferring meeting with recent end call events')
continue
val['duration'] = (val['end_time'] - val['start_time']).total_seconds()
if val['duration'] < MINIMUM_MEETING_DURATION_SECONDS:
            print(f'skipping short meeting of {val["duration"]} seconds')
continue
if len(val['attendees']) < MINIMUM_MEETING_ATTENDEES:
            print(f'skipping meeting with only {len(val["attendees"])}')
continue
if not val['organizer_email']:
if REPLACE_BLANK_ORGANIZER_WITH:
val['organizer_email'] = REPLACE_BLANK_ORGANIZER_WITH
else:
print('skipping meeting with no organizer')
continue
if 'calendar_event_id' in val:
val['calendar_event'] = get_event(val['organizer_email'], val['calendar_event_id'], cal)
organized_meetings[meeting] = val
print(f'len organized_meetings = {len(organized_meetings)}')
return organized_meetings
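# parse_report() returns a dict keyed by conference_id; a sketch of one entry
# (all values below are illustrative):
#
#   {'abc123': {'meeting_code': 'abc-defg-hij',   # 10-char codes are reformatted xxx-xxxx-xxx
#               'calendar_event_id': '...',
#               'organizer_email': 'host@example.com',
#               'start_time': datetime, 'end_time': datetime, 'duration': seconds,
#               'attendees': {'user@example.com': [<left_event dict>, ...]}}}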
def get_event(calendarId, eventId, cal):
fields = 'summary,htmlLink'
try:
results = cal.events().get(calendarId=calendarId, eventId=eventId, fields=fields).execute()
if not results.get('summary'):
results.pop('summary', None)
return results
except:
pass
app = Flask(__name__)
@app.route('/send-reports', methods=['GET'])
def send_reports():
if request.headers.get('X-Appengine-Cron') != 'true':
abort(404)
already_parsed_endpoint_ids = fetch_all_endpoint_ids()
print(already_parsed_endpoint_ids)
with open('oauth2.txt') as f:
cdata = json.load(f)
httpc = httplib2.Http()
req = google_auth_httplib2.Request(httpc)
creds = google.oauth2.credentials.Credentials.from_authorized_user_file('oauth2.txt')
creds.token = cdata.get('token', cdata.get('auth_token', ''))
creds._id_token = cdata.get('id_token_jwt', cdata.get('id_token', None))
token_expiry = cdata.get('token_expiry', '1970-01-01T00:00:01Z')
creds.expiry = datetime.datetime.strptime(token_expiry, '%Y-%m-%dT%H:%M:%SZ')
creds.refresh(req)
httpc = google_auth_httplib2.AuthorizedHttp(creds, httpc)
rep = googleapiclient.discovery.build('admin', 'reports_v1', http=httpc, cache_discovery=False)
gmail = googleapiclient.discovery.build('gmail', 'v1', http=httpc, cache_discovery=False)
cal = googleapiclient.discovery.build('calendar', 'v3', http=httpc, cache_discovery=False)
now = datetime.datetime.utcnow()
two_days_ago = now - datetime.timedelta(days=2)
two_days_ago = two_days_ago.isoformat(timespec='seconds') + 'Z'
min_age = now - datetime.timedelta(minutes=MINIMUM_AGE_MINUTES)
min_age = min_age.isoformat(timespec='seconds') + 'Z'
print(f'Start time: {two_days_ago} End time: {min_age}')
response = gapi.call_pages(rep.activities(), 'list', applicationName='meet',
userKey='all', eventName='call_ended',
startTime=two_days_ago, endTime=min_age)
meetings = parse_report(response, cal, ignore_endpoint_ids=already_parsed_endpoint_ids)
draw_meetings(gmail, meetings)
return 'all done!'
@app.route('/cleanup', methods=['GET'])
def cleanup():
if request.headers.get('X-Appengine-Cron') != 'true':
abort(404)
client = datastore.Client()
q = client.query(kind='endpoint_id')
three_days_ago = datetime.datetime.utcnow() - datetime.timedelta(days = 3)
q.add_filter('timestamp', '<', three_days_ago)
q.keys_only()
keys = [key.key for key in list(q.fetch())]
max_chunk_size = 500
chunked_keys = [keys[i * max_chunk_size:(i + 1) * max_chunk_size] for i in range((len(keys) + max_chunk_size - 1) // max_chunk_size )]
for key_chunk in chunked_keys:
client.delete_multi(key_chunk)
return 'Deleted %s codes' % len(keys)
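# delete_multi() is chunked because Datastore caps batched mutations (500 per commit,
# matching max_chunk_size above); e.g. 1200 stale keys are removed in chunks of 500, 500 and 200.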
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='0.0.0.0', port=8080, debug=True)
# [END gae_python37_app]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python37_app]
from base64 import urlsafe_b64encode
import datetime
import json
from urllib.parse import urlparse, parse_qs
from secrets import SystemRandom
from operator import itemgetter
import os
import time
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from flask import Flask, request, redirect, jsonify, abort
from google.cloud import datastore
import google.oauth2.credentials
import googleapiclient.discovery
import httplib2
import google_auth_httplib2
import pytz
import gapi
from config import *
def store_endpoint_id(endpoint_id, timestamp):
client = datastore.Client()
key = client.key('endpoint_id', endpoint_id)
entity = datastore.Entity(key)
entity['endpoint_id'] = endpoint_id
entity['timestamp'] = timestamp
client.put(entity)
def local_time(udt):
tzone = pytz.timezone(TIMEZONE)
ldt = udt.astimezone(tzone)
return ldt.strftime('%-I:%M%p')
def draw_meetings(gmail, meetings):
for meet in meetings.values():
output = '<html><body>'
        output += 'Here are some details about your recent Google Meet call:<br><br>'
if meet.get('calendar_event'):
summary = meet["calendar_event"].get("summary", "<No title>")
htmlLink = meet["calendar_event"].get("htmlLink", "")
output += f'Calendar Event: <a href="{htmlLink}">{summary}</a><br>'
output += f'Meeting code: <a href="https://meet.google.com/{meet["meeting_code"]}">{meet["meeting_code"]}</a><br>'
output += f'Time: {local_time(meet["start_time"])} - {local_time(meet["end_time"])} {TIMEZONE}<br>'
output += f'{len(meet["attendees"])} attendees:<br><ul>'
rcpts = []
if TO_ORGANIZER:
rcpts.append(meet['organizer_email'])
endpoint_ids = []
for attendee, times in meet['attendees'].items():
for atime in times:
if atime.get('identifier_type') == 'email_address':
if TO_ATTENDEES:
rcpts.append(attendee)
attendee = f'<a href="mailto:{attendee}">{atime.get("display_name")}</a>'
break
output += f'<li>{attendee}:'
for atime in sorted(times, key=itemgetter('joined_time')):
output += f' {local_time(atime["joined_time"])} - {local_time(atime["left_time"])}'
if SHOW_DEVICE_TYPE and 'device_type' in atime:
output += f' {atime["device_type"]}'
if SHOW_IP and 'ip_address' in atime:
output += f', {atime["ip_address"]}'
if SHOW_LOCATION and 'location_region' in atime:
output += f', {atime["location_region"]}'
if SHOW_LOCATION and 'location_country' in atime:
output += f', {atime["location_country"]}'
endpoint_ids.append(atime['endpoint_id'])
output += '</li>'
output += '</ul></body></html>'
if meet.get('calendar_event'):
subject = SUBJECT_FOR_CALENDAR_MEETINGS.format(
event_summary=meet['calendar_event'].get('summary', '<no title>'),
meeting_code=meet['meeting_code'])
else:
subject = SUBJECT_FOR_MEETINGS.format(meeting_code=meet['meeting_code'])
ref_domain = os.environ.get('GAE_APPLICATION', 'unknown-meet-report-instance.appspot.com')
if ref_domain.find('~') != -1:
ref_domain = ref_domain.split('~')[1]
ref_domain += '.appspot.com'
references = f'<{meet["meeting_code"]}@{ref_domain}>'
send_email(gmail, rcpts, subject, output, references)
timestamp = datetime.datetime.utcnow()
for id in endpoint_ids:
store_endpoint_id(id, timestamp)
def send_email(gmail, rcpts, subject, body, references=None):
msg = MIMEMultipart("alternative")
msg.attach(MIMEText(body, 'html'))
msg['Subject'] = subject
if FROM_HEADER:
msg['From'] = FROM_HEADER
msg['Precedence'] = 'bulk'
msg['Return-Path'] = '<>'
if rcpts:
msg['To'] = ', '.join(rcpts)
if BCC_ADDRESS:
msg['Bcc'] = BCC_ADDRESS
if references:
msg['References'] = references
encoded_email = urlsafe_b64encode(msg.as_bytes()).decode()
api_body = {'raw': encoded_email}
gmail.users().messages().send(userId='me', body=api_body).execute()
def fetch_all_endpoint_ids():
endpoint_ids = []
client = datastore.Client()
query = client.query(kind='endpoint_id')
query_iter = query.fetch()
for entity in query_iter:
endpoint_ids.append(entity.get('endpoint_id'))
return list(set(endpoint_ids))
str_params = ['device_type', 'display_name', 'endpoint_id',
'identifier_type', 'ip_address', 'location_country',
'location_region',]
bool_params = ['is_external', ]
def parse_report(report, cal, ignore_endpoint_ids=[]):
'''Takes a Meet Activity Report and parses into something we can chew'''
meetings = {}
now = datetime.datetime.utcnow()
defer_if_event_after = now - datetime.timedelta(minutes=DEFER_IF_EVENT_SOONER_THAN_MINUTES)
for meeting in report:
left_time = meeting['id']['time'][:-1]
left_time = datetime.datetime.fromisoformat(left_time)
for event in meeting.get('events', []):
left_event = {'left_time': left_time}
is_meet = True
conference_id = identifier = meeting_code = calendar_event_id = organizer_email = None
for param in event.get('parameters', []):
name = param.get('name', 'NOTSET')
if name == 'product_type' and param.get('value').lower() != 'meet':
is_meet = False
break
elif name == 'conference_id':
conference_id = param.get('value')
elif name == 'meeting_code':
meeting_code = param.get('value')
meeting_code = meeting_code.lower()
if len(meeting_code) == 10:
meeting_code = f'{meeting_code[:3]}-{meeting_code[3:7]}-{meeting_code[7:]}'
elif name == 'calendar_event_id':
calendar_event_id = param.get('value')
elif name == 'organizer_email':
organizer_email = param.get('value')
elif name == 'identifier':
identifier = param.get('value')
elif name in str_params:
left_event[name] = param.get('value')
elif name in bool_params:
left_event[name] = bool(param.get('boolValue'))
elif name == 'duration_seconds':
left_event[name] = int(param.get('intValue'))
left_event['joined_time'] = left_event['left_time'] - datetime.timedelta(seconds=left_event[name])
if not is_meet:
print(f'skipping non meet meeting {conference_id}')
continue
if not conference_id:
print(f'skipping end_call with no conference_id: {event}')
continue
if left_event.get('endpoint_id') in ignore_endpoint_ids:
print(f'skipping ignored endpoint {left_event["endpoint_id"]}')
continue
if not identifier: # anonymous web user
identifier = left_event.get('display_name', 'No Name Set')
if not left_event.get('duration_seconds'): # unset or 0
left_event['duration_seconds'] = 0
left_event['joined_time'] = left_event['left_time']
if conference_id in meetings:
if meeting_code and not meetings[conference_id]['meeting_code']:
meetings[conference_id]['meeting_code'] = meeting_code
if calendar_event_id and not meetings[conference_id]['calendar_event_id']:
meetings[conference_id]['calendar_event_id'] = calendar_event_id
if organizer_email and not meetings[conference_id]['organizer_email']:
meetings[conference_id]['organizer_email'] = organizer_email
if identifier in meetings[conference_id]['attendees']:
meetings[conference_id]['attendees'][identifier].append(left_event)
else:
meetings[conference_id]['attendees'][identifier] = [left_event, ]
if left_event['left_time'] > meetings[conference_id]['end_time']:
meetings[conference_id]['end_time'] = left_event['left_time']
if left_event['joined_time'] < meetings[conference_id]['start_time']:
meetings[conference_id]['start_time'] = left_event['joined_time']
else:
meetings[conference_id] = {'meeting_code': meeting_code,
'calendar_event_id': calendar_event_id,
'organizer_email': organizer_email,
'start_time': left_event.get('joined_time', now),
'end_time': left_event.get('left_time', now),
'attendees': {identifier: [left_event]}}
organized_meetings = {}
print(f'len meetings = {len(meetings)}')
for meeting, val in meetings.items():
if val['end_time'] > defer_if_event_after:
print('deferring meeting with recent end call events')
continue
val['duration'] = (val['end_time'] - val['start_time']).total_seconds()
if val['duration'] < MINIMUM_MEETING_DURATION_SECONDS:
print(f'skipping short meeting of {val["duration"]} seconds')
continue
if len(val['attendees']) < MINIMUM_MEETING_ATTENDEES:
print(f'skipping meeting with only {len(val["attendees"])}')
continue
if not val['organizer_email']:
if REPLACE_BLANK_ORGANIZER_WITH:
val['organizer_email'] = REPLACE_BLANK_ORGANIZER_WITH
else:
print('skipping meeting with no organizer')
continue
if 'calendar_event_id' in val:
val['calendar_event'] = get_event(val['organizer_email'], val['calendar_event_id'], cal)
organized_meetings[meeting] = val
print(f'len organized_meetings = {len(organized_meetings)}')
return organized_meetings
def get_event(calendarId, eventId, cal):
fields = 'summary,htmlLink'
try:
results = cal.events().get(calendarId=calendarId, eventId=eventId, fields=fields).execute()
if not results.get('summary'):
results.pop('summary', None)
return results
except:
pass
app = Flask(__name__)
@app.route('/send-reports', methods=['GET'])
def send_reports():
if request.headers.get('X-Appengine-Cron') != 'true':
abort(404)
already_parsed_endpoint_ids = fetch_all_endpoint_ids()
print(already_parsed_endpoint_ids)
with open('oauth2.txt') as f:
cdata = json.load(f)
httpc = httplib2.Http()
req = google_auth_httplib2.Request(httpc)
creds = google.oauth2.credentials.Credentials.from_authorized_user_file('oauth2.txt')
creds.token = cdata.get('token', cdata.get('auth_token', ''))
creds._id_token = cdata.get('id_token_jwt', cdata.get('id_token', None))
token_expiry = cdata.get('token_expiry', '1970-01-01T00:00:01Z')
creds.expiry = datetime.datetime.strptime(token_expiry, '%Y-%m-%dT%H:%M:%SZ')
creds.refresh(req)
httpc = google_auth_httplib2.AuthorizedHttp(creds, httpc)
rep = googleapiclient.discovery.build('admin', 'reports_v1', http=httpc, cache_discovery=False)
gmail = googleapiclient.discovery.build('gmail', 'v1', http=httpc, cache_discovery=False)
cal = googleapiclient.discovery.build('calendar', 'v3', http=httpc, cache_discovery=False)
now = datetime.datetime.utcnow()
two_days_ago = now - datetime.timedelta(days=2)
two_days_ago = two_days_ago.isoformat(timespec='seconds') + 'Z'
min_age = now - datetime.timedelta(minutes=MINIMUM_AGE_MINUTES)
min_age = min_age.isoformat(timespec='seconds') + 'Z'
print(f'Start time: {two_days_ago} End time: {min_age}')
response = gapi.call_pages(rep.activities(), 'list', applicationName='meet',
userKey='all', eventName='call_ended',
startTime=two_days_ago, endTime=min_age)
meetings = parse_report(response, cal, ignore_endpoint_ids=already_parsed_endpoint_ids)
draw_meetings(gmail, meetings)
return 'all done!'
@app.route('/cleanup', methods=['GET'])
def cleanup():
if request.headers.get('X-Appengine-Cron') != 'true':
abort(404)
client = datastore.Client()
q = client.query(kind='endpoint_id')
three_days_ago = datetime.datetime.utcnow() - datetime.timedelta(days = 3)
q.add_filter('timestamp', '<', three_days_ago)
q.keys_only()
keys = [key.key for key in list(q.fetch())]
max_chunk_size = 500
chunked_keys = [keys[i * max_chunk_size:(i + 1) * max_chunk_size] for i in range((len(keys) + max_chunk_size - 1) // max_chunk_size )]
for key_chunk in chunked_keys:
client.delete_multi(key_chunk)
return 'Deleted %s codes' % len(keys)
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='0.0.0.0', port=8080, debug=True)
# [END gae_python37_app]
|
from django.shortcuts import render
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponse, JsonResponse, Http404
from django.shortcuts import render, get_object_or_404
from django.utils.text import slugify
from django.urls import reverse
from .utils import as_arche_graph, get_root_col, ARCHE_BASE_URL, get_arche_id
def res_as_arche_graph(request, app_label, model_name, pk):
format = request.GET.get('format', 'xml')
try:
ct = ContentType.objects.get(app_label=app_label, model=model_name)
except ObjectDoesNotExist:
raise Http404(f"No model: {model_name} in app: {app_label} defined")
try:
int_pk = int(pk)
except ValueError:
raise Http404(f"No model: {model_name} with id: {pk} found")
try:
res = ct.model_class().objects.get(id=int_pk)
except ObjectDoesNotExist:
raise Http404(f"No model: {model_name} with id: {pk} found")
g = as_arche_graph(res)
if format == 'turtle':
return HttpResponse(
g.serialize(encoding='utf-8', format='turtle'), content_type='text/turtle'
)
else:
return HttpResponse(
g.serialize(encoding='utf-8'), content_type='application/xml'
)
def top_col_md(request):
format = request.GET.get('format', 'xml')
g = get_root_col()
if format == 'turtle':
return HttpResponse(
g.serialize(encoding='utf-8', format='turtle'), content_type='text/turtle'
)
else:
return HttpResponse(
g.serialize(encoding='utf-8'), content_type='application/xml'
)
def get_ids(request, app_label, model_name):
start = request.GET.get('start', 0)
limit = request.GET.get('limit', False)
print(limit)
try:
ct = ContentType.objects.get(app_label=app_label, model=model_name)
except ObjectDoesNotExist:
raise Http404(f"No model: {model_name} in app: {app_label} defined")
curr_class = ct.model_class()
if limit:
try:
final_limit = int(limit)
except ValueError:
final_limit = 10
else:
final_limit = curr_class.objects.all().count()
print(limit)
base_uri = request.build_absolute_uri().split('/archeutils')[0]
# base_uri = "https://hansi4ever/"
data = {
"arche_constants": f"{base_uri}{reverse("archeutils:top_col_md")}",
"id_prefix": f"{ARCHE_BASE_URL}",
"ids": [
{
"id": f"{ARCHE_BASE_URL}/bomber__{x.id}.xml",
"filename": f"bomber__{x.id}.xml",
"md": f"{base_uri}/archeutils/md-resource/{app_label}/{model_name}/{x.id}",
"html": f"{base_uri}{x.get_absolute_url()}",
"payload": f"{base_uri}/tei/resource-as-tei/{app_label}/{model_name}/{x.id}",
"mimetype": "application/xml"
} for x in curr_class.objects.all()[0:final_limit]],
}
return JsonResponse(data)
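# get_ids() responds with JSON shaped roughly as below; all URLs are illustrative and
# depend on ARCHE_BASE_URL and the requesting host:
#
#   {"arche_constants": "https://<host>/archeutils/...",
#    "id_prefix": "<ARCHE_BASE_URL>",
#    "ids": [{"id": "<ARCHE_BASE_URL>/bomber__1.xml",
#             "filename": "bomber__1.xml",
#             "md": "https://<host>/archeutils/md-resource/<app>/<model>/1",
#             "html": "https://<host>/<object-url>/1",
#             "payload": "https://<host>/tei/resource-as-tei/<app>/<model>/1",
#             "mimetype": "application/xml"}, ...]}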
|
from django.shortcuts import render
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponse, JsonResponse, Http404
from django.shortcuts import render, get_object_or_404
from django.utils.text import slugify
from django.urls import reverse
from .utils import as_arche_graph, get_root_col, ARCHE_BASE_URL, get_arche_id
def res_as_arche_graph(request, app_label, model_name, pk):
format = request.GET.get('format', 'xml')
try:
ct = ContentType.objects.get(app_label=app_label, model=model_name)
except ObjectDoesNotExist:
raise Http404(f"No model: {model_name} in app: {app_label} defined")
try:
int_pk = int(pk)
except ValueError:
raise Http404(f"No model: {model_name} with id: {pk} found")
try:
res = ct.model_class().objects.get(id=int_pk)
except ObjectDoesNotExist:
raise Http404(f"No model: {model_name} with id: {pk} found")
g = as_arche_graph(res)
if format == 'turtle':
return HttpResponse(
g.serialize(encoding='utf-8', format='turtle'), content_type='text/turtle'
)
else:
return HttpResponse(
g.serialize(encoding='utf-8'), content_type='application/xml'
)
def top_col_md(request):
format = request.GET.get('format', 'xml')
g = get_root_col()
if format == 'turtle':
return HttpResponse(
g.serialize(encoding='utf-8', format='turtle'), content_type='text/turtle'
)
else:
return HttpResponse(
g.serialize(encoding='utf-8'), content_type='application/xml'
)
def get_ids(request, app_label, model_name):
start = request.GET.get('start', 0)
limit = request.GET.get('limit', False)
print(limit)
try:
ct = ContentType.objects.get(app_label=app_label, model=model_name)
except ObjectDoesNotExist:
raise Http404(f"No model: {model_name} in app: {app_label} defined")
curr_class = ct.model_class()
if limit:
try:
final_limit = int(limit)
except ValueError:
final_limit = 10
else:
final_limit = curr_class.objects.all().count()
print(limit)
base_uri = request.build_absolute_uri().split('/archeutils')[0]
# base_uri = "https://hansi4ever/"
data = {
"arche_constants": f"{base_uri}{reverse('archeutils:top_col_md')}",
"id_prefix": f"{ARCHE_BASE_URL}",
"ids": [
{
"id": f"{ARCHE_BASE_URL}/bomber__{x.id}.xml",
"filename": f"bomber__{x.id}.xml",
"md": f"{base_uri}/archeutils/md-resource/{app_label}/{model_name}/{x.id}",
"html": f"{base_uri}{x.get_absolute_url()}",
"payload": f"{base_uri}/tei/resource-as-tei/{app_label}/{model_name}/{x.id}",
"mimetype": "application/xml"
} for x in curr_class.objects.all()[0:final_limit]],
}
return JsonResponse(data)
|
#!/usr/bin/env python3
__version__ = '0.1.0'
__author__ = 'https://md.land/md'
import datetime
import math
import os
import re
import sys
import typing
import json
# Entity
class Day:
def __init__(self, date: datetime.date, entry_list: typing.List['Entry']):
self.date: datetime.date = date
self.entry_list: typing.List['Entry'] = entry_list
class Entry:
""" Represent task report row entry (model of parsed text line) """
def __init__(self, start: datetime.datetime, end: datetime.datetime, task: str, description: str = ''):
self.start: datetime.datetime = start
self.end: datetime.datetime = end
self.task: str = task
self.description: str = description
def __repr__(self) -> str:
        return f'Entry(start={self.start.strftime("%H:%M")!r}, end={self.end.strftime("%H:%M")!r},' \
f' task={self.task!r}, description={self.description!r})'
class TaskEntry:
""" Represents aggregated task entry (task model to be dumped into final report) """
def __init__(self, task: str, duration: int = 0, date: datetime.date = None):
self.date: datetime.date = date
self.task: str = task
self.duration: int = duration
self.description: set = set()
def update(self, duration: int, description: typing.Iterable) -> None:
self.duration += duration
self.description.update(description)
def __repr__(self) -> str:
return f'TaskEntry(task={self.task!r}, duration={self.duration!r}, description={self.description!r})'
# Parser
class ParserInterface:
""" Defines parser contract """
def parse(self, data: str) -> typing.Iterable:
raise NotImplementedError
class Parser(ParserInterface):
""" Parses tasks day report """
ENTRY_REGEXP = r'^(?P<start>\d{1,2}[.:]\d{1,2})\s*-\s*(?P<end>\d{1,2}[.:]\d{1,2})\s*-\s*' \
r'(?P<task>(?:([a-z]+)-\d+|let))\s*(?:\s*-\s*(?P<description>.+)|)$'
def __init__(self):
self._entry_regexp = re.compile(self.ENTRY_REGEXP, re.IGNORECASE)
def parse(self, data: str) -> typing.Generator[typing.Tuple[str, Entry], None, None]:
line_list = data.split("\n")
for line in line_list:
if line.startswith('#'):
continue # Skip comment row
entry_match = self._entry_regexp.match(line)
if not entry_match:
continue
entry = entry_match.groupdict()
task = entry['task'].upper()
yield task, Entry(
# 9.08 -> 09:08
                start=datetime.datetime.strptime(f"{int(entry['start'][:-3]):02d}:{entry['start'][-2:]}", '%H:%M'),
                end=datetime.datetime.strptime(f"{int(entry['end'][:-3]):02d}:{entry['end'][-2:]}", '%H:%M'),
task=task,
description=entry['description'] or ''
)
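    # Example of a line accepted by ENTRY_REGEXP (task key and description are made up):
    #
    #   "9.05 - 10:30 - ABC-123 - code review, standup"
    #   -> ('ABC-123', Entry(start=09:05, end=10:30, task='ABC-123',
    #                        description='code review, standup'))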
# Report builder
class ViewBuilderInterface:
""" Builds tasks report view """
def build(self, data: list) -> str:
raise NotImplementedError
class ReportViewBuilder(ViewBuilderInterface):
""" Builds JSON serialized report to be passed for 3rd party components """
def build(self, data: typing.Dict) -> str:
task_list: typing.List[dict] = []
for day in data:
day_ = {
'_': {'sum': 0},
'date': day['date'].strftime('%Y-%m-%dT00:00:00.000'),
'entries': []
}
task_list.append(day_)
for task in day['entries'].values():
day_['entries'].append({
'key': task.task,
'comment': ', '.join(sorted(task.description)),
'timespent': task.duration
})
day_['_']['sum'] += int(task.duration / 60)
return json.dumps(task_list, indent=2, ensure_ascii=False)
class UserViewBuilder(ViewBuilderInterface):
""" Builds basic table-like report for standard output """
def build(self, data: typing.List[typing.Dict[str, TaskEntry]]) -> str:
if len(data) == 0:
return 'Nothing was done this day'
view = '\n'
for day in data:
entries = day['entries']
assert isinstance(entries, dict)
for task_number, task in entries.items():
assert isinstance(task, TaskEntry)
                cell_width = len(max(entries.keys(), key=len))  # width of the longest task key for this day
delta = datetime.timedelta(seconds=task.duration)
view += ('{task:' + str(cell_width) + '} {time!s:8} {description!s}\n').format(
task=task.task,
time=f'{math.ceil(delta.seconds / 3600):>2}h ' + str(math.ceil((delta.seconds % 3600) / 60)) + 'm',
description=', '.join(sorted(task.description)),
)
return view
# Processing
class Debug:
NO_VERBOSE = 0
VERBOSE = 1
VERY_VERBOSE = 3
VERY_VERY_VERBOSE = 7
LEVELS_ = [NO_VERBOSE, VERBOSE, VERY_VERBOSE, VERY_VERY_VERBOSE]
parser = Parser()
def process_day(data: str, date: datetime.date) -> typing.Dict[str, TaskEntry]:
""" Process day report and returns list of parsed row models """
task_map: typing.Dict[str, TaskEntry] = {} # aggregate
keyword_splitter = re.compile(r'\s*,\s*')
task_alias_map: typing.Dict[str, str] = config['task']['alias']
task_description_map: typing.Dict[str, str] = config['task']['description']
for task, entry in parser.parse(data=data):
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv:', entry)
if task in task_alias_map: # substitute task alias if such
task = task_alias_map[task]
if task not in task_map: # processing day task first time
task_map[task] = TaskEntry(task=task, date=date)
task_map[task].update( # bump duration and description
duration=(entry.end - entry.start).seconds,
description=filter(lambda x: x != '', keyword_splitter.split(entry.description))
)
for task_entry in task_map.values(): # just patch description if present
if task_entry.task in task_description_map:
task_entry.description = {task_description_map[task_entry.task]}
return task_map
if __name__ == '__main__':
# Arguments configuration
import argparse
def get_date_list(date_list: typing.List[str]) -> typing.List[datetime.date]:
def get_date_list_on_interval(days: int, relative_to: datetime.date):
sign = 1 if days > 0 else -1
return [relative_to + datetime.timedelta(days=sign * days) for days in range(0, abs(days)+1)]
def parse_ymd(date: str) -> datetime.date:
return datetime.datetime.strptime(date, '%Y%m%d').date()
if date_list is None:
return [datetime.datetime.now().date()]
date_list_length = len(date_list)
if date_list_length == 0:
return [datetime.datetime.now().date()]
if date_list_length == 1:
if re.match(r'-\d+', date_list[0]):
return [datetime.datetime.now().date() + datetime.timedelta(days=int(date_list[0]))]
if re.match(r'\d{8}', date_list[0]):
return [parse_ymd(date_list[0])]
raise Exception('Unsupported date format')
if date_list_length >= 2:
date_list_ = []
if re.match(r'\d{8}', date_list[0]):
date_list_.append(parse_ymd(date_list[0]))
else:
raise Exception('Unsupported date format')
if re.match(r'[+-]\d+', date_list[1]):
if date_list_length > 2:
raise Exception('Unsupported date format')
return get_date_list_on_interval(days=int(date_list[1]), relative_to=date_list_[0])
for date in date_list[1:]:
if re.match(r'\d{8}', date):
date_list_.append(parse_ymd(date))
else:
raise Exception('Unsupported date format')
return date_list_
raise Exception('Unsupported date format')
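    # Examples of accepted date arguments (assuming today is 2020-02-14):
    #   []                        -> [2020-02-14]
    #   ['-1']                    -> [2020-02-13]
    #   ['20200212']              -> [2020-02-12]
    #   ['20200212', '+2']        -> [2020-02-12, 2020-02-13, 2020-02-14]
    #   ['20200212', '20200215']  -> [2020-02-12, 2020-02-15]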
argument_parser = argparse.ArgumentParser()
group = argument_parser.add_mutually_exclusive_group(required=False)
group.add_argument('-j', '--json', action='store_true', dest='json', help='Builds report view')
argument_parser.add_argument('date', action='store', nargs='*',
help='Example: `-1`, `20200212`, `20200212 -1`, `20200212 +1`, `20200212 20200215` ')
argument_parser.add_argument('-v', '--verbose', action='count', default=0)
command_arguments = argument_parser.parse_args(args=sys.argv[1:])
# Main
if command_arguments.verbose + 1 > len(Debug.LEVELS_):
        argument_parser.print_help()
exit(1)
debug_mode = Debug.LEVELS_[command_arguments.verbose]
date_list: typing.List[datetime.date] = get_date_list(command_arguments.date)
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv:', 'date list to process', date_list)
absolute_bin_dir = os.path.dirname(os.path.abspath(__file__))
with open(absolute_bin_dir + '/../etc/wid.json') as fp:
config = json.load(fp=fp)
def process_date(date: datetime.date) -> typing.Union[typing.Dict[str, TaskEntry], None]:
filename = f"{absolute_bin_dir!s}/{config["dir"]}/{date.strftime("%Y%m%d")}.txt"
if not os.path.exists(filename):
if debug_mode >= Debug.VERBOSE:
print('v:', filename, 'is not found')
return None
with open(filename) as fp:
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv: open file', filename)
data = fp.read()
return process_day(data=data, date=date)
task_map_list = []
for date in date_list:
task_map = process_date(date)
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv: task map is', task_map)
if task_map is None:
continue
task_map_list.append({
'date': date,
'entries': task_map,
})
# output
view_builder: ViewBuilderInterface
if command_arguments.json:
view_builder = ReportViewBuilder()
else:
view_builder = UserViewBuilder()
report = view_builder.build(data=task_map_list)
print(report)
exit(0)
|
#!/usr/bin/env python3
__version__ = '0.1.0'
__author__ = 'https://md.land/md'
import datetime
import math
import os
import re
import sys
import typing
import json
# Entity
class Day:
def __init__(self, date: datetime.date, entry_list: typing.List['Entry']):
self.date: datetime.date = date
self.entry_list: typing.List['Entry'] = entry_list
class Entry:
""" Represent task report row entry (model of parsed text line) """
def __init__(self, start: datetime.datetime, end: datetime.datetime, task: str, description: str = ''):
self.start: datetime.datetime = start
self.end: datetime.datetime = end
self.task: str = task
self.description: str = description
def __repr__(self) -> str:
return f'Entry(start={self.start.strftime("%H:%M")!r}, end={self.end.strftime("%H:%M")!r},' \
f' task={self.task!r}, description={self.description!r})'
class TaskEntry:
""" Represents aggregated task entry (task model to be dumped into final report) """
def __init__(self, task: str, duration: int = 0, date: datetime.date = None):
self.date: datetime.date = date
self.task: str = task
self.duration: int = duration
self.description: set = set()
def update(self, duration: int, description: typing.Iterable) -> None:
self.duration += duration
self.description.update(description)
def __repr__(self) -> str:
return f'TaskEntry(task={self.task!r}, duration={self.duration!r}, description={self.description!r})'
# Parser
class ParserInterface:
""" Defines parser contract """
def parse(self, data: str) -> typing.Iterable:
raise NotImplementedError
class Parser(ParserInterface):
""" Parses tasks day report """
ENTRY_REGEXP = r'^(?P<start>\d{1,2}[.:]\d{1,2})\s*-\s*(?P<end>\d{1,2}[.:]\d{1,2})\s*-\s*' \
r'(?P<task>(?:([a-z]+)-\d+|let))\s*(?:\s*-\s*(?P<description>.+)|)$'
def __init__(self):
self._entry_regexp = re.compile(self.ENTRY_REGEXP, re.IGNORECASE)
def parse(self, data: str) -> typing.Generator[typing.Tuple[str, Entry], None, None]:
line_list = data.split("\n")
for line in line_list:
if line.startswith('#'):
continue # Skip comment row
entry_match = self._entry_regexp.match(line)
if not entry_match:
continue
entry = entry_match.groupdict()
task = entry['task'].upper()
yield task, Entry(
# 9.08 -> 09:08
start=datetime.datetime.strptime(f"{int(entry['start'][:-3]):02d}:{entry['start'][-2:]}", '%H:%M'),
end=datetime.datetime.strptime(f"{int(entry['end'][:-3]):02d}:{entry['end'][-2:]}", '%H:%M'),
task=task,
description=entry['description'] or ''
)
# Report builder
class ViewBuilderInterface:
""" Builds tasks report view """
def build(self, data: list) -> str:
raise NotImplementedError
class ReportViewBuilder(ViewBuilderInterface):
""" Builds JSON serialized report to be passed for 3rd party components """
def build(self, data: typing.Dict) -> str:
task_list: typing.List[dict] = []
for day in data:
day_ = {
'_': {'sum': 0},
'date': day['date'].strftime('%Y-%m-%dT00:00:00.000'),
'entries': []
}
task_list.append(day_)
for task in day['entries'].values():
day_['entries'].append({
'key': task.task,
'comment': ', '.join(sorted(task.description)),
'timespent': task.duration
})
day_['_']['sum'] += int(task.duration / 60)
return json.dumps(task_list, indent=2, ensure_ascii=False)
class UserViewBuilder(ViewBuilderInterface):
""" Builds basic table-like report for standard output """
def build(self, data: typing.List[typing.Dict[str, TaskEntry]]) -> str:
if len(data) == 0:
return 'Nothing was done this day'
view = '\n'
for day in data:
entries = day['entries']
assert isinstance(entries, dict)
for task_number, task in entries.items():
assert isinstance(task, TaskEntry)
                cell_width = len(max(entries.keys(), key=len))  # width of the longest task key for this day
delta = datetime.timedelta(seconds=task.duration)
view += ('{task:' + str(cell_width) + '} {time!s:8} {description!s}\n').format(
task=task.task,
time=f'{math.ceil(delta.seconds / 3600):>2}h ' + str(math.ceil((delta.seconds % 3600) / 60)) + 'm',
description=', '.join(sorted(task.description)),
)
return view
# Processing
class Debug:
NO_VERBOSE = 0
VERBOSE = 1
VERY_VERBOSE = 3
VERY_VERY_VERBOSE = 7
LEVELS_ = [NO_VERBOSE, VERBOSE, VERY_VERBOSE, VERY_VERY_VERBOSE]
parser = Parser()
def process_day(data: str, date: datetime.date) -> typing.Dict[str, TaskEntry]:
""" Process day report and returns list of parsed row models """
task_map: typing.Dict[str, TaskEntry] = {} # aggregate
keyword_splitter = re.compile(r'\s*,\s*')
task_alias_map: typing.Dict[str, str] = config['task']['alias']
task_description_map: typing.Dict[str, str] = config['task']['description']
for task, entry in parser.parse(data=data):
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv:', entry)
if task in task_alias_map: # substitute task alias if such
task = task_alias_map[task]
if task not in task_map: # processing day task first time
task_map[task] = TaskEntry(task=task, date=date)
task_map[task].update( # bump duration and description
duration=(entry.end - entry.start).seconds,
description=filter(lambda x: x != '', keyword_splitter.split(entry.description))
)
for task_entry in task_map.values(): # just patch description if present
if task_entry.task in task_description_map:
task_entry.description = {task_description_map[task_entry.task]}
return task_map
if __name__ == '__main__':
# Arguments configuration
import argparse
def get_date_list(date_list: typing.List[str]) -> typing.List[datetime.date]:
def get_date_list_on_interval(days: int, relative_to: datetime.date):
sign = 1 if days > 0 else -1
return [relative_to + datetime.timedelta(days=sign * days) for days in range(0, abs(days)+1)]
def parse_ymd(date: str) -> datetime.date:
return datetime.datetime.strptime(date, '%Y%m%d').date()
if date_list is None:
return [datetime.datetime.now().date()]
date_list_length = len(date_list)
if date_list_length == 0:
return [datetime.datetime.now().date()]
if date_list_length == 1:
if re.match(r'-\d+', date_list[0]):
return [datetime.datetime.now().date() + datetime.timedelta(days=int(date_list[0]))]
if re.match(r'\d{8}', date_list[0]):
return [parse_ymd(date_list[0])]
raise Exception('Unsupported date format')
if date_list_length >= 2:
date_list_ = []
if re.match(r'\d{8}', date_list[0]):
date_list_.append(parse_ymd(date_list[0]))
else:
raise Exception('Unsupported date format')
if re.match(r'[+-]\d+', date_list[1]):
if date_list_length > 2:
raise Exception('Unsupported date format')
return get_date_list_on_interval(days=int(date_list[1]), relative_to=date_list_[0])
for date in date_list[1:]:
if re.match(r'\d{8}', date):
date_list_.append(parse_ymd(date))
else:
raise Exception('Unsupported date format')
return date_list_
raise Exception('Unsupported date format')
argument_parser = argparse.ArgumentParser()
group = argument_parser.add_mutually_exclusive_group(required=False)
group.add_argument('-j', '--json', action='store_true', dest='json', help='Builds report view')
argument_parser.add_argument('date', action='store', nargs='*',
help='Example: `-1`, `20200212`, `20200212 -1`, `20200212 +1`, `20200212 20200215` ')
argument_parser.add_argument('-v', '--verbose', action='count', default=0)
command_arguments = argument_parser.parse_args(args=sys.argv[1:])
# Main
if command_arguments.verbose + 1 > len(Debug.LEVELS_):
        argument_parser.print_help()
exit(1)
debug_mode = Debug.LEVELS_[command_arguments.verbose]
date_list: typing.List[datetime.date] = get_date_list(command_arguments.date)
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv:', 'date list to process', date_list)
absolute_bin_dir = os.path.dirname(os.path.abspath(__file__))
with open(absolute_bin_dir + '/../etc/wid.json') as fp:
config = json.load(fp=fp)
def process_date(date: datetime.date) -> typing.Union[typing.Dict[str, TaskEntry], None]:
filename = f"{absolute_bin_dir!s}/{config['dir']}/{date.strftime('%Y%m%d')}.txt"
if not os.path.exists(filename):
if debug_mode >= Debug.VERBOSE:
print('v:', filename, 'is not found')
return None
with open(filename) as fp:
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv: open file', filename)
data = fp.read()
return process_day(data=data, date=date)
task_map_list = []
for date in date_list:
task_map = process_date(date)
if debug_mode == Debug.VERY_VERY_VERBOSE:
print('vvv: task map is', task_map)
if task_map is None:
continue
task_map_list.append({
'date': date,
'entries': task_map,
})
# output
view_builder: ViewBuilderInterface
if command_arguments.json:
view_builder = ReportViewBuilder()
else:
view_builder = UserViewBuilder()
report = view_builder.build(data=task_map_list)
print(report)
exit(0)
|
import os
import tests.testdata as test_data
from mkqa_eval import (
compute_mkqa_scores_for_language,
MKQAAnnotation,
MKQAPrediction,
read_predictions,
read_annotations,
evaluate,
)
package_path = list(test_data.__path__)[0]
def test_compute_mkqa_scores():
test_cases = [
# Test 1: Perfect textual prediction
{
"prediction": MKQAPrediction(
example_id="1",
prediction="first dummy answer",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="1", types=[], answers=["first dummy answer", "second dummy answer"],
),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 2: Partially correct prediction
{
"prediction": MKQAPrediction(
example_id="2",
prediction="alternative answer",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="2", types=[], answers=["dummy answer one", "alternative answer two"],
),
"expected_f1": 0.8,
"expected_em": 0.0,
},
# Test 3: Partially correct with stopword and punctuation removal
{
"prediction": MKQAPrediction(
example_id="3",
prediction="an answer ?? without stopwords",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="3", types=[], answers=["answer -- without, stopwords!!"],
),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 4: Correct No Answer prediction
{
"prediction": MKQAPrediction(
example_id="4", prediction="", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="4", types=[], answers=[""],),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 5: Incorrect No Answer prediction
{
"prediction": MKQAPrediction(
example_id="5", prediction="", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="5", types=[], answers=["first dummy answer", "second dummy answer"],
),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 6: Incorrect No Answer prediction, when answer is binary
{
"prediction": MKQAPrediction(
example_id="6", prediction="", binary_answer=None, no_answer_prob=1,
),
"gold_truth": MKQAAnnotation(example_id="6", types=[], answers=["yes"],),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 7: Correct binary answer prediction
{
"prediction": MKQAPrediction(
example_id="7", prediction="wrong answer", binary_answer="yes", no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="7", types=[], answers=["yes"],),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 8: Incorrect binary answer prediction
{
"prediction": MKQAPrediction(
example_id="8",
prediction="distractor answer",
binary_answer="no",
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="8", types=[], answers=["yes"],),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 9: Binary answer prediction takes precedence, but is incorrect
{
"prediction": MKQAPrediction(
example_id="9", prediction="", binary_answer="no", no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="1", types=[], answers=[""],),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 10: No Answer probability is 1, but it is not relevant to computing initial scores in `compute_mkqa_scores_for_language`
{
"prediction": MKQAPrediction(
example_id="10",
prediction="final prediction",
binary_answer=None,
no_answer_prob=1.0,
),
"gold_truth": MKQAAnnotation(
example_id="10", types=[], answers=["penultimate", "final prediction"],
),
"expected_f1": 1.0,
"expected_em": 1.0,
},
]
test_preds = {ex["prediction"].example_id: ex["prediction"] for ex in test_cases}
test_golds = {ex["gold_truth"].example_id: ex["gold_truth"] for ex in test_cases}
expected_f1s = {ex["gold_truth"].example_id: ex["expected_f1"] for ex in test_cases}
expected_ems = {ex["gold_truth"].example_id: ex["expected_em"] for ex in test_cases}
test_em_scores, test_f1_scores = compute_mkqa_scores_for_language(test_preds, test_golds, "en")
for ex_id in test_golds:
assert (
test_em_scores[ex_id] == expected_ems[ex_id]
), f"Example ID = {ex_id} | EM | Result = {test_em_scores[ex_id]} | Expected = {expected_ems[ex_id]}"
assert (
test_f1_scores[ex_id] == expected_f1s[ex_id]
), f"Example ID = {ex_id} | F1 | Result = {test_f1_scores[ex_id]} | Expected = {expected_f1s[ex_id]}"
def test_compute_mkqa_scores_in_different_languages():
test_cases = [
# Test 1: Test Spanish text normalization
{
"prediction": MKQAPrediction(
example_id="1", prediction="esto es manzana", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="1", types=[], answers=["esto es una manzana", "esta es otra manzana"],
),
"lang": "es",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 2: Test Arabic normalization
{
"prediction": MKQAPrediction(
example_id="2", prediction="تفاحة", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="2", types=[], answers=["التفاحة", "هذه تفاحة"],
),
"lang": "ar",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 3: Test French normalization
{
"prediction": MKQAPrediction(
example_id="3",
prediction="c'est de la pomme",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="3", types=[], answers=["c'est la pomme", "c'est une autre pomme"],
),
"lang": "fr",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 4: Test Hungarian normalization
{
"prediction": MKQAPrediction(
example_id="4", prediction="ez egy alma", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="4", types=[], answers=["ez alma", "ez egy újabb alma"],
),
"lang": "hu",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 5: Test Chinese Mandarin mixed segmentation f1
{
"prediction": MKQAPrediction(
example_id="5", prediction="这个一个苹果", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="5", types=[], answers=["这是还是苹果", "这是另一个苹果"],),
"lang": "zh_cn",
"expected_f1": 0.7692307692307692,
"expected_em": 0,
},
# Test 6: Test Khmer mixed segmentation f1
{
"prediction": MKQAPrediction(
example_id="6", prediction="នេះគឺជាផ្លែប៉ោម", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="7", types=[], answers=["នេះគឺជាផ្លែប៉ោមមួយទៀត"],
),
"lang": "km",
"expected_f1": 0.8333333333333333,
"expected_em": 0,
},
]
for case in test_cases:
example_id = case["prediction"].example_id
predictions = {example_id: case["prediction"]}
gold_annotations = {example_id: case["gold_truth"]}
lang = case["lang"]
em_scores, f1_scores = compute_mkqa_scores_for_language(predictions, gold_annotations, lang)
assert (
em_scores[example_id] == case["expected_em"]
), f"Example ID = {example_id} | EM | Result = {em_scores[example_id]} | Expected = {case["expected_em"]}"
assert (
f1_scores[example_id] == case["expected_f1"]
), f"Example ID = {example_id} | F1 | Result = {f1_scores[example_id]} | Expected = {case["expected_f1"]}"
def test_compute_metrics_end_2_end():
predictions_path = os.path.join(package_path, "en_prediction.jsonl")
annotations_path = os.path.join(package_path, "test_mkqa.jsonl.gz")
language = "en"
predictions = read_predictions(predictions_path)
annotations = read_annotations(annotations_path)
metrics = evaluate(annotations[language], predictions, language)
expected_metrics = {
"best_em": 66.67,
"best_f1": 80.95,
"best_answerable_em": 33.33,
"best_answerable_f1": 61.9,
"best_unanswerable_em": 100.0,
"best_f1_threshold": -6.91,
}
assert expected_metrics == dict(metrics)
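# These tests are written for pytest; a typical invocation (the path is assumed, not
# taken from the repository docs) would be:
#
#   pytest tests/test_mkqa_eval.py -q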
|
import os
import tests.testdata as test_data
from mkqa_eval import (
compute_mkqa_scores_for_language,
MKQAAnnotation,
MKQAPrediction,
read_predictions,
read_annotations,
evaluate,
)
package_path = list(test_data.__path__)[0]
def test_compute_mkqa_scores():
test_cases = [
# Test 1: Perfect textual prediction
{
"prediction": MKQAPrediction(
example_id="1",
prediction="first dummy answer",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="1", types=[], answers=["first dummy answer", "second dummy answer"],
),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 2: Partially correct prediction
{
"prediction": MKQAPrediction(
example_id="2",
prediction="alternative answer",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="2", types=[], answers=["dummy answer one", "alternative answer two"],
),
"expected_f1": 0.8,
"expected_em": 0.0,
},
# Test 3: Partially correct with stopword and punctuation removal
{
"prediction": MKQAPrediction(
example_id="3",
prediction="an answer ?? without stopwords",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="3", types=[], answers=["answer -- without, stopwords!!"],
),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 4: Correct No Answer prediction
{
"prediction": MKQAPrediction(
example_id="4", prediction="", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="4", types=[], answers=[""],),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 5: Incorrect No Answer prediction
{
"prediction": MKQAPrediction(
example_id="5", prediction="", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="5", types=[], answers=["first dummy answer", "second dummy answer"],
),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 6: Incorrect No Answer prediction, when answer is binary
{
"prediction": MKQAPrediction(
example_id="6", prediction="", binary_answer=None, no_answer_prob=1,
),
"gold_truth": MKQAAnnotation(example_id="6", types=[], answers=["yes"],),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 7: Correct binary answer prediction
{
"prediction": MKQAPrediction(
example_id="7", prediction="wrong answer", binary_answer="yes", no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="7", types=[], answers=["yes"],),
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 8: Incorrect binary answer prediction
{
"prediction": MKQAPrediction(
example_id="8",
prediction="distractor answer",
binary_answer="no",
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="8", types=[], answers=["yes"],),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 9: Binary answer prediction takes precedence, but is incorrect
{
"prediction": MKQAPrediction(
example_id="9", prediction="", binary_answer="no", no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="1", types=[], answers=[""],),
"expected_f1": 0.0,
"expected_em": 0.0,
},
# Test 10: No Answer probability is 1, but it is not relevant to computing initial scores in `compute_mkqa_scores_for_language`
{
"prediction": MKQAPrediction(
example_id="10",
prediction="final prediction",
binary_answer=None,
no_answer_prob=1.0,
),
"gold_truth": MKQAAnnotation(
example_id="10", types=[], answers=["penultimate", "final prediction"],
),
"expected_f1": 1.0,
"expected_em": 1.0,
},
]
test_preds = {ex["prediction"].example_id: ex["prediction"] for ex in test_cases}
test_golds = {ex["gold_truth"].example_id: ex["gold_truth"] for ex in test_cases}
expected_f1s = {ex["gold_truth"].example_id: ex["expected_f1"] for ex in test_cases}
expected_ems = {ex["gold_truth"].example_id: ex["expected_em"] for ex in test_cases}
test_em_scores, test_f1_scores = compute_mkqa_scores_for_language(test_preds, test_golds, "en")
for ex_id in test_golds:
assert (
test_em_scores[ex_id] == expected_ems[ex_id]
), f"Example ID = {ex_id} | EM | Result = {test_em_scores[ex_id]} | Expected = {expected_ems[ex_id]}"
assert (
test_f1_scores[ex_id] == expected_f1s[ex_id]
), f"Example ID = {ex_id} | F1 | Result = {test_f1_scores[ex_id]} | Expected = {expected_f1s[ex_id]}"
def test_compute_mkqa_scores_in_different_languages():
test_cases = [
# Test 1: Test Spanish text normalization
{
"prediction": MKQAPrediction(
example_id="1", prediction="esto es manzana", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="1", types=[], answers=["esto es una manzana", "esta es otra manzana"],
),
"lang": "es",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 2: Test Arabic normalization
{
"prediction": MKQAPrediction(
example_id="2", prediction="تفاحة", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="2", types=[], answers=["التفاحة", "هذه تفاحة"],
),
"lang": "ar",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 3: Test French normalization
{
"prediction": MKQAPrediction(
example_id="3",
prediction="c'est de la pomme",
binary_answer=None,
no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="3", types=[], answers=["c'est la pomme", "c'est une autre pomme"],
),
"lang": "fr",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 4: Test Hungarian normalization
{
"prediction": MKQAPrediction(
example_id="4", prediction="ez egy alma", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="4", types=[], answers=["ez alma", "ez egy újabb alma"],
),
"lang": "hu",
"expected_f1": 1.0,
"expected_em": 1.0,
},
# Test 5: Test Chinese Mandarin mixed segmentation f1
{
"prediction": MKQAPrediction(
example_id="5", prediction="这个一个苹果", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(example_id="5", types=[], answers=["这是还是苹果", "这是另一个苹果"],),
"lang": "zh_cn",
"expected_f1": 0.7692307692307692,
"expected_em": 0,
},
# Test 6: Test Khmer mixed segmentation f1
{
"prediction": MKQAPrediction(
example_id="6", prediction="នេះគឺជាផ្លែប៉ោម", binary_answer=None, no_answer_prob=0,
),
"gold_truth": MKQAAnnotation(
example_id="7", types=[], answers=["នេះគឺជាផ្លែប៉ោមមួយទៀត"],
),
"lang": "km",
"expected_f1": 0.8333333333333333,
"expected_em": 0,
},
]
for case in test_cases:
example_id = case["prediction"].example_id
predictions = {example_id: case["prediction"]}
gold_annotations = {example_id: case["gold_truth"]}
lang = case["lang"]
em_scores, f1_scores = compute_mkqa_scores_for_language(predictions, gold_annotations, lang)
assert (
em_scores[example_id] == case["expected_em"]
), f"Example ID = {example_id} | EM | Result = {em_scores[example_id]} | Expected = {case['expected_em']}"
assert (
f1_scores[example_id] == case["expected_f1"]
), f"Example ID = {example_id} | F1 | Result = {f1_scores[example_id]} | Expected = {case['expected_f1']}"
def test_compute_metrics_end_2_end():
predictions_path = os.path.join(package_path, "en_prediction.jsonl")
annotations_path = os.path.join(package_path, "test_mkqa.jsonl.gz")
language = "en"
predictions = read_predictions(predictions_path)
annotations = read_annotations(annotations_path)
metrics = evaluate(annotations[language], predictions, language)
expected_metrics = {
"best_em": 66.67,
"best_f1": 80.95,
"best_answerable_em": 33.33,
"best_answerable_f1": 61.9,
"best_unanswerable_em": 100.0,
"best_f1_threshold": -6.91,
}
assert expected_metrics == dict(metrics)
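# Illustrative usage sketch (not part of the test suite): a minimal direct call to the scoring
# helper exercised above. It only reuses names already imported in this module; the example id
# and strings are hypothetical.
if __name__ == "__main__":
    demo_preds = {
        "demo": MKQAPrediction(example_id="demo", prediction="an apple", binary_answer=None, no_answer_prob=0),
    }
    demo_golds = {
        "demo": MKQAAnnotation(example_id="demo", types=[], answers=["apple", "an apple"]),
    }
    demo_em, demo_f1 = compute_mkqa_scores_for_language(demo_preds, demo_golds, "en")
    print(f"demo EM={demo_em['demo']} F1={demo_f1['demo']}")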
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from http import HTTPStatus
from jira import JIRA, JIRAError, Issue, User as JiraUser
from typing import Any, List
from flask import current_app as app
from amundsen_application.api.metadata.v0 import USER_ENDPOINT
from amundsen_application.api.utils.request_utils import request_metadata
from amundsen_application.base.base_issue_tracker_client import BaseIssueTrackerClient
from amundsen_application.proxy.issue_tracker_clients.issue_exceptions import IssueConfigurationException
from amundsen_application.models.data_issue import DataIssue, Priority
from amundsen_application.models.issue_results import IssueResults
from amundsen_application.models.user import load_user
from amundsen_common.models.user import User
import urllib.parse
import logging
SEARCH_STUB_ALL_ISSUES = ('text ~ "\\"Table Key: {table_key} [PLEASE DO NOT REMOVE]\\"" '
'and (resolution = unresolved or (resolution != unresolved and updated > -30d)) '
'order by resolution DESC, priority DESC, createdDate DESC')
SEARCH_STUB_OPEN_ISSUES = ('text ~ "\\"Table Key: {table_key} [PLEASE DO NOT REMOVE]\\"" '
'and resolution = unresolved '
'order by priority DESC, createdDate DESC')
SEARCH_STUB_CLOSED_ISSUES = ('text ~ "\\"Table Key: {table_key} [PLEASE DO NOT REMOVE]\\"" '
'and resolution != unresolved '
'order by priority DESC, createdDate DESC')
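# Example (hypothetical table key): SEARCH_STUB_OPEN_ISSUES.format(table_key='hive://gold.test_schema/test_table')
# produces a JQL string roughly like:
#   text ~ "\"Table Key: hive://gold.test_schema/test_table [PLEASE DO NOT REMOVE]\"" and resolution = unresolved order by priority DESC, createdDate DESC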
# this is provided by jira as the type of a bug
ISSUE_TYPE_ID = 1
ISSUE_TYPE_NAME = 'Bug'
class JiraClient(BaseIssueTrackerClient):
def __init__(self, issue_labels: List[str],
issue_tracker_url: str,
issue_tracker_user: str,
issue_tracker_password: str,
issue_tracker_project_id: int,
issue_tracker_max_results: int) -> None:
self.issue_labels = issue_labels
self.jira_url = issue_tracker_url
self.jira_user = issue_tracker_user
self.jira_password = issue_tracker_password
self.jira_project_id = issue_tracker_project_id
self.jira_max_results = issue_tracker_max_results
self._validate_jira_configuration()
self.jira_client = self.get_client()
def get_client(self) -> JIRA:
"""
        Get a Jira client configured to connect to the Jira server
:return: A Jira client.
"""
return JIRA(
server=self.jira_url,
basic_auth=(self.jira_user, self.jira_password)
)
def get_issues(self, table_uri: str) -> IssueResults:
"""
Runs a query against a given Jira project for tickets matching the key
Returns open issues sorted by most recently created.
:param table_uri: Table Uri ie databasetype://database/table
:return: Metadata of matching issues
"""
try:
issues = self.jira_client.search_issues(SEARCH_STUB_ALL_ISSUES.format(
table_key=table_uri),
maxResults=self.jira_max_results)
# Call search_issues for only 1 open/closed issue just to get the total values from the response. The
# total count from all issues may not be accurate if older closed issues are excluded from the response
open_issues = self.jira_client.search_issues(SEARCH_STUB_OPEN_ISSUES.format(
table_key=table_uri),
maxResults=1)
closed_issues = self.jira_client.search_issues(SEARCH_STUB_CLOSED_ISSUES.format(
table_key=table_uri),
maxResults=1)
returned_issues = self._sort_issues(issues)
return IssueResults(issues=returned_issues,
total=open_issues.total + closed_issues.total,
all_issues_url=self._generate_issues_url(SEARCH_STUB_ALL_ISSUES,
table_uri,
open_issues.total + closed_issues.total),
open_issues_url=self._generate_issues_url(SEARCH_STUB_OPEN_ISSUES,
table_uri,
open_issues.total),
closed_issues_url=self._generate_issues_url(SEARCH_STUB_CLOSED_ISSUES,
table_uri,
closed_issues.total),
open_count=open_issues.total)
except JIRAError as e:
logging.exception(str(e))
raise e
def create_issue(self,
table_uri: str,
title: str,
description: str,
priority_level: str,
table_url: str,
user_id: str,
**kwargs: Any) -> DataIssue:
"""
Creates an issue in Jira
:param description: Description of the Jira issue
:param priority_level: Priority level for the ticket
:param table_uri: Table Uri ie databasetype://database/table
:param title: Title of the Jira ticket
:param table_url: Link to access the table
:param owner_ids: List of table owners user ids
:param frequent_user_ids: List of table frequent users user ids
:param project_key: Jira project key to specify where the ticket should be created
:return: Metadata about the newly created issue
"""
try:
if app.config['AUTH_USER_METHOD']:
user_email = app.config['AUTH_USER_METHOD'](app, user_id).email
# We currently cannot use the email directly because of the following issue:
# https://community.atlassian.com/t5/Answers-Developer-Questions/JIRA-Rest-API-find-JIRA-user-based-on-user-s-email-address/qaq-p/532715
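                # e.g. a (hypothetical) user_email of 'jdoe@example.com' yields a jira_id of 'jdoe'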
jira_id = user_email.split('@')[0]
else:
raise Exception('AUTH_USER_METHOD must be configured to set the JIRA issue reporter')
reporter = {'name': jira_id}
# Detected by the jira client based on API version & deployment.
if self.jira_client.deploymentType == 'Cloud':
try:
user = self.jira_client._fetch_pages(JiraUser, None, "user/search", 0, 1, {'query': user_email})[0]
reporter = {'accountId': user.accountId}
except IndexError:
raise Exception('Could not find the reporting user in our Jira installation.')
issue_type_id = ISSUE_TYPE_ID
if app.config['ISSUE_TRACKER_ISSUE_TYPE_ID']:
issue_type_id = app.config['ISSUE_TRACKER_ISSUE_TYPE_ID']
project_key = kwargs.get('project_key', None)
proj_key = 'key' if project_key else 'id'
proj_value = project_key if project_key else self.jira_project_id
reporting_user = self._get_users_from_ids([user_email])
owners = self._get_users_from_ids(kwargs.get('owner_ids', []))
frequent_users = self._get_users_from_ids(kwargs.get('frequent_user_ids', []))
reporting_user_str = self._generate_reporting_user_str(reporting_user)
owners_description_str = self._generate_owners_description_str(owners)
frequent_users_description_str = self._generate_frequent_users_description_str(frequent_users)
all_users_description_str = self._generate_all_table_users_description_str(owners_description_str,
frequent_users_description_str)
issue = self.jira_client.create_issue(fields=dict(project={
proj_key: proj_value
}, issuetype={
'id': issue_type_id,
'name': ISSUE_TYPE_NAME,
}, labels=self.issue_labels,
summary=title,
description=(f'{description} '
f'\n *Reported By:* {reporting_user_str if reporting_user_str else user_email} '
f'\n *Table Key:* {table_uri} [PLEASE DO NOT REMOVE] '
f'\n *Table URL:* {table_url} '
f'{all_users_description_str}'),
priority={
'name': Priority.get_jira_severity_from_level(priority_level)
}, reporter=reporter))
self._add_watchers_to_issue(issue_key=issue.key, users=owners + frequent_users)
return self._get_issue_properties(issue=issue)
except JIRAError as e:
logging.exception(str(e))
raise e
def _validate_jira_configuration(self) -> None:
"""
        Validates that all required Jira configuration properties are set.
        Raises an IssueConfigurationException listing any missing properties.
        :return: None
"""
missing_fields = []
if not self.jira_url:
missing_fields.append('ISSUE_TRACKER_URL')
if not self.jira_user:
missing_fields.append('ISSUE_TRACKER_USER')
if not self.jira_password:
missing_fields.append('ISSUE_TRACKER_PASSWORD')
if not self.jira_project_id:
missing_fields.append('ISSUE_TRACKER_PROJECT_ID')
if not self.jira_max_results:
missing_fields.append('ISSUE_TRACKER_MAX_RESULTS')
if missing_fields:
raise IssueConfigurationException(
                f'The following config settings must be set for Jira: {", ".join(missing_fields)} ')
@staticmethod
def _get_issue_properties(issue: Issue) -> DataIssue:
"""
Maps the jira issue object to properties we want in the UI
:param issue: Jira issue to map
:return: JiraIssue
"""
return DataIssue(issue_key=issue.key,
title=issue.fields.summary,
url=issue.permalink(),
status=issue.fields.status.name,
priority=Priority.from_jira_severity(issue.fields.priority.name))
def _generate_issues_url(self, search_stub: str, table_uri: str, issueCount: int) -> str:
"""
        Builds a URL to the list of matching Jira tickets,
        since the search results returned by the SDK don't include a query URL
:param search_stub: search stub for type of query to build
:param table_uri: table uri from the ui
:param issueCount: number of jira issues associated to the search
:return: url to a list of issues in jira
"""
if issueCount == 0:
return ''
search_query = urllib.parse.quote(search_stub.format(table_key=table_uri))
return f'{self.jira_url}/issues/?jql={search_query}'
def _sort_issues(self, issues: List[Issue]) -> List[DataIssue]:
"""
Sorts issues by resolution, first by unresolved and then by resolved. Also maps the issues to
the object used by the front end. Doesn't include closed issues that are older than 30 days.
:param issues: Issues returned from the JIRA API
:return: List of data issues
"""
open = []
closed = []
for issue in issues:
data_issue = self._get_issue_properties(issue)
if not issue.fields.resolution:
open.append(data_issue)
else:
closed.append(data_issue)
return open + closed
@staticmethod
    def _get_users_from_ids(user_ids: List[str]) -> List[User]:
"""
Calls get_user metadata API with a user id to retrieve user details.
:param user_ids: List of strings representing user ids
:return: List of User objects
"""
users = []
for user_id in user_ids:
url = '{0}{1}/{2}'.format(app.config['METADATASERVICE_BASE'], USER_ENDPOINT, user_id)
response = request_metadata(url=url)
if response.status_code == HTTPStatus.OK:
user = load_user(response.json())
if user:
users.append(user)
return users
def _generate_reporting_user_str(self, reporting_user: List[User]) -> str:
"""
:param reporting_user: List containing a user representing the reporter of the issue
or an empty list if the reporter's information could not be retrieved
:return: String of reporting user's information to display in the description
"""
if not reporting_user:
return ''
user = reporting_user[0]
if user.is_active and user.profile_url:
return (f'[{user.full_name if user.full_name else user.email}'
f'|{user.profile_url}]')
else:
return user.email
def _generate_owners_description_str(self, owners: List[User]) -> str:
"""
Build a list of table owner information to add to the description of the ticket
:param owners: List of users representing owners of the table
:return: String of owners to append in the description
"""
owners_description_str = '\n Table Owners:' if owners else ''
user_details_list = []
inactive_user_details_list = []
for user in owners:
if user.is_active and user.profile_url:
user_details_list.append((f'[{user.full_name if user.full_name else user.email}'
f'|{user.profile_url}] '))
continue
else:
inactive_user_details = f'{user.full_name if user.full_name else user.email}'
# Append relevant alumni and manager information if the user is a person and inactive
if not user.is_active and user.full_name:
inactive_user_details += ' (Alumni) '
if user.manager_fullname:
inactive_user_details += f'\u2022 Manager: {user.manager_fullname} '
inactive_user_details_list.append(inactive_user_details)
return '\n '.join(filter(None, [owners_description_str,
'\n '.join(user_details_list),
'\n '.join(inactive_user_details_list)]))
def _generate_frequent_users_description_str(self, frequent_users: List[User]) -> str:
"""
Build a list of table frequent user information to add to the description of the ticket; this list will leave
out inactive frequent users
:param frequent_users: List of users representing frequent users of the table
:return: String of frequent users to append in the description
"""
frequent_users_description_str = '\n Frequent Users: ' if frequent_users else ''
user_details_list = []
for user in frequent_users:
if user.is_active and user.profile_url:
user_details_list.append((f'[{user.full_name if user.full_name else user.email}'
f'|{user.profile_url}]'))
return frequent_users_description_str + ', '.join(user_details_list) if user_details_list else ''
def _generate_all_table_users_description_str(self, owners_str: str, frequent_users_str: str) -> str:
"""
Takes the generated owners and frequent users information and packages it up into one string for appending
to the ticket description
:param owners_str: Owner information
:param frequent_users_str: Frequent user information
:return: String including all table users (owners and frequent users) information to append to the description
"""
table_users_description_title = ''
if owners_str and frequent_users_str:
table_users_description_title = '\n\n *Owners and Frequent Users (added as Watchers):* '
elif owners_str:
table_users_description_title = '\n\n *Owners (added as Watchers):* '
elif frequent_users_str:
table_users_description_title = '\n\n *Frequent Users (added as Watchers):* '
return table_users_description_title + owners_str + frequent_users_str
def _add_watchers_to_issue(self, issue_key: str, users: List[User]) -> None:
"""
Given an issue key and a list of users, add those users as watchers to the issue if they are active
:param issue_key: key representing an issue
:param users: list of users to add as watchers to the issue
"""
for user in users:
if user.is_active:
try:
# Detected by the jira client based on API version & deployment.
if self.jira_client.deploymentType == 'Cloud':
jira_user = self.jira_client._fetch_pages(JiraUser, None, "user/search", 0, 1,
{'query': user.email})[0]
self.jira_client.add_watcher(issue=issue_key, watcher=jira_user.accountId)
else:
self.jira_client.add_watcher(issue=issue_key, watcher=user.email.split("@")[0])
except (JIRAError, IndexError):
logging.warning('Could not add user {user_email} as a watcher on the issue.'
.format(user_email=user.email))
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from http import HTTPStatus
from jira import JIRA, JIRAError, Issue, User as JiraUser
from typing import Any, List
from flask import current_app as app
from amundsen_application.api.metadata.v0 import USER_ENDPOINT
from amundsen_application.api.utils.request_utils import request_metadata
from amundsen_application.base.base_issue_tracker_client import BaseIssueTrackerClient
from amundsen_application.proxy.issue_tracker_clients.issue_exceptions import IssueConfigurationException
from amundsen_application.models.data_issue import DataIssue, Priority
from amundsen_application.models.issue_results import IssueResults
from amundsen_application.models.user import load_user
from amundsen_common.models.user import User
import urllib.parse
import logging
SEARCH_STUB_ALL_ISSUES = ('text ~ "\\"Table Key: {table_key} [PLEASE DO NOT REMOVE]\\"" '
'and (resolution = unresolved or (resolution != unresolved and updated > -30d)) '
'order by resolution DESC, priority DESC, createdDate DESC')
SEARCH_STUB_OPEN_ISSUES = ('text ~ "\\"Table Key: {table_key} [PLEASE DO NOT REMOVE]\\"" '
'and resolution = unresolved '
'order by priority DESC, createdDate DESC')
SEARCH_STUB_CLOSED_ISSUES = ('text ~ "\\"Table Key: {table_key} [PLEASE DO NOT REMOVE]\\"" '
'and resolution != unresolved '
'order by priority DESC, createdDate DESC')
# this is provided by jira as the type of a bug
ISSUE_TYPE_ID = 1
ISSUE_TYPE_NAME = 'Bug'
class JiraClient(BaseIssueTrackerClient):
def __init__(self, issue_labels: List[str],
issue_tracker_url: str,
issue_tracker_user: str,
issue_tracker_password: str,
issue_tracker_project_id: int,
issue_tracker_max_results: int) -> None:
self.issue_labels = issue_labels
self.jira_url = issue_tracker_url
self.jira_user = issue_tracker_user
self.jira_password = issue_tracker_password
self.jira_project_id = issue_tracker_project_id
self.jira_max_results = issue_tracker_max_results
self._validate_jira_configuration()
self.jira_client = self.get_client()
def get_client(self) -> JIRA:
"""
        Get a Jira client configured to connect to the Jira server
:return: A Jira client.
"""
return JIRA(
server=self.jira_url,
basic_auth=(self.jira_user, self.jira_password)
)
def get_issues(self, table_uri: str) -> IssueResults:
"""
Runs a query against a given Jira project for tickets matching the key
Returns open issues sorted by most recently created.
:param table_uri: Table Uri ie databasetype://database/table
:return: Metadata of matching issues
"""
try:
issues = self.jira_client.search_issues(SEARCH_STUB_ALL_ISSUES.format(
table_key=table_uri),
maxResults=self.jira_max_results)
# Call search_issues for only 1 open/closed issue just to get the total values from the response. The
# total count from all issues may not be accurate if older closed issues are excluded from the response
open_issues = self.jira_client.search_issues(SEARCH_STUB_OPEN_ISSUES.format(
table_key=table_uri),
maxResults=1)
closed_issues = self.jira_client.search_issues(SEARCH_STUB_CLOSED_ISSUES.format(
table_key=table_uri),
maxResults=1)
returned_issues = self._sort_issues(issues)
return IssueResults(issues=returned_issues,
total=open_issues.total + closed_issues.total,
all_issues_url=self._generate_issues_url(SEARCH_STUB_ALL_ISSUES,
table_uri,
open_issues.total + closed_issues.total),
open_issues_url=self._generate_issues_url(SEARCH_STUB_OPEN_ISSUES,
table_uri,
open_issues.total),
closed_issues_url=self._generate_issues_url(SEARCH_STUB_CLOSED_ISSUES,
table_uri,
closed_issues.total),
open_count=open_issues.total)
except JIRAError as e:
logging.exception(str(e))
raise e
def create_issue(self,
table_uri: str,
title: str,
description: str,
priority_level: str,
table_url: str,
user_id: str,
**kwargs: Any) -> DataIssue:
"""
Creates an issue in Jira
:param description: Description of the Jira issue
:param priority_level: Priority level for the ticket
:param table_uri: Table Uri ie databasetype://database/table
:param title: Title of the Jira ticket
:param table_url: Link to access the table
:param owner_ids: List of table owners user ids
:param frequent_user_ids: List of table frequent users user ids
:param project_key: Jira project key to specify where the ticket should be created
:return: Metadata about the newly created issue
"""
try:
if app.config['AUTH_USER_METHOD']:
user_email = app.config['AUTH_USER_METHOD'](app, user_id).email
# We currently cannot use the email directly because of the following issue:
# https://community.atlassian.com/t5/Answers-Developer-Questions/JIRA-Rest-API-find-JIRA-user-based-on-user-s-email-address/qaq-p/532715
jira_id = user_email.split('@')[0]
else:
raise Exception('AUTH_USER_METHOD must be configured to set the JIRA issue reporter')
reporter = {'name': jira_id}
# Detected by the jira client based on API version & deployment.
if self.jira_client.deploymentType == 'Cloud':
try:
user = self.jira_client._fetch_pages(JiraUser, None, "user/search", 0, 1, {'query': user_email})[0]
reporter = {'accountId': user.accountId}
except IndexError:
raise Exception('Could not find the reporting user in our Jira installation.')
issue_type_id = ISSUE_TYPE_ID
if app.config['ISSUE_TRACKER_ISSUE_TYPE_ID']:
issue_type_id = app.config['ISSUE_TRACKER_ISSUE_TYPE_ID']
project_key = kwargs.get('project_key', None)
proj_key = 'key' if project_key else 'id'
proj_value = project_key if project_key else self.jira_project_id
reporting_user = self._get_users_from_ids([user_email])
owners = self._get_users_from_ids(kwargs.get('owner_ids', []))
frequent_users = self._get_users_from_ids(kwargs.get('frequent_user_ids', []))
reporting_user_str = self._generate_reporting_user_str(reporting_user)
owners_description_str = self._generate_owners_description_str(owners)
frequent_users_description_str = self._generate_frequent_users_description_str(frequent_users)
all_users_description_str = self._generate_all_table_users_description_str(owners_description_str,
frequent_users_description_str)
issue = self.jira_client.create_issue(fields=dict(project={
proj_key: proj_value
}, issuetype={
'id': issue_type_id,
'name': ISSUE_TYPE_NAME,
}, labels=self.issue_labels,
summary=title,
description=(f'{description} '
f'\n *Reported By:* {reporting_user_str if reporting_user_str else user_email} '
f'\n *Table Key:* {table_uri} [PLEASE DO NOT REMOVE] '
f'\n *Table URL:* {table_url} '
f'{all_users_description_str}'),
priority={
'name': Priority.get_jira_severity_from_level(priority_level)
}, reporter=reporter))
self._add_watchers_to_issue(issue_key=issue.key, users=owners + frequent_users)
return self._get_issue_properties(issue=issue)
except JIRAError as e:
logging.exception(str(e))
raise e
def _validate_jira_configuration(self) -> None:
"""
        Validates that all required Jira configuration properties are set.
        Raises an IssueConfigurationException listing any missing properties.
        :return: None
"""
missing_fields = []
if not self.jira_url:
missing_fields.append('ISSUE_TRACKER_URL')
if not self.jira_user:
missing_fields.append('ISSUE_TRACKER_USER')
if not self.jira_password:
missing_fields.append('ISSUE_TRACKER_PASSWORD')
if not self.jira_project_id:
missing_fields.append('ISSUE_TRACKER_PROJECT_ID')
if not self.jira_max_results:
missing_fields.append('ISSUE_TRACKER_MAX_RESULTS')
if missing_fields:
raise IssueConfigurationException(
f'The following config settings must be set for Jira: {", ".join(missing_fields)} ')
@staticmethod
def _get_issue_properties(issue: Issue) -> DataIssue:
"""
Maps the jira issue object to properties we want in the UI
:param issue: Jira issue to map
:return: JiraIssue
"""
return DataIssue(issue_key=issue.key,
title=issue.fields.summary,
url=issue.permalink(),
status=issue.fields.status.name,
priority=Priority.from_jira_severity(issue.fields.priority.name))
def _generate_issues_url(self, search_stub: str, table_uri: str, issueCount: int) -> str:
"""
        Builds a URL to the list of matching Jira tickets,
        since the search results returned by the SDK don't include a query URL
:param search_stub: search stub for type of query to build
:param table_uri: table uri from the ui
:param issueCount: number of jira issues associated to the search
:return: url to a list of issues in jira
"""
if issueCount == 0:
return ''
search_query = urllib.parse.quote(search_stub.format(table_key=table_uri))
return f'{self.jira_url}/issues/?jql={search_query}'
def _sort_issues(self, issues: List[Issue]) -> List[DataIssue]:
"""
Sorts issues by resolution, first by unresolved and then by resolved. Also maps the issues to
the object used by the front end. Doesn't include closed issues that are older than 30 days.
:param issues: Issues returned from the JIRA API
:return: List of data issues
"""
open = []
closed = []
for issue in issues:
data_issue = self._get_issue_properties(issue)
if not issue.fields.resolution:
open.append(data_issue)
else:
closed.append(data_issue)
return open + closed
@staticmethod
    def _get_users_from_ids(user_ids: List[str]) -> List[User]:
"""
Calls get_user metadata API with a user id to retrieve user details.
:param user_ids: List of strings representing user ids
:return: List of User objects
"""
users = []
for user_id in user_ids:
url = '{0}{1}/{2}'.format(app.config['METADATASERVICE_BASE'], USER_ENDPOINT, user_id)
response = request_metadata(url=url)
if response.status_code == HTTPStatus.OK:
user = load_user(response.json())
if user:
users.append(user)
return users
def _generate_reporting_user_str(self, reporting_user: List[User]) -> str:
"""
:param reporting_user: List containing a user representing the reporter of the issue
or an empty list if the reporter's information could not be retrieved
:return: String of reporting user's information to display in the description
"""
if not reporting_user:
return ''
user = reporting_user[0]
if user.is_active and user.profile_url:
return (f'[{user.full_name if user.full_name else user.email}'
f'|{user.profile_url}]')
else:
return user.email
def _generate_owners_description_str(self, owners: List[User]) -> str:
"""
Build a list of table owner information to add to the description of the ticket
:param owners: List of users representing owners of the table
:return: String of owners to append in the description
"""
owners_description_str = '\n Table Owners:' if owners else ''
user_details_list = []
inactive_user_details_list = []
for user in owners:
if user.is_active and user.profile_url:
user_details_list.append((f'[{user.full_name if user.full_name else user.email}'
f'|{user.profile_url}] '))
continue
else:
inactive_user_details = f'{user.full_name if user.full_name else user.email}'
# Append relevant alumni and manager information if the user is a person and inactive
if not user.is_active and user.full_name:
inactive_user_details += ' (Alumni) '
if user.manager_fullname:
inactive_user_details += f'\u2022 Manager: {user.manager_fullname} '
inactive_user_details_list.append(inactive_user_details)
return '\n '.join(filter(None, [owners_description_str,
'\n '.join(user_details_list),
'\n '.join(inactive_user_details_list)]))
def _generate_frequent_users_description_str(self, frequent_users: List[User]) -> str:
"""
Build a list of table frequent user information to add to the description of the ticket; this list will leave
out inactive frequent users
:param frequent_users: List of users representing frequent users of the table
:return: String of frequent users to append in the description
"""
frequent_users_description_str = '\n Frequent Users: ' if frequent_users else ''
user_details_list = []
for user in frequent_users:
if user.is_active and user.profile_url:
user_details_list.append((f'[{user.full_name if user.full_name else user.email}'
f'|{user.profile_url}]'))
return frequent_users_description_str + ', '.join(user_details_list) if user_details_list else ''
def _generate_all_table_users_description_str(self, owners_str: str, frequent_users_str: str) -> str:
"""
Takes the generated owners and frequent users information and packages it up into one string for appending
to the ticket description
:param owners_str: Owner information
:param frequent_users_str: Frequent user information
:return: String including all table users (owners and frequent users) information to append to the description
"""
table_users_description_title = ''
if owners_str and frequent_users_str:
table_users_description_title = '\n\n *Owners and Frequent Users (added as Watchers):* '
elif owners_str:
table_users_description_title = '\n\n *Owners (added as Watchers):* '
elif frequent_users_str:
table_users_description_title = '\n\n *Frequent Users (added as Watchers):* '
return table_users_description_title + owners_str + frequent_users_str
def _add_watchers_to_issue(self, issue_key: str, users: List[User]) -> None:
"""
Given an issue key and a list of users, add those users as watchers to the issue if they are active
:param issue_key: key representing an issue
:param users: list of users to add as watchers to the issue
"""
for user in users:
if user.is_active:
try:
# Detected by the jira client based on API version & deployment.
if self.jira_client.deploymentType == 'Cloud':
jira_user = self.jira_client._fetch_pages(JiraUser, None, "user/search", 0, 1,
{'query': user.email})[0]
self.jira_client.add_watcher(issue=issue_key, watcher=jira_user.accountId)
else:
self.jira_client.add_watcher(issue=issue_key, watcher=user.email.split("@")[0])
except (JIRAError, IndexError):
logging.warning('Could not add user {user_email} as a watcher on the issue.'
.format(user_email=user.email))
|
"""
This sets variables for a matrix of QT versions to test downloading against with Azure Pipelines
"""
import collections
import json
from itertools import product
class BuildJob:
def __init__(self, qt_version, host, target, arch, archdir, module=None):
self.qt_version = qt_version
self.host = host
self.target = target
self.arch = arch
self.archdir = archdir
self.module = module
class PlatformBuildJobs:
def __init__(self, platform, build_jobs):
self.platform = platform
self.build_jobs = build_jobs
python_versions = [
'3.7',
]
qt_versions = [
'5.12.6',
'5.13.2',
'5.14.0'
]
linux_build_jobs = []
mac_build_jobs = []
windows_build_jobs = []
all_platform_build_jobs = [
PlatformBuildJobs('linux', linux_build_jobs),
PlatformBuildJobs('mac', mac_build_jobs),
PlatformBuildJobs('windows', windows_build_jobs),
]
# Linux Desktop
for qt_version in qt_versions:
linux_build_jobs.append(
BuildJob(qt_version, 'linux', 'desktop', 'gcc_64', 'gcc_64')
)
# WASM
linux_build_jobs.append(
BuildJob('5.13.2', 'linux', 'desktop', 'wasm_32', "wasm_32")
)
# Mac Desktop
for qt_version in qt_versions:
mac_build_jobs.append(
BuildJob(qt_version, 'mac', 'desktop', 'clang_64', "clang_64")
)
# Mac iOS
mac_build_jobs.append(
BuildJob('5.13.2', 'mac', 'ios', 'ios', 'ios')
)
# WASM
mac_build_jobs.append(
BuildJob('5.13.2', 'mac', 'desktop', 'wasm_32', "wasm_32")
)
# Windows Desktop
windows_build_jobs.extend(
[
BuildJob('5.12.6', 'windows', 'desktop', 'win64_msvc2017_64', 'msvc2017_64'),
BuildJob('5.12.6', 'windows', 'desktop', 'win32_msvc2017', 'msvc2017'),
]
)
# WASM
windows_build_jobs.append(
BuildJob('5.13.2', 'windows', 'desktop', 'wasm_32', "wasm_32")
)
windows_build_jobs.extend(
[
BuildJob('5.13.2', 'windows', 'desktop', 'win64_msvc2017_64', 'msvc2017_64'),
BuildJob('5.13.2', 'windows', 'desktop', 'win64_msvc2015_64', 'msvc2015_64'),
BuildJob('5.13.2', 'windows', 'desktop', 'win64_mingw73', 'mingw73_64'),
BuildJob('5.13.2', 'windows', 'desktop', 'win32_msvc2017', 'msvc2017'),
BuildJob('5.13.2', 'windows', 'desktop', 'win32_mingw73', 'mingw73_32'),
]
)
windows_build_jobs.extend(
[
BuildJob('5.14.0', 'windows', 'desktop', 'win64_msvc2015_64', 'msvc2015_64'),
BuildJob('5.14.0', 'windows', 'desktop', 'win32_msvc2017', 'msvc2017'),
]
)
# Androids for Linux platforms
# aqt is for CI/CD systems!
# Users might develop on Win/Mac, but are most likely to use Linux for CI/CD with
# the Android ecosystem.
for android_arch in ['android_x86_64', 'android_arm64_v8a', 'android_x86', 'android_armv7']:
linux_build_jobs.append(
BuildJob('5.13.2', 'linux', 'android', android_arch, android_arch)
)
linux_build_jobs.append(
BuildJob('5.14.0', 'linux', 'android', 'android', 'android')
)
# Extra modules test
linux_build_jobs.append(
BuildJob('5.13.2', 'linux', 'desktop', 'gcc_64', 'gcc_64', module='qcharts qtnetworkauth')
)
mac_build_jobs.append(
BuildJob('5.13.2', 'mac', 'desktop', 'clang_64', 'clang_64', module='qcharts qtnetworkauth')
)
windows_build_jobs.append(
BuildJob('5.13.2', 'windows', 'desktop', 'win64_msvc2017_64', 'msvc2017_64', module='qcharts qtnetworkauth')
)
matrices = {}
for platform_build_job in all_platform_build_jobs:
matrix_dictionary = collections.OrderedDict()
for build_job, python_version in product(platform_build_job.build_jobs, python_versions):
key = '{} {} for {} on {}'.format(build_job.qt_version, build_job.arch, build_job.target, build_job.host)
if build_job.module:
key = "{} ({})".format(key, build_job.module)
matrix_dictionary[key] = collections.OrderedDict(
[
('PYTHON_VERSION', python_version),
('QT_VERSION', build_job.qt_version),
('HOST', build_job.host),
('TARGET', build_job.target),
('ARCH', build_job.arch),
('ARCHDIR', build_job.archdir),
('MODULE', build_job.module if build_job.module else '')
]
)
matrices[platform_build_job.platform] = matrix_dictionary
print("Setting Variables below")
print(f"##vso[task.setVariable variable=linux;isOutput=true]{json.dumps(matrices["linux"])}")
print(f"##vso[task.setVariable variable=windows;isOutput=true]{json.dumps(matrices["windows"])}")
print(f"##vso[task.setVariable variable=mac;isOutput=true]{json.dumps(matrices["mac"])}")
|
"""
This sets variables for a matrix of QT versions to test downloading against with Azure Pipelines
"""
import collections
import json
from itertools import product
class BuildJob:
def __init__(self, qt_version, host, target, arch, archdir, module=None):
self.qt_version = qt_version
self.host = host
self.target = target
self.arch = arch
self.archdir = archdir
self.module = module
class PlatformBuildJobs:
def __init__(self, platform, build_jobs):
self.platform = platform
self.build_jobs = build_jobs
python_versions = [
'3.7',
]
qt_versions = [
'5.12.6',
'5.13.2',
'5.14.0'
]
linux_build_jobs = []
mac_build_jobs = []
windows_build_jobs = []
all_platform_build_jobs = [
PlatformBuildJobs('linux', linux_build_jobs),
PlatformBuildJobs('mac', mac_build_jobs),
PlatformBuildJobs('windows', windows_build_jobs),
]
# Linux Desktop
for qt_version in qt_versions:
linux_build_jobs.append(
BuildJob(qt_version, 'linux', 'desktop', 'gcc_64', 'gcc_64')
)
# WASM
linux_build_jobs.append(
BuildJob('5.13.2', 'linux', 'desktop', 'wasm_32', "wasm_32")
)
# Mac Desktop
for qt_version in qt_versions:
mac_build_jobs.append(
BuildJob(qt_version, 'mac', 'desktop', 'clang_64', "clang_64")
)
# Mac iOS
mac_build_jobs.append(
BuildJob('5.13.2', 'mac', 'ios', 'ios', 'ios')
)
# WASM
mac_build_jobs.append(
BuildJob('5.13.2', 'mac', 'desktop', 'wasm_32', "wasm_32")
)
# Windows Desktop
windows_build_jobs.extend(
[
BuildJob('5.12.6', 'windows', 'desktop', 'win64_msvc2017_64', 'msvc2017_64'),
BuildJob('5.12.6', 'windows', 'desktop', 'win32_msvc2017', 'msvc2017'),
]
)
# WASM
windows_build_jobs.append(
BuildJob('5.13.2', 'windows', 'desktop', 'wasm_32', "wasm_32")
)
windows_build_jobs.extend(
[
BuildJob('5.13.2', 'windows', 'desktop', 'win64_msvc2017_64', 'msvc2017_64'),
BuildJob('5.13.2', 'windows', 'desktop', 'win64_msvc2015_64', 'msvc2015_64'),
BuildJob('5.13.2', 'windows', 'desktop', 'win64_mingw73', 'mingw73_64'),
BuildJob('5.13.2', 'windows', 'desktop', 'win32_msvc2017', 'msvc2017'),
BuildJob('5.13.2', 'windows', 'desktop', 'win32_mingw73', 'mingw73_32'),
]
)
windows_build_jobs.extend(
[
BuildJob('5.14.0', 'windows', 'desktop', 'win64_msvc2015_64', 'msvc2015_64'),
BuildJob('5.14.0', 'windows', 'desktop', 'win32_msvc2017', 'msvc2017'),
]
)
# Androids for Linux platforms
# aqt is for CI/CD systems!
# Users might develop on Win/Mac, but are most likely to use Linux for CI/CD with
# the Android ecosystem.
for android_arch in ['android_x86_64', 'android_arm64_v8a', 'android_x86', 'android_armv7']:
linux_build_jobs.append(
BuildJob('5.13.2', 'linux', 'android', android_arch, android_arch)
)
linux_build_jobs.append(
BuildJob('5.14.0', 'linux', 'android', 'android', 'android')
)
# Extra modules test
linux_build_jobs.append(
BuildJob('5.13.2', 'linux', 'desktop', 'gcc_64', 'gcc_64', module='qcharts qtnetworkauth')
)
mac_build_jobs.append(
BuildJob('5.13.2', 'mac', 'desktop', 'clang_64', 'clang_64', module='qcharts qtnetworkauth')
)
windows_build_jobs.append(
BuildJob('5.13.2', 'windows', 'desktop', 'win64_msvc2017_64', 'msvc2017_64', module='qcharts qtnetworkauth')
)
matrices = {}
for platform_build_job in all_platform_build_jobs:
matrix_dictionary = collections.OrderedDict()
for build_job, python_version in product(platform_build_job.build_jobs, python_versions):
key = '{} {} for {} on {}'.format(build_job.qt_version, build_job.arch, build_job.target, build_job.host)
if build_job.module:
key = "{} ({})".format(key, build_job.module)
matrix_dictionary[key] = collections.OrderedDict(
[
('PYTHON_VERSION', python_version),
('QT_VERSION', build_job.qt_version),
('HOST', build_job.host),
('TARGET', build_job.target),
('ARCH', build_job.arch),
('ARCHDIR', build_job.archdir),
('MODULE', build_job.module if build_job.module else '')
]
)
matrices[platform_build_job.platform] = matrix_dictionary
print("Setting Variables below")
print(f"##vso[task.setVariable variable=linux;isOutput=true]{json.dumps(matrices['linux'])}")
print(f"##vso[task.setVariable variable=windows;isOutput=true]{json.dumps(matrices['windows'])}")
print(f"##vso[task.setVariable variable=mac;isOutput=true]{json.dumps(matrices['mac'])}")
|
#!/usr/bin/env python3
import argparse
import glob
import html
import json
import os
import random
import re
import shutil
import subprocess
import sys
import traceback
from datetime import datetime
from distutils.version import StrictVersion
from functools import partial
from multiprocessing import Pool
from typing import Dict, Iterator, List, Optional, Tuple, TypedDict
import yaml
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from CommonServerPython import tableToMarkdown # type: ignore
from mdx_utils import (fix_mdx, fix_relative_images, normalize_id,
start_mdx_server, stop_mdx_server, verify_mdx_server)
# override print so we have a timestamp with each print
org_print = print
def timestamped_print(*args, **kwargs):
org_print(datetime.now().strftime("%H:%M:%S.%f"), *args, **kwargs)
print = timestamped_print
INTEGRATION_YML_MATCH = [
"Packs/[^/]+?/Integrations/[^/]+?/.+.yml",
"Packs/[^/]+?/Integrations/.+.yml",
]
INTEGRATION_DOCS_MATCH = [
"Integrations/[^/]+?/README.md",
"Integrations/.+_README.md",
"Packs/[^/]+?/Integrations/[^/]+?/README.md",
"Packs/[^/]+?/Integrations/.+_README.md",
"Beta_Integrations/[^/]+?/README.md",
"Beta_Integrations/.+_README.md",
]
SCRIPTS_DOCS_MATCH = [
"Scripts/[^/]+?/README.md",
"Scripts/.+_README.md",
"Packs/[^/]+?/Scripts/[^/]+?/README.md",
"Packs/[^/]+?/Scripts/.+_README.md",
]
PLAYBOOKS_DOCS_MATCH = [
"Playbooks/.+_README.md",
"Packs/[^/]+?/Playbooks/.+_README.md",
]
INTEGRATIONS_PREFIX = 'integrations'
SCRIPTS_PREFIX = 'scripts'
PLAYBOOKS_PREFIX = 'playbooks'
PRIVATE_PACKS_INTEGRATIONS_PREFIX = 'Integrations'
PRIVATE_PACKS_SCRIPTS_PREFIX = 'Scripts'
PRIVATE_PACKS_PLAYBOOKS_PREFIX = 'Playbooks'
RELEASES_PREFIX = 'releases'
ARTICLES_PREFIX = 'articles'
NO_HTML = '<!-- NOT_HTML_DOC -->'
YES_HTML = '<!-- HTML_DOC -->'
BRANCH = os.getenv('HEAD', 'master')
MAX_FAILURES = int(os.getenv('MAX_FAILURES', 10)) # if we have more than this amount in a single category we fail the build
# env vars for faster development
MAX_FILES = int(os.getenv('MAX_FILES', -1))
FILE_REGEX = os.getenv('FILE_REGEX')
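# Example (illustrative): setting MAX_FILES=20 and FILE_REGEX='Slack' in the environment limits the
# build to a small, matching subset of readme files while developing locally.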
EMPTY_FILE_MSG = 'empty file'
DEPRECATED_INFO_FILE = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/articles/deprecated_info.json'
# initialize the seed according to the PR branch. Used when selecting max files.
random.seed(os.getenv('CIRCLE_BRANCH'))
MIN_RELEASE_VERSION = StrictVersion((datetime.now() + relativedelta(months=-18)).strftime('%y.%-m.0'))
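# Worked example (hypothetical run date): executed in July 2021, 18 months back is January 2020,
# so MIN_RELEASE_VERSION evaluates to StrictVersion('20.1.0').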
class DocInfo:
def __init__(self, id: str, name: str, description: str, readme: str, error_msg: Optional[str] = None):
self.id = id
self.name = name
self.description = description
self.readme = readme
self.error_msg = error_msg
class DeprecatedInfo(TypedDict, total=False):
id: str
name: str
description: str
maintenance_start: str
eol_start: str
note: str
def findfiles(match_patterns: List[str], target_dir: str) -> List[str]:
"""Return the list of found files based upon the passed fnmatch patters. Will perform case insensative search regardless of OS.
Arguments:
match_patterns {List[str]} -- list of fnmatch patters
target_dir {str} -- targer dir
Returns:
list of found dirs
"""
rules = [re.compile(target_dir + "/" + r, re.IGNORECASE) for r in match_patterns]
res = []
for d in glob.glob(target_dir + "/**", recursive=True):
for r in rules:
if r.match(d):
res.append(d)
continue
return res
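# Example usage (hypothetical content checkout): findfiles(INTEGRATION_DOCS_MATCH, '/content') would
# return paths such as '/content/Packs/Slack/Integrations/Slack/README.md' that match the regexes above.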
def is_html_doc(txt: str) -> bool:
if txt.startswith(NO_HTML):
return False
if txt.startswith(YES_HTML):
return True
# use some heuristics to try to figure out if this is html
return txt.startswith('<p>') or ('<thead>' in txt and '<tbody>' in txt)
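# For instance, is_html_doc('<p>Overview</p>') returns True, while a Markdown readme starting with
# '# Overview' returns False (unless it carries the YES_HTML marker).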
def gen_html_doc(txt: str) -> str:
# create a javascript string
soup = BeautifulSoup(txt, features="html.parser")
txt = soup.prettify()
txt = json.dumps(txt)
return (f'export const txt = {txt};\n\n' +
'<div dangerouslySetInnerHTML={{__html: txt}} />\n')
def get_extracted_deprecated_note(description: str):
regexs = [
r'.*deprecated\s*[\.\-:]\s*(.*?instead.*?\.)',
r'.*deprecated\s*[\.\-:]\s*(.*?No available replacement.*?\.)',
]
for r in regexs:
dep_match = re.match(r, description, re.IGNORECASE)
if dep_match:
res = dep_match[1]
if res[0].islower():
res = res[0].capitalize() + res[1:]
return res
return ""
def get_deprecated_data(yml_data: dict, desc: str, readme_file: str):
if yml_data.get('deprecated') or 'DeprecatedContent' in readme_file or yml_data.get('hidden'):
dep_msg = get_extracted_deprecated_note(desc)
if dep_msg:
dep_msg = dep_msg + '\n'
return f':::caution Deprecated\n{dep_msg}:::\n\n'
return ""
def get_fromversion_data(yml_data: dict):
from_version = yml_data.get('fromversion', '')
if from_version and not from_version.startswith(('4', '5.0')):
return f':::info Supported versions\nSupported Cortex XSOAR versions: {from_version} and later.\n:::\n\n'
return ''
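# For example, get_fromversion_data({'fromversion': '5.5.0'}) returns the "Supported versions"
# admonition, while a fromversion of '5.0.0' (or '4.x') returns an empty string.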
def get_beta_data(yml_data: dict, content: str):
if yml_data.get('beta'):
msg = ''
if not re.search(r'This is a beta', content, re.IGNORECASE):
# only add the beta disclaimer if it is not in the docs
msg = 'This is a beta Integration, which lets you implement and test pre-release software. ' \
'Since the integration is beta, it might contain bugs. Updates to the integration during the beta phase might '\
'include non-backward compatible features. We appreciate your feedback on the quality and usability of the '\
'integration to help us identify issues, fix them, and continually improve.\n'
return f':::info beta\n{msg}:::\n\n'
return ""
def process_readme_doc(target_dir: str, content_dir: str, prefix: str,
imgs_dir: str, relative_images_dir: str, readme_file: str) -> DocInfo:
try:
base_dir = os.path.dirname(readme_file)
if readme_file.endswith('_README.md'):
ymlfile = readme_file[0:readme_file.index('_README.md')] + '.yml'
else:
ymlfiles = glob.glob(base_dir + '/*.yml')
if not ymlfiles:
raise ValueError('no yml file found')
if len(ymlfiles) > 1:
                raise ValueError(f'multiple yml files found: {ymlfiles}')
ymlfile = ymlfiles[0]
with open(ymlfile, 'r', encoding='utf-8') as f:
yml_data = yaml.safe_load(f)
id = yml_data.get('commonfields', {}).get('id') or yml_data['id']
id = normalize_id(id)
name = yml_data.get('display') or yml_data['name']
desc = yml_data.get('description') or yml_data.get('comment')
if desc:
desc = handle_desc_field(desc)
doc_info = DocInfo(id, name, desc, readme_file)
with open(readme_file, 'r', encoding='utf-8') as f:
content = f.read()
if not content.strip():
raise ValueError(EMPTY_FILE_MSG)
if is_html_doc(content):
print(f'{readme_file}: detect html file')
content = gen_html_doc(content)
else:
content = fix_mdx(content)
content = fix_relative_images(content, base_dir, f'{prefix}-{id}', imgs_dir, relative_images_dir)
# check if we have a header
lines = content.splitlines(True)
has_header = len(lines) >= 2 and lines[0].startswith('---') and lines[1].startswith('id:')
if not has_header:
readme_repo_path = readme_file
if readme_repo_path.startswith(content_dir):
readme_repo_path = readme_repo_path[len(content_dir):]
edit_url = f'https://github.com/demisto/content/blob/{BRANCH}/{readme_repo_path}'
header = f'---\nid: {id}\ntitle: {json.dumps(doc_info.name)}\ncustom_edit_url: {edit_url}\n---\n\n'
content = get_deprecated_data(yml_data, desc, readme_file) + content
content = get_beta_data(yml_data, content) + content
content = get_fromversion_data(yml_data) + content
content = header + content
verify_mdx_server(content)
with open(f'{target_dir}/{id}.md', mode='w', encoding='utf-8') as f: # type: ignore
f.write(content)
return doc_info
except Exception as ex:
print(f'fail: {readme_file}. Exception: {traceback.format_exc()}')
return DocInfo('', '', '', readme_file, str(ex).splitlines()[0])
finally:
sys.stdout.flush()
sys.stderr.flush()
def handle_desc_field(desc: str):
word_break = False
for word in re.split(r'\s|-', desc):
if len(word) > 40:
word_break = True
desc = html.escape(desc)
    if word_break: # long words: tell the browser it is ok to break them in the middle
desc = '<span style={{wordBreak: "break-word"}}>' + desc + '</span>'
return desc
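# Illustrative behaviour: handle_desc_field('short description') just HTML-escapes the text, while a
# description containing a token longer than 40 characters is additionally wrapped in a
# <span style={{wordBreak: "break-word"}}> element so the browser may break it mid-word.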
def process_release_doc(target_dir: str, release_file: str) -> Optional[DocInfo]:
try:
name = os.path.splitext(os.path.basename(release_file))[0]
if name < MIN_RELEASE_VERSION:
print(f'Skipping release notes: {release_file} as it is older than: {MIN_RELEASE_VERSION}')
return None
with open(release_file, 'r', encoding='utf-8') as f:
content = f.read()
desc_match = re.search(r'Published on .*', content, re.IGNORECASE)
if not desc_match:
raise ValueError('Published on... not found for release: ' + name)
doc_info = DocInfo(name, f'Content Release {name}', desc_match[0], release_file)
edit_url = f'https://github.com/demisto/content-docs/blob/master/content-repo/extra-docs/releases/{name}.md'
        # rewrite the title with a single '#' so it doesn't appear in the TOC
content = re.sub(r'^## Demisto Content Release Notes', '# Demisto Content Release Notes', content)
content = f'---\nid: {name}\ntitle: "{name}"\ncustom_edit_url: {edit_url}\nhide_title: true\n---\n\n' + content
download_msg = "Download"
packs_download = ""
if name > StrictVersion('20.8.0'):
# from 20.8.1 we also add a link to the marketplace zips
download_msg = "Download Content Zip (Cortex XSOAR 5.5 and earlier)"
packs_download = '* **Download Marketplace Packs (Cortex XSOAR 6.0 and later):** ' + \
f'[content_marketplace_packs.zip](https://github.com/demisto/content/releases/download/{name}/content_marketplace_packs.zip)\n'
content = content + \
f'\n\n---\n### Assets\n\n* **{download_msg}:** ' + \
f'[content_new.zip](https://github.com/demisto/content/releases/download/{name}/content_new.zip)\n'
if packs_download:
content = content + packs_download
content = content + \
f'* **Browse the Source Code:** [Content Repo @ {name}](https://github.com/demisto/content/tree/{name})\n'
verify_mdx_server(content)
with open(f'{target_dir}/{name}.md', mode='w', encoding='utf-8') as f:
f.write(content)
return doc_info
except Exception as ex:
print(f'fail: {release_file}. Exception: {traceback.format_exc()}. Message: {ex}')
        # We shouldn't have failing release docs. Break the build.
raise
finally:
sys.stdout.flush()
sys.stderr.flush()
def index_doc_infos(doc_infos: List[DocInfo], link_prefix: str, headers: Optional[Tuple[str, str]] = None):
if not headers:
headers = ('Name', 'Description')
if not doc_infos:
return ''
table_items = []
for d in doc_infos:
name = html.escape(d.name)
link_name = f'[{name}]({link_prefix}/{d.id})'
for word in re.split(r'\s|-', name):
            if len(word) > 25: # long word: tell the browser it is ok to break it
link_name = '<span style={{wordBreak: "break-word"}}>' + link_name + '</span>'
break
table_items.append({
headers[0]: link_name,
headers[1]: d.description
})
res = tableToMarkdown('', table_items, headers=headers)
return fix_mdx(res)
def process_extra_readme_doc(target_dir: str, prefix: str, readme_file: str, private_packs=False) -> DocInfo:
try:
with open(readme_file, 'r', encoding='utf-8') as f:
content = f.read()
front_matter_match = re.match(r'---\n(.*?)\n---', content, re.DOTALL)
if not front_matter_match:
raise ValueError(f'No front matter. Extra docs must have description and title front matter. File: {readme_file}')
yml_matter = front_matter_match[1]
yml_data = yaml.safe_load(yml_matter)
name = yml_data['title']
file_id = yml_data.get('id') or normalize_id(name)
desc = yml_data.get('description')
if desc:
desc = handle_desc_field(desc)
readme_file_name = os.path.basename(readme_file)
content = content.replace(front_matter_match[0], '')
if private_packs:
content = f'---\nid: {file_id}\ntitle: "{name}"\ncustom_edit_url: null\n---\n\n' + content
else:
edit_url = f'https://github.com/demisto/content-docs/blob/master/content-repo/extra-docs/{prefix}/{readme_file_name}'
content = f'---\nid: {file_id}\ntitle: "{name}"\ncustom_edit_url: {edit_url}\n---\n\n' + content
verify_mdx_server(content)
with open(f'{target_dir}/{file_id}.md', mode='w', encoding='utf-8') as f:
f.write(content)
return DocInfo(file_id, name, desc, readme_file)
except Exception as ex:
print(f'fail: {readme_file}. Exception: {traceback.format_exc()}')
return DocInfo('', '', '', readme_file, str(ex).splitlines()[0])
def process_extra_docs(target_dir: str, prefix: str,
private_packs_prefix='', private_packs=False) -> Iterator[DocInfo]:
if private_packs:
if private_packs_prefix == PRIVATE_PACKS_PLAYBOOKS_PREFIX:
md_dir = f'{os.path.dirname(os.path.abspath(__file__))}/.content-bucket/Packs/*/{private_packs_prefix}/'
else:
md_dir = f'{os.path.dirname(os.path.abspath(__file__))}/.content-bucket/Packs/*/{private_packs_prefix}/*'
for readme_file in glob.glob(f'{md_dir}/*.md'):
yield process_extra_readme_doc(target_dir, private_packs_prefix, readme_file, private_packs=True)
else:
md_dir = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/{prefix}'
for readme_file in glob.glob(f'{md_dir}/*.md'):
yield process_extra_readme_doc(target_dir, prefix, readme_file)
# POOL_SIZE has to be declared after process_readme_doc so it can find it when doing map
# multiprocess pool
POOL_SIZE = 4
def process_doc_info(doc_info: DocInfo, success: List[str], fail: List[str], doc_infos: List[DocInfo], seen_docs: Dict[str, DocInfo]):
if doc_info.error_msg == EMPTY_FILE_MSG:
# ignore empty files
return
if doc_info.error_msg:
fail.append(f'{doc_info.readme} ({doc_info.error_msg})')
elif doc_info.id in seen_docs:
fail.append(f'{doc_info.readme} (duplicate with {seen_docs[doc_info.id].readme})')
else:
doc_infos.append(doc_info)
success.append(doc_info.readme)
seen_docs[doc_info.id] = doc_info
def create_docs(content_dir: str, target_dir: str, regex_list: List[str], prefix: str, private_pack_prefix: str):
print(f'Using BRANCH: {BRANCH}')
# Search for readme files
readme_files = findfiles(regex_list, content_dir)
print(f'Processing: {len(readme_files)} {prefix} files ...')
if MAX_FILES > 0:
print(f'PREVIEW MODE. Truncating file list to: {MAX_FILES}')
random.shuffle(readme_files)
readme_files = readme_files[:MAX_FILES]
if FILE_REGEX:
print(f'PREVIEW MODE. Matching only files which match: {FILE_REGEX}')
regex = re.compile(FILE_REGEX)
readme_files = list(filter(regex.search, readme_files))
target_sub_dir = f'{target_dir}/{prefix}'
if not os.path.exists(target_sub_dir):
os.makedirs(target_sub_dir)
relative_imgs_dir = "../../../docs/doc_imgs/reference/relative"
imgs_dir = os.path.abspath(f'{target_sub_dir}/{relative_imgs_dir}')
if not os.path.exists(imgs_dir):
os.makedirs(imgs_dir)
doc_infos: List[DocInfo] = []
success: List[str] = []
fail: List[str] = []
# flush before starting multi process
sys.stdout.flush()
sys.stderr.flush()
seen_docs: Dict[str, DocInfo] = {}
with Pool(processes=POOL_SIZE) as pool:
for doc_info in pool.map(partial(process_readme_doc, target_sub_dir, content_dir, prefix, imgs_dir, relative_imgs_dir), readme_files):
process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
for doc_info in process_extra_docs(target_sub_dir, prefix):
process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
for private_doc_info in process_extra_docs(target_sub_dir, prefix, private_packs=True,
private_packs_prefix=private_pack_prefix):
process_doc_info(private_doc_info, success, fail, doc_infos, seen_docs)
org_print(f'\n===========================================\nSuccess {prefix} docs ({len(success)}):')
for r in sorted(success):
print(r)
org_print(f'\n===========================================\nFailed {prefix} docs ({len(fail)}):')
for r in sorted(fail):
print(r)
org_print("\n===========================================\n")
if len(fail) > MAX_FAILURES:
        print(f'Failures ({len(fail)}) exceeded the MAX_FAILURES limit: {MAX_FAILURES}. Aborting!!')
sys.exit(2)
return sorted(doc_infos, key=lambda d: d.name.lower()) # sort by name
def create_releases(target_dir: str):
releases_dir = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/{RELEASES_PREFIX}'
target_sub_dir = f'{target_dir}/{RELEASES_PREFIX}'
if not os.path.exists(target_sub_dir):
os.makedirs(target_sub_dir)
release_files = glob.glob(f'{releases_dir}/*.md')
doc_infos: List[DocInfo] = []
success = []
fail = []
# flush before starting multi process
sys.stdout.flush()
sys.stderr.flush()
with Pool(processes=POOL_SIZE) as pool:
for doc_info in pool.map(partial(process_release_doc, target_sub_dir), release_files):
if not doc_info: # case that we skip a release doc as it is too old
continue
if doc_info.error_msg:
fail.append(f'{doc_info.readme} ({doc_info.error_msg})')
else:
doc_infos.append(doc_info)
success.append(doc_info.readme)
org_print(f'\n===========================================\nSuccess release docs ({len(success)}):')
for r in sorted(success):
print(r)
org_print(f'\n===========================================\nFailed release docs ({len(fail)}):')
for r in sorted(fail):
print(r)
org_print("\n===========================================\n")
if fail:
print(f'{len(fail)} failed releases. Aborting!!')
sys.exit(3)
return sorted(doc_infos, key=lambda d: StrictVersion(d.name.lower().partition('content release ')[2]), reverse=True)
def create_articles(target_dir: str):
target_sub_dir = f'{target_dir}/{ARTICLES_PREFIX}'
if not os.path.exists(target_sub_dir):
os.makedirs(target_sub_dir)
doc_infos: List[DocInfo] = []
success: List[str] = []
fail: List[str] = []
seen_docs: Dict[str, DocInfo] = {}
for doc_info in process_extra_docs(target_sub_dir, ARTICLES_PREFIX):
if not doc_info.description: # fail the build if no description for an article
raise ValueError(f'Missing description for article: {doc_info.id} ({doc_info.name})')
process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
org_print(f'\n===========================================\nSuccess {ARTICLES_PREFIX} docs ({len(success)}):')
for r in sorted(success):
print(r)
org_print(f'\n===========================================\nFailed {ARTICLES_PREFIX} docs ({len(fail)}):')
for r in sorted(fail):
print(r)
org_print("\n===========================================\n")
if fail:
print(f'{len(fail)} failed articles. Aborting!!')
sys.exit(2)
return sorted(doc_infos, key=lambda d: d.name.lower()) # sort by name
def insert_approved_tags_and_usecases():
with open('approved_usecases.json', 'r') as f:
approved_usecases = json.loads(f.read()).get('approved_list')
approved_usecases_string = '\n '.join(approved_usecases)
with open('approved_tags.json', 'r') as f:
approved_tags = json.loads(f.read()).get('approved_list')
approved_tags_string = '\n '.join(approved_tags)
with open("../docs/documentation/pack-docs.md", "r+") as f:
pack_docs = f.readlines()
f.seek(0)
for line in pack_docs:
if '***Use-case***' in line:
line += f"""
<details>
<summary>Pack Use-cases</summary>
{approved_usecases_string}
</details>
"""
if '***Tags***' in line:
line += f"""
<details>
<summary>Pack Tags</summary>
{approved_tags_string}
</details>
"""
f.write(line)
def is_xsoar_supported_pack(pack_dir: str):
with open(f'{pack_dir}/pack_metadata.json', 'r') as f:
metadata = json.load(f)
return 'xsoar' == metadata.get('support')
def get_blame_date(content_dir: str, file: str, line: int):
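    """Return the git author time (UTC) of the given line of the file, using git blame."""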
file_rel = os.path.relpath(file, content_dir)
blame_out = subprocess.check_output(['git', 'blame', '-p', '-L', f'{line},+1', file_rel], text=True, cwd=content_dir)
auth_date = re.search(r'^author-time\s+(\d+)', blame_out, re.MULTILINE)
if not auth_date:
        raise ValueError(f'author-time not found for blame output of file: [{file}]: {blame_out}')
return datetime.utcfromtimestamp(int(auth_date.group(1)))
def get_deprecated_display_dates(dep_date: datetime) -> Tuple[str, str]:
"""Get the deprecation start date. The 1st of the following month.
Args:
dep_date (datetime): The raw dep date
Returns:
tuple of start deprecation and end deprecation
"""
DATE_FRMT = "%b %d, %Y"
start = datetime(day=1, month=dep_date.month, year=dep_date.year) + relativedelta(months=+1)
end = start + relativedelta(months=+6)
return (datetime.strftime(start, DATE_FRMT), datetime.strftime(end, DATE_FRMT))
def find_deprecated_integrations(content_dir: str):
files = glob.glob(content_dir + '/Packs/*/Integrations/*.yml')
files.extend(glob.glob(content_dir + '/Packs/*/Integrations/*/*.yml'))
res: List[DeprecatedInfo] = []
    # go over each file and check if it contains deprecated: true
for f in files:
with open(f, 'r') as fr:
content = fr.read()
if dep_search := re.search(r'^deprecated:\s*true', content, re.MULTILINE):
pack_dir = re.match(r'.+/Packs/.+?(?=/)', f)
if is_xsoar_supported_pack(pack_dir.group(0)): # type: ignore[union-attr]
yml_data = yaml.safe_load(content)
id = yml_data.get('commonfields', {}).get('id') or yml_data['name']
name: str = yml_data.get('display') or yml_data['name']
desc = yml_data.get('description')
content_to_search = content[:dep_search.regs[0][0]]
lines_search = re.findall(r'\n', content_to_search)
blame_line = 1
if lines_search:
blame_line += len(lines_search)
dep_date = get_blame_date(content_dir, f, blame_line)
maintenance_start, eol_start = get_deprecated_display_dates(dep_date)
dep_suffix = "(Deprecated)"
if name.endswith(dep_suffix):
name = name.replace(dep_suffix, "").strip()
info = DeprecatedInfo(id=id, name=name, description=desc, note=get_extracted_deprecated_note(desc),
maintenance_start=maintenance_start, eol_start=eol_start)
print(f'Adding deprecated integration: [{name}]. Deprecated date: {dep_date}. From file: {f}')
res.append(info)
else:
                print(f'Skipping deprecated integration: {f} which is not supported by xsoar')
return res
def merge_deprecated_info(deprecated_list: List[DeprecatedInfo], deperecated_info_file: str):
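    """Merge detected deprecated integrations with the static overrides in deperecated_info_file; entries only present in the file are also kept."""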
with open(deperecated_info_file, "rt") as f:
to_merge_list: List[DeprecatedInfo] = json.load(f)['integrations']
to_merge_map = {i['id']: i for i in to_merge_list}
merged_list: List[DeprecatedInfo] = []
for d in deprecated_list:
if d['id'] in to_merge_map:
d = {**d, **to_merge_map[d['id']]} # type: ignore[misc]
merged_list.append(d)
merged_map = {i['id']: i for i in merged_list}
for k, v in to_merge_map.items():
if k not in merged_map:
merged_list.append(v)
return merged_list
def add_deprected_integrations_info(content_dir: str, deperecated_article: str, deperecated_info_file: str, assets_dir: str):
"""Will append the deprecated integrations info to the deprecated article
Args:
content_dir (str): content dir to search for deprecated integrations
deperecated_article (str): deprecated article (md file) to add to
        deperecated_info_file (str): json file with static deprecated info to merge
        assets_dir (str): dir where the machine readable json assets are written
"""
deprecated_infos = merge_deprecated_info(find_deprecated_integrations(content_dir), deperecated_info_file)
deprecated_infos = sorted(deprecated_infos, key=lambda d: d['name'].lower() if 'name' in d else d['id'].lower()) # sort by name
    deperecated_json_file = f'{assets_dir}/{os.path.basename(deperecated_article.replace(".md", ".json"))}'
with open(deperecated_json_file, 'w') as f:
json.dump({
'description': 'Generated machine readable doc of deprecated integrations',
'integrations': deprecated_infos
}, f, indent=2)
deperecated_infos_no_note = [i for i in deprecated_infos if not i['note']]
deperecated_json_file_no_note = deperecated_json_file.replace('.json', '.no_note.json')
with open(deperecated_json_file_no_note, 'w') as f:
json.dump({
'description': 'Generated doc of deprecated integrations which do not contain a note about replacement or deprecation reason',
'integrations': deperecated_infos_no_note
}, f, indent=2)
with open(deperecated_article, "at") as f:
for d in deprecated_infos:
            f.write(f'\n## {d["name"] if d.get("name") else d["id"]}\n')
            if d.get("maintenance_start"):
                f.write(f'* **Maintenance Mode Start Date:** {d["maintenance_start"]}\n')
            if d.get("eol_start"):
                f.write(f'* **End-of-Life Date:** {d["eol_start"]}\n')
            if d.get("note"):
                f.write(f'* **Note:** {d["note"]}\n')
f.write('\n\n----\nA machine readable version of this file'
f' is available [here](pathname:///assets/{os.path.basename(deperecated_json_file)}).\n')
org_print("\n===========================================\n")
def main():
parser = argparse.ArgumentParser(description='''Generate Content Docs. You should probably not call this script directly.
See: https://github.com/demisto/content-docs/#generating-reference-docs''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-t", "--target", help="Target dir to generate docs at.", required=True)
parser.add_argument("-d", "--dir", help="Content repo dir.", required=True)
args = parser.parse_args()
print(f'Using multiprocess pool size: {POOL_SIZE}')
print('Starting MDX server...')
start_mdx_server()
prefix = os.path.basename(args.target)
integrations_full_prefix = f'{prefix}/{INTEGRATIONS_PREFIX}'
scripts_full_prefix = f'{prefix}/{SCRIPTS_PREFIX}'
playbooks_full_prefix = f'{prefix}/{PLAYBOOKS_PREFIX}'
releases_full_prefix = f'{prefix}/{RELEASES_PREFIX}'
articles_full_prefix = f'{prefix}/{ARTICLES_PREFIX}'
integration_doc_infos = create_docs(args.dir, args.target, INTEGRATION_DOCS_MATCH, INTEGRATIONS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_INTEGRATIONS_PREFIX)
playbooks_doc_infos = create_docs(args.dir, args.target, PLAYBOOKS_DOCS_MATCH, PLAYBOOKS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_PLAYBOOKS_PREFIX)
script_doc_infos = create_docs(args.dir, args.target, SCRIPTS_DOCS_MATCH, SCRIPTS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_SCRIPTS_PREFIX)
release_doc_infos = create_releases(args.target)
article_doc_infos = create_articles(args.target)
if os.getenv('SKIP_DEPRECATED') not in ('true', 'yes', '1'):
add_deprected_integrations_info(args.dir, f'{args.target}/{ARTICLES_PREFIX}/deprecated.md', DEPRECATED_INFO_FILE,
f'{args.target}/../../static/assets')
index_base = f'{os.path.dirname(os.path.abspath(__file__))}/reference-index.md'
index_target = args.target + '/index.md'
articles_index_target = args.target + '/articles-index.md'
articles_index_base = f'{os.path.dirname(os.path.abspath(__file__))}/articles-index.md'
shutil.copy(index_base, index_target)
shutil.copy(articles_index_base, articles_index_target)
with open(index_target, 'a', encoding='utf-8') as f:
if MAX_FILES > 0:
f.write(f'\n\n# =====<br/>BUILD PREVIEW only {MAX_FILES} files from each category! <br/>=====\n\n')
f.write("\n\n## Integrations\n\n")
f.write(index_doc_infos(integration_doc_infos, INTEGRATIONS_PREFIX))
f.write("\n\n## Playbooks\n\n")
f.write(index_doc_infos(playbooks_doc_infos, PLAYBOOKS_PREFIX))
f.write("\n\n## Scripts\n\n")
f.write(index_doc_infos(script_doc_infos, SCRIPTS_PREFIX))
f.write("\n\n## Content Release Notes\n\n")
f.write(index_doc_infos(release_doc_infos, RELEASES_PREFIX, headers=('Name', 'Date')))
f.write("\n\nAdditional archived release notes are available"
" [here](https://github.com/demisto/content-docs/tree/master/content-repo/extra-docs/releases).")
with open(articles_index_target, 'a', encoding='utf-8') as f:
if MAX_FILES > 0:
f.write(f'\n\n# =====<br/>BUILD PREVIEW only {MAX_FILES} files from each category! <br/>=====\n\n')
f.write(index_doc_infos(article_doc_infos, ARTICLES_PREFIX))
integration_items = [f'{integrations_full_prefix}/{d.id}' for d in integration_doc_infos]
playbook_items = [f'{playbooks_full_prefix}/{d.id}' for d in playbooks_doc_infos]
script_items = [f'{scripts_full_prefix}/{d.id}' for d in script_doc_infos]
article_items = [f'{articles_full_prefix}/{d.id}' for d in article_doc_infos]
article_items.insert(0, f'{prefix}/articles-index')
release_items = [f'{releases_full_prefix}/{d.id}' for d in release_doc_infos]
sidebar = [
{
"type": "doc",
"id": f'{prefix}/index'
},
{
"type": "category",
"label": "Integrations",
"items": integration_items
},
{
"type": "category",
"label": "Playbooks",
"items": playbook_items
},
{
"type": "category",
"label": "Scripts",
"items": script_items
},
{
"type": "category",
"label": "Content Release Notes",
"items": release_items
},
]
with open(f'{args.target}/sidebar.json', 'w') as f:
json.dump(sidebar, f, indent=4)
articles_sidebar = article_items
with open(f'{args.target}/articles-sidebar.json', 'w') as f:
json.dump(articles_sidebar, f, indent=4)
print('Stopping mdx server ...')
stop_mdx_server()
if os.getenv('UPDATE_PACK_DOCS') or os.getenv('CI'):
        # to avoid committing a modified pack-docs.md during local dev, we do this only if explicitly asked for or in a CI env
insert_approved_tags_and_usecases()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
import argparse
import glob
import html
import json
import os
import random
import re
import shutil
import subprocess
import sys
import traceback
from datetime import datetime
from distutils.version import StrictVersion
from functools import partial
from multiprocessing import Pool
from typing import Dict, Iterator, List, Optional, Tuple, TypedDict
import yaml
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from CommonServerPython import tableToMarkdown # type: ignore
from mdx_utils import (fix_mdx, fix_relative_images, normalize_id,
start_mdx_server, stop_mdx_server, verify_mdx_server)
# override print so we have a timestamp with each print
org_print = print
def timestamped_print(*args, **kwargs):
org_print(datetime.now().strftime("%H:%M:%S.%f"), *args, **kwargs)
print = timestamped_print
INTEGRATION_YML_MATCH = [
"Packs/[^/]+?/Integrations/[^/]+?/.+.yml",
"Packs/[^/]+?/Integrations/.+.yml",
]
INTEGRATION_DOCS_MATCH = [
"Integrations/[^/]+?/README.md",
"Integrations/.+_README.md",
"Packs/[^/]+?/Integrations/[^/]+?/README.md",
"Packs/[^/]+?/Integrations/.+_README.md",
"Beta_Integrations/[^/]+?/README.md",
"Beta_Integrations/.+_README.md",
]
SCRIPTS_DOCS_MATCH = [
"Scripts/[^/]+?/README.md",
"Scripts/.+_README.md",
"Packs/[^/]+?/Scripts/[^/]+?/README.md",
"Packs/[^/]+?/Scripts/.+_README.md",
]
PLAYBOOKS_DOCS_MATCH = [
"Playbooks/.+_README.md",
"Packs/[^/]+?/Playbooks/.+_README.md",
]
INTEGRATIONS_PREFIX = 'integrations'
SCRIPTS_PREFIX = 'scripts'
PLAYBOOKS_PREFIX = 'playbooks'
PRIVATE_PACKS_INTEGRATIONS_PREFIX = 'Integrations'
PRIVATE_PACKS_SCRIPTS_PREFIX = 'Scripts'
PRIVATE_PACKS_PLAYBOOKS_PREFIX = 'Playbooks'
RELEASES_PREFIX = 'releases'
ARTICLES_PREFIX = 'articles'
NO_HTML = '<!-- NOT_HTML_DOC -->'
YES_HTML = '<!-- HTML_DOC -->'
BRANCH = os.getenv('HEAD', 'master')
MAX_FAILURES = int(os.getenv('MAX_FAILURES', 10)) # if we have more than this amount in a single category we fail the build
# env vars for faster development
MAX_FILES = int(os.getenv('MAX_FILES', -1))
FILE_REGEX = os.getenv('FILE_REGEX')
EMPTY_FILE_MSG = 'empty file'
DEPRECATED_INFO_FILE = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/articles/deprecated_info.json'
# initialize the seed according to the PR branch. Used when selecting max files.
random.seed(os.getenv('CIRCLE_BRANCH'))
MIN_RELEASE_VERSION = StrictVersion((datetime.now() + relativedelta(months=-18)).strftime('%y.%-m.0'))
class DocInfo:
def __init__(self, id: str, name: str, description: str, readme: str, error_msg: Optional[str] = None):
self.id = id
self.name = name
self.description = description
self.readme = readme
self.error_msg = error_msg
class DeprecatedInfo(TypedDict, total=False):
id: str
name: str
description: str
maintenance_start: str
eol_start: str
note: str
def findfiles(match_patterns: List[str], target_dir: str) -> List[str]:
"""Return the list of found files based upon the passed fnmatch patters. Will perform case insensative search regardless of OS.
Arguments:
match_patterns {List[str]} -- list of fnmatch patters
target_dir {str} -- targer dir
Returns:
list of found dirs
"""
rules = [re.compile(target_dir + "/" + r, re.IGNORECASE) for r in match_patterns]
res = []
for d in glob.glob(target_dir + "/**", recursive=True):
for r in rules:
if r.match(d):
res.append(d)
                break  # stop after the first matching rule to avoid duplicate entries
return res
def is_html_doc(txt: str) -> bool:
if txt.startswith(NO_HTML):
return False
if txt.startswith(YES_HTML):
return True
# use some heuristics to try to figure out if this is html
return txt.startswith('<p>') or ('<thead>' in txt and '<tbody>' in txt)
def gen_html_doc(txt: str) -> str:
# create a javascript string
soup = BeautifulSoup(txt, features="html.parser")
txt = soup.prettify()
txt = json.dumps(txt)
return (f'export const txt = {txt};\n\n' +
'<div dangerouslySetInnerHTML={{__html: txt}} />\n')
def get_extracted_deprecated_note(description: str):
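    """Extract the replacement / deprecation-reason sentence from a deprecated description, or return an empty string if none is found."""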
regexs = [
r'.*deprecated\s*[\.\-:]\s*(.*?instead.*?\.)',
r'.*deprecated\s*[\.\-:]\s*(.*?No available replacement.*?\.)',
]
for r in regexs:
dep_match = re.match(r, description, re.IGNORECASE)
if dep_match:
res = dep_match[1]
if res[0].islower():
res = res[0].capitalize() + res[1:]
return res
return ""
def get_deprecated_data(yml_data: dict, desc: str, readme_file: str):
if yml_data.get('deprecated') or 'DeprecatedContent' in readme_file or yml_data.get('hidden'):
dep_msg = get_extracted_deprecated_note(desc)
if dep_msg:
dep_msg = dep_msg + '\n'
return f':::caution Deprecated\n{dep_msg}:::\n\n'
return ""
def get_fromversion_data(yml_data: dict):
from_version = yml_data.get('fromversion', '')
if from_version and not from_version.startswith(('4', '5.0')):
return f':::info Supported versions\nSupported Cortex XSOAR versions: {from_version} and later.\n:::\n\n'
return ''
def get_beta_data(yml_data: dict, content: str):
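    """Return a beta admonition for beta integrations (with the standard disclaimer unless the docs already include one), or an empty string."""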
if yml_data.get('beta'):
msg = ''
if not re.search(r'This is a beta', content, re.IGNORECASE):
# only add the beta disclaimer if it is not in the docs
msg = 'This is a beta Integration, which lets you implement and test pre-release software. ' \
'Since the integration is beta, it might contain bugs. Updates to the integration during the beta phase might '\
'include non-backward compatible features. We appreciate your feedback on the quality and usability of the '\
'integration to help us identify issues, fix them, and continually improve.\n'
return f':::info beta\n{msg}:::\n\n'
return ""
def process_readme_doc(target_dir: str, content_dir: str, prefix: str,
imgs_dir: str, relative_images_dir: str, readme_file: str) -> DocInfo:
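    """Convert a content README into an MDX doc: locate its yml, build front matter and admonitions, and write the result to target_dir. Returns a DocInfo (with error_msg set on failure)."""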
try:
base_dir = os.path.dirname(readme_file)
if readme_file.endswith('_README.md'):
ymlfile = readme_file[0:readme_file.index('_README.md')] + '.yml'
else:
ymlfiles = glob.glob(base_dir + '/*.yml')
if not ymlfiles:
raise ValueError('no yml file found')
if len(ymlfiles) > 1:
                raise ValueError(f'multiple yml files found: {ymlfiles}')
ymlfile = ymlfiles[0]
with open(ymlfile, 'r', encoding='utf-8') as f:
yml_data = yaml.safe_load(f)
id = yml_data.get('commonfields', {}).get('id') or yml_data['id']
id = normalize_id(id)
name = yml_data.get('display') or yml_data['name']
desc = yml_data.get('description') or yml_data.get('comment')
if desc:
desc = handle_desc_field(desc)
doc_info = DocInfo(id, name, desc, readme_file)
with open(readme_file, 'r', encoding='utf-8') as f:
content = f.read()
if not content.strip():
raise ValueError(EMPTY_FILE_MSG)
if is_html_doc(content):
print(f'{readme_file}: detect html file')
content = gen_html_doc(content)
else:
content = fix_mdx(content)
content = fix_relative_images(content, base_dir, f'{prefix}-{id}', imgs_dir, relative_images_dir)
# check if we have a header
lines = content.splitlines(True)
has_header = len(lines) >= 2 and lines[0].startswith('---') and lines[1].startswith('id:')
if not has_header:
readme_repo_path = readme_file
if readme_repo_path.startswith(content_dir):
readme_repo_path = readme_repo_path[len(content_dir):]
edit_url = f'https://github.com/demisto/content/blob/{BRANCH}/{readme_repo_path}'
header = f'---\nid: {id}\ntitle: {json.dumps(doc_info.name)}\ncustom_edit_url: {edit_url}\n---\n\n'
content = get_deprecated_data(yml_data, desc, readme_file) + content
content = get_beta_data(yml_data, content) + content
content = get_fromversion_data(yml_data) + content
content = header + content
verify_mdx_server(content)
with open(f'{target_dir}/{id}.md', mode='w', encoding='utf-8') as f: # type: ignore
f.write(content)
return doc_info
except Exception as ex:
print(f'fail: {readme_file}. Exception: {traceback.format_exc()}')
return DocInfo('', '', '', readme_file, str(ex).splitlines()[0])
finally:
sys.stdout.flush()
sys.stderr.flush()
def handle_desc_field(desc: str):
word_break = False
for word in re.split(r'\s|-', desc):
if len(word) > 40:
word_break = True
desc = html.escape(desc)
    if word_break:  # long words: tell the browser it can break in the middle
desc = '<span style={{wordBreak: "break-word"}}>' + desc + '</span>'
return desc
def process_release_doc(target_dir: str, release_file: str) -> Optional[DocInfo]:
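    """Convert a release notes file into a docs page with front matter and asset download links. Skips (returns None) releases older than MIN_RELEASE_VERSION."""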
try:
name = os.path.splitext(os.path.basename(release_file))[0]
if name < MIN_RELEASE_VERSION:
print(f'Skipping release notes: {release_file} as it is older than: {MIN_RELEASE_VERSION}')
return None
with open(release_file, 'r', encoding='utf-8') as f:
content = f.read()
desc_match = re.search(r'Published on .*', content, re.IGNORECASE)
if not desc_match:
raise ValueError('Published on... not found for release: ' + name)
doc_info = DocInfo(name, f'Content Release {name}', desc_match[0], release_file)
edit_url = f'https://github.com/demisto/content-docs/blob/master/content-repo/extra-docs/releases/{name}.md'
        # replace the title with a single '#' heading so it doesn't appear in the TOC
content = re.sub(r'^## Demisto Content Release Notes', '# Demisto Content Release Notes', content)
content = f'---\nid: {name}\ntitle: "{name}"\ncustom_edit_url: {edit_url}\nhide_title: true\n---\n\n' + content
download_msg = "Download"
packs_download = ""
if name > StrictVersion('20.8.0'):
# from 20.8.1 we also add a link to the marketplace zips
download_msg = "Download Content Zip (Cortex XSOAR 5.5 and earlier)"
packs_download = '* **Download Marketplace Packs (Cortex XSOAR 6.0 and later):** ' + \
f'[content_marketplace_packs.zip](https://github.com/demisto/content/releases/download/{name}/content_marketplace_packs.zip)\n'
content = content + \
f'\n\n---\n### Assets\n\n* **{download_msg}:** ' + \
f'[content_new.zip](https://github.com/demisto/content/releases/download/{name}/content_new.zip)\n'
if packs_download:
content = content + packs_download
content = content + \
f'* **Browse the Source Code:** [Content Repo @ {name}](https://github.com/demisto/content/tree/{name})\n'
verify_mdx_server(content)
with open(f'{target_dir}/{name}.md', mode='w', encoding='utf-8') as f:
f.write(content)
return doc_info
except Exception as ex:
print(f'fail: {release_file}. Exception: {traceback.format_exc()}. Message: {ex}')
        # We shouldn't have failing release docs. Break the build
raise
finally:
sys.stdout.flush()
sys.stderr.flush()
def index_doc_infos(doc_infos: List[DocInfo], link_prefix: str, headers: Optional[Tuple[str, str]] = None):
if not headers:
headers = ('Name', 'Description')
if not doc_infos:
return ''
table_items = []
for d in doc_infos:
name = html.escape(d.name)
link_name = f'[{name}]({link_prefix}/{d.id})'
for word in re.split(r'\s|-', name):
            if len(word) > 25:  # long word: tell the browser it's ok to break it
link_name = '<span style={{wordBreak: "break-word"}}>' + link_name + '</span>'
break
table_items.append({
headers[0]: link_name,
headers[1]: d.description
})
res = tableToMarkdown('', table_items, headers=headers)
return fix_mdx(res)
def process_extra_readme_doc(target_dir: str, prefix: str, readme_file: str, private_packs=False) -> DocInfo:
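    """Process a hand written extra doc: parse its YAML front matter and rewrite it as docusaurus front matter before writing to target_dir."""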
try:
with open(readme_file, 'r', encoding='utf-8') as f:
content = f.read()
front_matter_match = re.match(r'---\n(.*?)\n---', content, re.DOTALL)
if not front_matter_match:
raise ValueError(f'No front matter. Extra docs must have description and title front matter. File: {readme_file}')
yml_matter = front_matter_match[1]
yml_data = yaml.safe_load(yml_matter)
name = yml_data['title']
file_id = yml_data.get('id') or normalize_id(name)
desc = yml_data.get('description')
if desc:
desc = handle_desc_field(desc)
readme_file_name = os.path.basename(readme_file)
content = content.replace(front_matter_match[0], '')
if private_packs:
content = f'---\nid: {file_id}\ntitle: "{name}"\ncustom_edit_url: null\n---\n\n' + content
else:
edit_url = f'https://github.com/demisto/content-docs/blob/master/content-repo/extra-docs/{prefix}/{readme_file_name}'
content = f'---\nid: {file_id}\ntitle: "{name}"\ncustom_edit_url: {edit_url}\n---\n\n' + content
verify_mdx_server(content)
with open(f'{target_dir}/{file_id}.md', mode='w', encoding='utf-8') as f:
f.write(content)
return DocInfo(file_id, name, desc, readme_file)
except Exception as ex:
print(f'fail: {readme_file}. Exception: {traceback.format_exc()}')
return DocInfo('', '', '', readme_file, str(ex).splitlines()[0])
def process_extra_docs(target_dir: str, prefix: str,
private_packs_prefix='', private_packs=False) -> Iterator[DocInfo]:
if private_packs:
if private_packs_prefix == PRIVATE_PACKS_PLAYBOOKS_PREFIX:
md_dir = f'{os.path.dirname(os.path.abspath(__file__))}/.content-bucket/Packs/*/{private_packs_prefix}/'
else:
md_dir = f'{os.path.dirname(os.path.abspath(__file__))}/.content-bucket/Packs/*/{private_packs_prefix}/*'
for readme_file in glob.glob(f'{md_dir}/*.md'):
yield process_extra_readme_doc(target_dir, private_packs_prefix, readme_file, private_packs=True)
else:
md_dir = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/{prefix}'
for readme_file in glob.glob(f'{md_dir}/*.md'):
yield process_extra_readme_doc(target_dir, prefix, readme_file)
# POOL_SIZE has to be declared after process_readme_doc so it can find it when doing map
# multiprocess pool
POOL_SIZE = 4
def process_doc_info(doc_info: DocInfo, success: List[str], fail: List[str], doc_infos: List[DocInfo], seen_docs: Dict[str, DocInfo]):
if doc_info.error_msg == EMPTY_FILE_MSG:
# ignore empty files
return
if doc_info.error_msg:
fail.append(f'{doc_info.readme} ({doc_info.error_msg})')
elif doc_info.id in seen_docs:
fail.append(f'{doc_info.readme} (duplicate with {seen_docs[doc_info.id].readme})')
else:
doc_infos.append(doc_info)
success.append(doc_info.readme)
seen_docs[doc_info.id] = doc_info
def create_docs(content_dir: str, target_dir: str, regex_list: List[str], prefix: str, private_pack_prefix: str):
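    """Generate docs for all README files matching regex_list under content_dir, plus extra and private pack docs. Returns the DocInfo list sorted by name."""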
print(f'Using BRANCH: {BRANCH}')
# Search for readme files
readme_files = findfiles(regex_list, content_dir)
print(f'Processing: {len(readme_files)} {prefix} files ...')
if MAX_FILES > 0:
print(f'PREVIEW MODE. Truncating file list to: {MAX_FILES}')
random.shuffle(readme_files)
readme_files = readme_files[:MAX_FILES]
if FILE_REGEX:
print(f'PREVIEW MODE. Matching only files which match: {FILE_REGEX}')
regex = re.compile(FILE_REGEX)
readme_files = list(filter(regex.search, readme_files))
target_sub_dir = f'{target_dir}/{prefix}'
if not os.path.exists(target_sub_dir):
os.makedirs(target_sub_dir)
relative_imgs_dir = "../../../docs/doc_imgs/reference/relative"
imgs_dir = os.path.abspath(f'{target_sub_dir}/{relative_imgs_dir}')
if not os.path.exists(imgs_dir):
os.makedirs(imgs_dir)
doc_infos: List[DocInfo] = []
success: List[str] = []
fail: List[str] = []
# flush before starting multi process
sys.stdout.flush()
sys.stderr.flush()
seen_docs: Dict[str, DocInfo] = {}
with Pool(processes=POOL_SIZE) as pool:
for doc_info in pool.map(partial(process_readme_doc, target_sub_dir, content_dir, prefix, imgs_dir, relative_imgs_dir), readme_files):
process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
for doc_info in process_extra_docs(target_sub_dir, prefix):
process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
for private_doc_info in process_extra_docs(target_sub_dir, prefix, private_packs=True,
private_packs_prefix=private_pack_prefix):
process_doc_info(private_doc_info, success, fail, doc_infos, seen_docs)
org_print(f'\n===========================================\nSuccess {prefix} docs ({len(success)}):')
for r in sorted(success):
print(r)
org_print(f'\n===========================================\nFailed {prefix} docs ({len(fail)}):')
for r in sorted(fail):
print(r)
org_print("\n===========================================\n")
if len(fail) > MAX_FAILURES:
        print(f'Failures ({len(fail)}) exceeded the MAX_FAILURES limit: {MAX_FAILURES}. Aborting!!')
sys.exit(2)
return sorted(doc_infos, key=lambda d: d.name.lower()) # sort by name
def create_releases(target_dir: str):
releases_dir = f'{os.path.dirname(os.path.abspath(__file__))}/extra-docs/{RELEASES_PREFIX}'
target_sub_dir = f'{target_dir}/{RELEASES_PREFIX}'
if not os.path.exists(target_sub_dir):
os.makedirs(target_sub_dir)
release_files = glob.glob(f'{releases_dir}/*.md')
doc_infos: List[DocInfo] = []
success = []
fail = []
# flush before starting multi process
sys.stdout.flush()
sys.stderr.flush()
with Pool(processes=POOL_SIZE) as pool:
for doc_info in pool.map(partial(process_release_doc, target_sub_dir), release_files):
if not doc_info: # case that we skip a release doc as it is too old
continue
if doc_info.error_msg:
fail.append(f'{doc_info.readme} ({doc_info.error_msg})')
else:
doc_infos.append(doc_info)
success.append(doc_info.readme)
org_print(f'\n===========================================\nSuccess release docs ({len(success)}):')
for r in sorted(success):
print(r)
org_print(f'\n===========================================\nFailed release docs ({len(fail)}):')
for r in sorted(fail):
print(r)
org_print("\n===========================================\n")
if fail:
print(f'{len(fail)} failed releases. Aborting!!')
sys.exit(3)
return sorted(doc_infos, key=lambda d: StrictVersion(d.name.lower().partition('content release ')[2]), reverse=True)
def create_articles(target_dir: str):
target_sub_dir = f'{target_dir}/{ARTICLES_PREFIX}'
if not os.path.exists(target_sub_dir):
os.makedirs(target_sub_dir)
doc_infos: List[DocInfo] = []
success: List[str] = []
fail: List[str] = []
seen_docs: Dict[str, DocInfo] = {}
for doc_info in process_extra_docs(target_sub_dir, ARTICLES_PREFIX):
if not doc_info.description: # fail the build if no description for an article
raise ValueError(f'Missing description for article: {doc_info.id} ({doc_info.name})')
process_doc_info(doc_info, success, fail, doc_infos, seen_docs)
org_print(f'\n===========================================\nSuccess {ARTICLES_PREFIX} docs ({len(success)}):')
for r in sorted(success):
print(r)
org_print(f'\n===========================================\nFailed {ARTICLES_PREFIX} docs ({len(fail)}):')
for r in sorted(fail):
print(r)
org_print("\n===========================================\n")
if fail:
print(f'{len(fail)} failed articles. Aborting!!')
sys.exit(2)
return sorted(doc_infos, key=lambda d: d.name.lower()) # sort by name
def insert_approved_tags_and_usecases():
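    """Embed the approved use-case and tag lists from the local json files into docs/documentation/pack-docs.md."""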
with open('approved_usecases.json', 'r') as f:
approved_usecases = json.loads(f.read()).get('approved_list')
approved_usecases_string = '\n '.join(approved_usecases)
with open('approved_tags.json', 'r') as f:
approved_tags = json.loads(f.read()).get('approved_list')
approved_tags_string = '\n '.join(approved_tags)
with open("../docs/documentation/pack-docs.md", "r+") as f:
pack_docs = f.readlines()
f.seek(0)
for line in pack_docs:
if '***Use-case***' in line:
line += f"""
<details>
<summary>Pack Use-cases</summary>
{approved_usecases_string}
</details>
"""
if '***Tags***' in line:
line += f"""
<details>
<summary>Pack Tags</summary>
{approved_tags_string}
</details>
"""
f.write(line)
def is_xsoar_supported_pack(pack_dir: str):
with open(f'{pack_dir}/pack_metadata.json', 'r') as f:
metadata = json.load(f)
return 'xsoar' == metadata.get('support')
def get_blame_date(content_dir: str, file: str, line: int):
file_rel = os.path.relpath(file, content_dir)
blame_out = subprocess.check_output(['git', 'blame', '-p', '-L', f'{line},+1', file_rel], text=True, cwd=content_dir)
auth_date = re.search(r'^author-time\s+(\d+)', blame_out, re.MULTILINE)
if not auth_date:
        raise ValueError(f'author-time not found for blame output of file: [{file}]: {blame_out}')
return datetime.utcfromtimestamp(int(auth_date.group(1)))
def get_deprecated_display_dates(dep_date: datetime) -> Tuple[str, str]:
"""Get the deprecation start date. The 1st of the following month.
Args:
dep_date (datetime): The raw dep date
Returns:
tuple of start deprecation and end deprecation
"""
DATE_FRMT = "%b %d, %Y"
start = datetime(day=1, month=dep_date.month, year=dep_date.year) + relativedelta(months=+1)
end = start + relativedelta(months=+6)
return (datetime.strftime(start, DATE_FRMT), datetime.strftime(end, DATE_FRMT))
def find_deprecated_integrations(content_dir: str):
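    """Scan integration ymls of xsoar supported packs for `deprecated: true` and return DeprecatedInfo entries with computed maintenance/EOL dates."""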
files = glob.glob(content_dir + '/Packs/*/Integrations/*.yml')
files.extend(glob.glob(content_dir + '/Packs/*/Integrations/*/*.yml'))
res: List[DeprecatedInfo] = []
    # go over each file and check if it contains deprecated: true
for f in files:
with open(f, 'r') as fr:
content = fr.read()
if dep_search := re.search(r'^deprecated:\s*true', content, re.MULTILINE):
pack_dir = re.match(r'.+/Packs/.+?(?=/)', f)
if is_xsoar_supported_pack(pack_dir.group(0)): # type: ignore[union-attr]
yml_data = yaml.safe_load(content)
id = yml_data.get('commonfields', {}).get('id') or yml_data['name']
name: str = yml_data.get('display') or yml_data['name']
desc = yml_data.get('description')
content_to_search = content[:dep_search.regs[0][0]]
lines_search = re.findall(r'\n', content_to_search)
blame_line = 1
if lines_search:
blame_line += len(lines_search)
dep_date = get_blame_date(content_dir, f, blame_line)
maintenance_start, eol_start = get_deprecated_display_dates(dep_date)
dep_suffix = "(Deprecated)"
if name.endswith(dep_suffix):
name = name.replace(dep_suffix, "").strip()
info = DeprecatedInfo(id=id, name=name, description=desc, note=get_extracted_deprecated_note(desc),
maintenance_start=maintenance_start, eol_start=eol_start)
print(f'Adding deprecated integration: [{name}]. Deprecated date: {dep_date}. From file: {f}')
res.append(info)
else:
                print(f'Skipping deprecated integration: {f} which is not supported by xsoar')
return res
def merge_deprecated_info(deprecated_list: List[DeprecatedInfo], deperecated_info_file: str):
with open(deperecated_info_file, "rt") as f:
to_merge_list: List[DeprecatedInfo] = json.load(f)['integrations']
to_merge_map = {i['id']: i for i in to_merge_list}
merged_list: List[DeprecatedInfo] = []
for d in deprecated_list:
if d['id'] in to_merge_map:
d = {**d, **to_merge_map[d['id']]} # type: ignore[misc]
merged_list.append(d)
merged_map = {i['id']: i for i in merged_list}
for k, v in to_merge_map.items():
if k not in merged_map:
merged_list.append(v)
return merged_list
def add_deprected_integrations_info(content_dir: str, deperecated_article: str, deperecated_info_file: str, assets_dir: str):
"""Will append the deprecated integrations info to the deprecated article
Args:
content_dir (str): content dir to search for deprecated integrations
deperecated_article (str): deprecated article (md file) to add to
        deperecated_info_file (str): json file with static deprecated info to merge
        assets_dir (str): dir where the machine readable json assets are written
"""
deprecated_infos = merge_deprecated_info(find_deprecated_integrations(content_dir), deperecated_info_file)
deprecated_infos = sorted(deprecated_infos, key=lambda d: d['name'].lower() if 'name' in d else d['id'].lower()) # sort by name
deperecated_json_file = f'{assets_dir}/{os.path.basename(deperecated_article.replace(".md", ".json"))}'
with open(deperecated_json_file, 'w') as f:
json.dump({
'description': 'Generated machine readable doc of deprecated integrations',
'integrations': deprecated_infos
}, f, indent=2)
deperecated_infos_no_note = [i for i in deprecated_infos if not i['note']]
deperecated_json_file_no_note = deperecated_json_file.replace('.json', '.no_note.json')
with open(deperecated_json_file_no_note, 'w') as f:
json.dump({
'description': 'Generated doc of deprecated integrations which do not contain a note about replacement or deprecation reason',
'integrations': deperecated_infos_no_note
}, f, indent=2)
with open(deperecated_article, "at") as f:
for d in deprecated_infos:
f.write(f'\n## {d["name"] if d.get("name") else d["id"]}\n')
if d.get("maintenance_start"):
f.write(f'* **Maintenance Mode Start Date:** {d["maintenance_start"]}\n')
if d.get("eol_start"):
f.write(f'* **End-of-Life Date:** {d["eol_start"]}\n')
if d.get("note"):
f.write(f'* **Note:** {d["note"]}\n')
f.write('\n\n----\nA machine readable version of this file'
f' is available [here](pathname:///assets/{os.path.basename(deperecated_json_file)}).\n')
org_print("\n===========================================\n")
def main():
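    """Generate integration, playbook, script, release and article docs, write the index pages and sidebars, and update pack docs when running in CI."""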
parser = argparse.ArgumentParser(description='''Generate Content Docs. You should probably not call this script directly.
See: https://github.com/demisto/content-docs/#generating-reference-docs''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-t", "--target", help="Target dir to generate docs at.", required=True)
parser.add_argument("-d", "--dir", help="Content repo dir.", required=True)
args = parser.parse_args()
print(f'Using multiprocess pool size: {POOL_SIZE}')
print('Starting MDX server...')
start_mdx_server()
prefix = os.path.basename(args.target)
integrations_full_prefix = f'{prefix}/{INTEGRATIONS_PREFIX}'
scripts_full_prefix = f'{prefix}/{SCRIPTS_PREFIX}'
playbooks_full_prefix = f'{prefix}/{PLAYBOOKS_PREFIX}'
releases_full_prefix = f'{prefix}/{RELEASES_PREFIX}'
articles_full_prefix = f'{prefix}/{ARTICLES_PREFIX}'
integration_doc_infos = create_docs(args.dir, args.target, INTEGRATION_DOCS_MATCH, INTEGRATIONS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_INTEGRATIONS_PREFIX)
playbooks_doc_infos = create_docs(args.dir, args.target, PLAYBOOKS_DOCS_MATCH, PLAYBOOKS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_PLAYBOOKS_PREFIX)
script_doc_infos = create_docs(args.dir, args.target, SCRIPTS_DOCS_MATCH, SCRIPTS_PREFIX,
private_pack_prefix=PRIVATE_PACKS_SCRIPTS_PREFIX)
release_doc_infos = create_releases(args.target)
article_doc_infos = create_articles(args.target)
if os.getenv('SKIP_DEPRECATED') not in ('true', 'yes', '1'):
add_deprected_integrations_info(args.dir, f'{args.target}/{ARTICLES_PREFIX}/deprecated.md', DEPRECATED_INFO_FILE,
f'{args.target}/../../static/assets')
index_base = f'{os.path.dirname(os.path.abspath(__file__))}/reference-index.md'
index_target = args.target + '/index.md'
articles_index_target = args.target + '/articles-index.md'
articles_index_base = f'{os.path.dirname(os.path.abspath(__file__))}/articles-index.md'
shutil.copy(index_base, index_target)
shutil.copy(articles_index_base, articles_index_target)
with open(index_target, 'a', encoding='utf-8') as f:
if MAX_FILES > 0:
f.write(f'\n\n# =====<br/>BUILD PREVIEW only {MAX_FILES} files from each category! <br/>=====\n\n')
f.write("\n\n## Integrations\n\n")
f.write(index_doc_infos(integration_doc_infos, INTEGRATIONS_PREFIX))
f.write("\n\n## Playbooks\n\n")
f.write(index_doc_infos(playbooks_doc_infos, PLAYBOOKS_PREFIX))
f.write("\n\n## Scripts\n\n")
f.write(index_doc_infos(script_doc_infos, SCRIPTS_PREFIX))
f.write("\n\n## Content Release Notes\n\n")
f.write(index_doc_infos(release_doc_infos, RELEASES_PREFIX, headers=('Name', 'Date')))
f.write("\n\nAdditional archived release notes are available"
" [here](https://github.com/demisto/content-docs/tree/master/content-repo/extra-docs/releases).")
with open(articles_index_target, 'a', encoding='utf-8') as f:
if MAX_FILES > 0:
f.write(f'\n\n# =====<br/>BUILD PREVIEW only {MAX_FILES} files from each category! <br/>=====\n\n')
f.write(index_doc_infos(article_doc_infos, ARTICLES_PREFIX))
integration_items = [f'{integrations_full_prefix}/{d.id}' for d in integration_doc_infos]
playbook_items = [f'{playbooks_full_prefix}/{d.id}' for d in playbooks_doc_infos]
script_items = [f'{scripts_full_prefix}/{d.id}' for d in script_doc_infos]
article_items = [f'{articles_full_prefix}/{d.id}' for d in article_doc_infos]
article_items.insert(0, f'{prefix}/articles-index')
release_items = [f'{releases_full_prefix}/{d.id}' for d in release_doc_infos]
sidebar = [
{
"type": "doc",
"id": f'{prefix}/index'
},
{
"type": "category",
"label": "Integrations",
"items": integration_items
},
{
"type": "category",
"label": "Playbooks",
"items": playbook_items
},
{
"type": "category",
"label": "Scripts",
"items": script_items
},
{
"type": "category",
"label": "Content Release Notes",
"items": release_items
},
]
with open(f'{args.target}/sidebar.json', 'w') as f:
json.dump(sidebar, f, indent=4)
articles_sidebar = article_items
with open(f'{args.target}/articles-sidebar.json', 'w') as f:
json.dump(articles_sidebar, f, indent=4)
print('Stopping mdx server ...')
stop_mdx_server()
if os.getenv('UPDATE_PACK_DOCS') or os.getenv('CI'):
        # to avoid committing a modified pack-docs.md during local dev, we do this only if explicitly asked for or in a CI env
insert_approved_tags_and_usecases()
if __name__ == "__main__":
main()
|
"""
Interval datatypes
"""
import logging
import math
import sys
import tempfile
from urllib.parse import quote_plus
from bx.intervals.io import (
GenomicIntervalReader,
ParseError,
)
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.datatypes.data import DatatypeValidation
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import (
build_sniff_from_prefix,
FilePrefix,
get_headers,
iter_headers,
)
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.util.gff_util import (
parse_gff3_attributes,
parse_gff_attributes,
)
from galaxy.util import compression_utils
from . import (
data,
dataproviders,
)
log = logging.getLogger(__name__)
# Contains the meta columns and the words that map to it; list aliases on the
# right side of the : in decreasing order of priority
alias_spec = {
"chromCol": ["chrom", "CHROMOSOME", "CHROM", "Chromosome Name"],
"startCol": ["start", "START", "chromStart", "txStart", "Start Position (bp)"],
"endCol": ["end", "END", "STOP", "chromEnd", "txEnd", "End Position (bp)"],
"strandCol": ["strand", "STRAND", "Strand"],
"nameCol": [
"name",
"NAME",
"Name",
"name2",
"NAME2",
"Name2",
"Ensembl Gene ID",
"Ensembl Transcript ID",
"Ensembl Peptide ID",
],
}
# a little faster lookup
alias_helper = {}
for key, value in alias_spec.items():
for elem in value:
alias_helper[elem] = key
# Constants for configuring viewport generation: If a line is greater than
# VIEWPORT_MAX_READS_PER_LINE * VIEWPORT_READLINE_BUFFER_SIZE bytes in size,
# then we will not generate a viewport for that dataset
VIEWPORT_READLINE_BUFFER_SIZE = 1048576 # 1MB
VIEWPORT_MAX_READS_PER_LINE = 10
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Interval(Tabular):
"""Tab delimited data containing interval information"""
edam_data = "data_3002"
edam_format = "format_3475"
file_ext = "interval"
line_class = "region"
track_type = "FeatureTrack"
data_sources = {"data": "tabix", "index": "bigwig"}
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(
name="nameCol",
desc="Name/Identifier column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display apps"""
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
def init_meta(self, dataset, copy_from=None):
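        """Delegate metadata initialization to Tabular."""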
Tabular.init_meta(self, dataset, copy_from=copy_from)
def set_meta(self, dataset, overwrite=True, first_line_is_header=False, **kwd):
"""Tries to guess from the line the location number of the column for the chromosome, region start-end and strand"""
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=0)
if dataset.has_data():
empty_line_count = 0
num_check_lines = 100 # only check up to this many non empty lines
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh):
line = line.rstrip("\r\n")
if line:
if first_line_is_header or line[0] == "#":
self.init_meta(dataset)
line = line.strip("#")
elems = line.split("\t")
for meta_name, header_list in alias_spec.items():
for header_val in header_list:
if header_val in elems:
# found highest priority header to meta_name
setattr(dataset.metadata, meta_name, elems.index(header_val) + 1)
break # next meta_name
break # Our metadata is set, so break out of the outer loop
else:
# Header lines in Interval files are optional. For example, BED is Interval but has no header.
# We'll make a best guess at the location of the metadata columns.
elems = line.split("\t")
if len(elems) > 2:
if overwrite or not dataset.metadata.element_is_set("chromCol"):
dataset.metadata.chromCol = 1
try:
int(elems[1])
if overwrite or not dataset.metadata.element_is_set("startCol"):
dataset.metadata.startCol = 2
except Exception:
pass # Metadata default will be used
try:
int(elems[2])
if overwrite or not dataset.metadata.element_is_set("endCol"):
dataset.metadata.endCol = 3
except Exception:
pass # Metadata default will be used
# we no longer want to guess that this column is the 'name', name must now be set manually for interval files
# we will still guess at the strand, as we can make a more educated guess
# if len( elems ) > 3:
# try:
# int( elems[3] )
# except Exception:
# if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
# dataset.metadata.nameCol = 4
if len(elems) < 6 or elems[5] not in data.valid_strand:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 6
break
if (i - empty_line_count) > num_check_lines:
break # Our metadata is set or we examined 100 non-empty lines, so break out of the outer loop
else:
empty_line_count += 1
def displayable(self, dataset):
try:
return (
dataset.has_data()
and dataset.state == dataset.states.OK
and dataset.metadata.columns > 0
and dataset.metadata.data_lines != 0
and dataset.metadata.chromCol
and dataset.metadata.startCol
and dataset.metadata.endCol
)
except Exception:
return False
def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
"""Return a chrom, start, stop tuple for viewing a file."""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max(viewport_feature_count, 500) # maximum number of lines to check; includes comment lines
if not self.displayable(dataset):
return (None, None, None)
try:
            # If column indexes were not passed, determine from metadata
if chrom_col is None:
chrom_col = int(dataset.metadata.chromCol) - 1
if start_col is None:
start_col = int(dataset.metadata.startCol) - 1
if end_col is None:
end_col = int(dataset.metadata.endCol) - 1
# Scan lines of file to find a reasonable chromosome and range
chrom = None
start = sys.maxsize
end = 0
max_col = max(chrom_col, start_col, end_col)
with compression_utils.get_fileobj(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
# Skip comment lines
if not line.startswith("#"):
try:
fields = line.rstrip().split("\t")
if len(fields) > max_col:
if chrom is None or chrom == fields[chrom_col]:
start = min(start, int(fields[start_col]))
end = max(end, int(fields[end_col]))
# Set chrom last, in case start and end are not integers
chrom = fields[chrom_col]
viewport_feature_count -= 1
except Exception:
# Most likely a non-integer field has been encountered
# for start / stop. Just ignore and make sure we finish
# reading the line and decrementing the counters.
pass
# Make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if chrom is not None:
return (chrom, str(start), str(end)) # Necessary to return strings?
except Exception:
# Unexpected error, possibly missing metadata
log.exception("Exception caught attempting to generate viewport for dataset '%d'", dataset.id)
return (None, None, None)
def as_ucsc_display_file(self, dataset, **kwd):
"""Returns file contents with only the bed data"""
with tempfile.NamedTemporaryFile(delete=False, mode="w") as fh:
c, s, e, t, n = (
dataset.metadata.chromCol,
dataset.metadata.startCol,
dataset.metadata.endCol,
dataset.metadata.strandCol or 0,
dataset.metadata.nameCol or 0,
)
c, s, e, t, n = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1, int(n) - 1
if t >= 0: # strand column (should) exists
for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
strand = "+"
name = "region_%i" % i
if n >= 0 and n < len(elems):
name = elems[n]
if t < len(elems):
strand = elems[t]
tmp = [elems[c], elems[s], elems[e], name, "0", strand]
fh.write("%s\n" % "\t".join(tmp))
elif n >= 0: # name column (should) exists
for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
name = "region_%i" % i
if n >= 0 and n < len(elems):
name = elems[n]
tmp = [elems[c], elems[s], elems[e], name]
fh.write("%s\n" % "\t".join(tmp))
else:
for elems in compression_utils.file_iter(dataset.file_name):
tmp = [elems[c], elems[s], elems[e]]
fh.write("%s\n" % "\t".join(tmp))
return compression_utils.get_fileobj(fh.name, mode="rb")
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(
dataset,
column_parameter_alias={
"chromCol": "Chrom",
"startCol": "Start",
"endCol": "End",
"strandCol": "Strand",
"nameCol": "Name",
},
)
def ucsc_links(self, dataset, type, app, base_url):
"""
Generate links to UCSC genome browser sites based on the dbkey
and content of dataset.
"""
# Filter UCSC sites to only those that are supported by this build and
# enabled.
valid_sites = [
(name, url)
for name, url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey)
if name in app.datatypes_registry.get_display_sites("ucsc")
]
if not valid_sites:
return []
# If there are any valid sites, we need to generate the estimated
# viewport
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is None:
return []
# Accumulate links for valid sites
ret_val = []
for site_name, site_url in valid_sites:
internal_url = app.url_for(
controller="dataset", dataset_id=dataset.id, action="display_at", filename="ucsc_" + site_name
)
display_url = quote_plus(
"%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, app.url_for(controller="root"), dataset.id, type)
)
redirect_url = quote_plus(f"{site_url}db={dataset.dbkey}&position={chrom}:{start}-{stop}&hgt.customText=%s")
link = f"{internal_url}?redirect_url={redirect_url}&display_url={display_url}"
ret_val.append((site_name, link))
return ret_val
def validate(self, dataset, **kwd):
"""Validate an interval file using the bx GenomicIntervalReader"""
c, s, e, t = (
dataset.metadata.chromCol,
dataset.metadata.startCol,
dataset.metadata.endCol,
dataset.metadata.strandCol,
)
c, s, e, t = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1
with compression_utils.get_fileobj(dataset.file_name, "r") as infile:
reader = GenomicIntervalReader(infile, chrom_col=c, start_col=s, end_col=e, strand_col=t)
while True:
try:
next(reader)
except ParseError as e:
return DatatypeValidation.invalid(util.unicodify(e))
except StopIteration:
return DatatypeValidation.valid()
def repair_methods(self, dataset):
"""Return options for removing errors along with a description"""
return [("lines", "Remove erroneous lines")]
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Checks for 'intervalness'
This format is mostly used by galaxy itself. Valid interval files should include
a valid header comment, but this seems to be loosely regulated.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_space.txt' )
>>> Interval().sniff( fname )
False
>>> fname = get_test_fname( 'interval.interval' )
>>> Interval().sniff( fname )
True
"""
found_valid_lines = False
try:
headers = iter_headers(file_prefix, "\t", comment_designator="#")
# If we got here, we already know the file is_column_based and is not bed,
# so we'll just look for some valid data.
for hdr in headers:
if hdr:
if len(hdr) < 3:
return False
# Assume chrom start and end are in column positions 1 and 2
# respectively ( for 0 based columns )
int(hdr[1])
int(hdr[2])
found_valid_lines = True
except Exception:
return False
return found_valid_lines
def get_track_resolution(self, dataset, start, end):
return None
# ------------- Dataproviders
@dataproviders.decorators.dataprovider_factory(
"genomic-region", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dataprovider(self, dataset, **settings):
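        """Provide chrom/start/end regions from the dataset via GenomicRegionDataProvider."""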
return dataproviders.dataset.GenomicRegionDataProvider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory(
"genomic-region-dict", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.genomic_region_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory("interval", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dataprovider(self, dataset, **settings):
return dataproviders.dataset.IntervalDataProvider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory("interval-dict", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.interval_dataprovider(dataset, **settings)
class BedGraph(Interval):
"""Tab delimited chrom/start/end/datavalue dataset"""
edam_format = "format_3583"
file_ext = "bedgraph"
track_type = "LineTrack"
data_sources = {"data": "bigwig", "index": "bigwig"}
def as_ucsc_display_file(self, dataset, **kwd):
"""
Returns file contents as is with no modifications.
TODO: this is a functional stub and will need to be enhanced moving forward to provide additional support for bedgraph.
"""
return open(dataset.file_name, "rb")
def get_estimated_display_viewport(self, dataset, chrom_col=0, start_col=1, end_col=2):
"""
Set viewport based on dataset's first 100 lines.
"""
return Interval.get_estimated_display_viewport(
self, dataset, chrom_col=chrom_col, start_col=start_col, end_col=end_col
)
class Bed(Interval):
"""Tab delimited data in BED format"""
edam_format = "format_3003"
file_ext = "bed"
data_sources = {"data": "tabix", "index": "bigwig", "feature_search": "fli"}
track_type = Interval.track_type
check_required_metadata = True
column_names = [
"Chrom",
"Start",
"End",
"Name",
"Score",
"Strand",
"ThickStart",
"ThickEnd",
"ItemRGB",
"BlockCount",
"BlockSizes",
"BlockStarts",
]
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="viz_filter_cols",
desc="Score column for visualization",
default=[4],
param=metadata.ColumnParameter,
optional=True,
multiple=True,
)
# do we need to repeat these? they are the same as should be inherited from interval type
def set_meta(self, dataset, overwrite=True, **kwd):
"""Sets the metadata information for datasets previously determined to be in bed format."""
if dataset.has_data():
i = 0
for i, line in enumerate(open(dataset.file_name)): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
if len(elems) > 2:
if len(elems) > 3:
if overwrite or not dataset.metadata.element_is_set("nameCol"):
dataset.metadata.nameCol = 4
if len(elems) < 6:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 6
break
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def as_ucsc_display_file(self, dataset, **kwd):
"""Returns file contents with only the bed data. If bed 6+, treat as interval."""
for line in open(dataset.file_name):
line = line.strip()
if line == "" or line.startswith("#"):
continue
fields = line.split("\t")
"""check to see if this file doesn't conform to strict genome browser accepted bed"""
try:
if len(fields) > 12:
return Interval.as_ucsc_display_file(self, dataset) # too many fields
if len(fields) > 6:
int(fields[6])
if len(fields) > 7:
int(fields[7])
if len(fields) > 8:
if int(fields[8]) != 0:
return Interval.as_ucsc_display_file(self, dataset)
if len(fields) > 9:
int(fields[9])
if len(fields) > 10:
fields2 = (
fields[10].rstrip(",").split(",")
) # remove trailing comma and split on comma
for field in fields2:
int(field)
if len(fields) > 11:
fields2 = (
fields[11].rstrip(",").split(",")
) # remove trailing comma and split on comma
for field in fields2:
int(field)
except Exception:
return Interval.as_ucsc_display_file(self, dataset)
# only check first line for proper form
break
try:
return open(dataset.file_name, "rb")
except Exception:
return "This item contains no content"
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Checks for 'bedness'
BED lines have three required fields and nine additional optional fields.
The number of fields per line must be consistent throughout any single set of data in
an annotation track. The order of the optional fields is binding: lower-numbered
fields must always be populated if higher-numbered fields are used. The data type of
all 12 columns is:
1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_tab.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'interv1.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'complete.bed' )
>>> Bed().sniff( fname )
True
"""
if not get_headers(file_prefix, "\t", comment_designator="#", count=1):
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t", comment_designator="#"):
if not hdr or hdr == [""]:
continue
if len(hdr) < 3 or len(hdr) > 12:
return False
try:
int(hdr[1])
int(hdr[2])
except Exception:
return False
if len(hdr) > 4:
# hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this.
# hdr[4] is an int, 'score', a score between 0 and 1000.
try:
if int(hdr[4]) < 0 or int(hdr[4]) > 1000:
return False
except Exception:
return False
if len(hdr) > 5:
# hdr[5] is strand
if hdr[5] not in data.valid_strand:
return False
if len(hdr) > 6:
# hdr[6] is thickStart, the starting position at which the feature is drawn thickly.
try:
int(hdr[6])
except Exception:
return False
if len(hdr) > 7:
# hdr[7] is thickEnd, the ending position at which the feature is drawn thickly
try:
int(hdr[7])
except Exception:
return False
if len(hdr) > 8:
# hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0)
try:
int(hdr[8])
except Exception:
try:
hdr[8].split(",")
except Exception:
return False
if len(hdr) > 9:
# hdr[9] is blockCount, the number of blocks (exons) in the BED line.
try:
block_count = int(hdr[9])
except Exception:
return False
if len(hdr) > 10:
# hdr[10] is blockSizes - A comma-separated list of the block sizes.
# Sometimes the block_sizes and block_starts lists end in extra commas
try:
block_sizes = hdr[10].rstrip(",").split(",")
except Exception:
return False
if len(hdr) > 11:
# hdr[11] is blockStarts - A comma-separated list of block starts.
try:
block_starts = hdr[11].rstrip(",").split(",")
except Exception:
return False
if len(block_sizes) != block_count or len(block_starts) != block_count:
return False
found_valid_lines = True
return found_valid_lines
except Exception:
return False
class ProBed(Bed):
"""Tab delimited data in proBED format - adaptation of BED for proteomics data."""
edam_format = "format_3827"
file_ext = "probed"
column_names = [
"Chrom",
"Start",
"End",
"Name",
"Score",
"Strand",
"ThickStart",
"ThickEnd",
"ItemRGB",
"BlockCount",
"BlockSizes",
"BlockStarts",
"ProteinAccession",
"PeptideSequence",
"Uniqueness",
"GenomeReferenceVersion",
"PsmScore",
"Fdr",
"Modifications",
"Charge",
"ExpMassToCharge",
"CalcMassToCharge",
"PsmRank",
"DatasetID",
"Uri",
]
class BedStrict(Bed):
"""Tab delimited data in strict BED format - no non-standard columns allowed"""
edam_format = "format_3584"
file_ext = "bedstrict"
# no user change of datatype allowed
allow_datatype_change = False
# Read only metadata elements
MetadataElement(name="chromCol", default=1, desc="Chrom column", readonly=True, param=metadata.MetadataParameter)
MetadataElement(
name="startCol", default=2, desc="Start column", readonly=True, param=metadata.MetadataParameter
) # TODO: start and end should be able to be set to these or the proper thick[start/end]?
MetadataElement(name="endCol", default=3, desc="End column", readonly=True, param=metadata.MetadataParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
readonly=True,
param=metadata.MetadataParameter,
no_value=0,
optional=True,
)
MetadataElement(
name="nameCol",
desc="Name/Identifier column (click box & select)",
readonly=True,
param=metadata.MetadataParameter,
no_value=0,
optional=True,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
Tabular.__init__(self, **kwd)
self.clear_display_apps() # only new style display applications for this datatype
def set_meta(self, dataset, overwrite=True, **kwd):
Tabular.set_meta(self, dataset, overwrite=overwrite, **kwd) # need column count first
if dataset.metadata.columns >= 4:
dataset.metadata.nameCol = 4
if dataset.metadata.columns >= 6:
dataset.metadata.strandCol = 6
def sniff(self, filename):
return False # NOTE: This would require aggressively validating the entire file
class Bed6(BedStrict):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 6"""
edam_format = "format_3585"
file_ext = "bed6"
class Bed12(BedStrict):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 12"""
edam_format = "format_3586"
file_ext = "bed12"
class _RemoteCallMixin:
def _get_remote_call_url(self, redirect_url, site_name, dataset, type, app, base_url):
"""Retrieve the URL to call out to an external site and retrieve data.
This routes our external URL through a local Galaxy instance which makes
the data available, followed by redirecting to the remote site with a
link back to the available information.
"""
internal_url = f"{app.url_for(controller="dataset", dataset_id=dataset.id, action="display_at", filename=f"{type}_{site_name}')}"
base_url = app.config.get("display_at_callback", base_url)
display_url = quote_plus(
"%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, app.url_for(controller="root"), dataset.id, type)
)
link = f"{internal_url}?redirect_url={redirect_url}&display_url={display_url}"
return link
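# Rough shape of the link returned above (placeholder values, not real IDs):
#
#   {internal_url}?redirect_url={quoted remote site URL}&display_url={quoted local display_as URL}
#
# The browser first hits this Galaxy instance, which exposes the dataset and
# then redirects to the external site (e.g. UCSC or GBrowse) with a URL that
# points back at it.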
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Gff(Tabular, _RemoteCallMixin):
"""Tab delimited data in Gff format"""
edam_data = "data_1255"
edam_format = "format_2305"
file_ext = "gff"
valid_gff_frame = [".", "0", "1", "2"]
column_names = ["Seqname", "Source", "Feature", "Start", "End", "Score", "Strand", "Frame", "Group"]
data_sources = {"data": "interval_index", "index": "bigwig", "feature_search": "fli"}
track_type = Interval.track_type
MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="column_types",
default=["str", "str", "str", "int", "int", "int", "str", "str", "str"],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
)
MetadataElement(name="attributes", default=0, desc="Number of attributes", readonly=True, visible=False, no_value=0)
MetadataElement(
name="attribute_types",
default={},
desc="Attribute types",
param=metadata.DictParameter,
readonly=True,
visible=False,
no_value=[],
)
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
self.add_display_app("gbrowse", "display in Gbrowse", "as_gbrowse_display_file", "gbrowse_links")
def set_attribute_metadata(self, dataset):
"""
Sets metadata elements for dataset's attributes.
"""
# Use first N lines to set metadata for dataset attributes. Attributes
# not found in the first N lines will not have metadata.
num_lines = 200
attribute_types = {}
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh):
if line and not line.startswith("#"):
elems = line.split("\t")
if len(elems) == 9:
try:
# Loop through attributes to set types.
for name, value in parse_gff_attributes(elems[8]).items():
# Default type is string.
value_type = "str"
try:
# Try int.
int(value)
value_type = "int"
except ValueError:
try:
# Try float.
float(value)
value_type = "float"
except ValueError:
pass
attribute_types[name] = value_type
except Exception:
pass
if i + 1 == num_lines:
break
# Set attribute metadata and then set additional metadata.
dataset.metadata.attribute_types = attribute_types
dataset.metadata.attributes = len(attribute_types)
def set_meta(self, dataset, overwrite=True, **kwd):
self.set_attribute_metadata(dataset)
i = 0
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
if len(elems) == 9:
try:
int(elems[3])
int(elems[4])
break
except Exception:
pass
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
def get_estimated_display_viewport(self, dataset):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
formats. This function should correctly handle both...
"""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max(viewport_feature_count, 500) # maximum number of lines to check; includes comment lines
if self.displayable(dataset):
try:
seqid = None
start = sys.maxsize
stop = 0
with compression_utils.get_fileobj(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
try:
if line.startswith("##sequence-region"): # ##sequence-region IV 6000000 6030000
elems = line.rstrip("\n\r").split()
if len(elems) > 3:
# line looks like:
# sequence-region ctg123 1 1497228
seqid = elems[1] # IV
start = int(elems[2]) # 6000000
stop = int(elems[3]) # 6030000
break # use location declared in file
elif len(elems) == 2 and elems[1].find("..") > 0:
# line looks like this:
# sequence-region X:120000..140000
elems = elems[1].split(":")
seqid = elems[0]
start = int(elems[1].split("..")[0])
stop = int(elems[1].split("..")[1])
break # use location declared in file
else:
log.debug(f"line ({line}) uses an unsupported ##sequence-region definition.")
# no break: if the definition is bad, we try another line
elif line.startswith("browser position"):
# Allow UCSC style browser and track info in the GFF file
pos_info = line.split()[-1]
seqid, startend = pos_info.split(":")
start, stop = map(int, startend.split("-"))
break # use location declared in file
elif not line.startswith(("#", "track", "browser")):
viewport_feature_count -= 1
elems = line.rstrip("\n\r").split("\t")
if len(elems) > 3:
if not seqid:
# We can only set the viewport for a single chromosome
seqid = elems[0]
if seqid == elems[0]:
# Make sure we have not spanned chromosomes
start = min(start, int(elems[3]))
stop = max(stop, int(elems[4]))
except Exception:
# most likely start/stop is not an int or not enough fields
pass
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if seqid is not None:
return (seqid, str(start), str(stop)) # Necessary to return strings?
except Exception:
log.exception("Unexpected error")
return (None, None, None) # could not determine viewport
def ucsc_links(self, dataset, type, app, base_url):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport(dataset)
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("ucsc"):
redirect_url = quote_plus(
f"{site_url}db={dataset.dbkey}&position={seqid}:{start}-{stop}&hgt.customText=%s"
)
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def gbrowse_links(self, dataset, type, app, base_url):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport(dataset)
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("gbrowse", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("gbrowse"):
if seqid.startswith("chr") and len(seqid) > 3:
seqid = seqid[3:]
redirect_url = quote_plus(f"{site_url}/?q={seqid}:{start}..{stop}&eurl=%s")
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in gff format
GFF lines have nine required fields that must be tab-separated.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('gff.gff3')
>>> Gff().sniff( fname )
False
>>> fname = get_test_fname('test.gff')
>>> Gff().sniff( fname )
True
"""
if len(get_headers(file_prefix, "\t", count=2)) < 2:
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t"):
if not hdr or hdr == [""]:
continue
hdr0_parts = hdr[0].split()
if hdr0_parts[0] == "##gff-version":
return hdr0_parts[1].startswith("2")
# The gff-version header comment may have been stripped, so inspect the data
if hdr[0].startswith("#"):
continue
if len(hdr) != 9:
return False
try:
int(hdr[3])
int(hdr[4])
except Exception:
return False
if hdr[5] != ".":
try:
float(hdr[5])
except Exception:
return False
if hdr[6] not in data.valid_strand:
return False
if hdr[7] not in self.valid_gff_frame:
return False
found_valid_lines = True
return found_valid_lines
except Exception:
return False
# ------------- Dataproviders
# redefine bc super is Tabular
@dataproviders.decorators.dataprovider_factory(
"genomic-region", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dataprovider(self, dataset, **settings):
return dataproviders.dataset.GenomicRegionDataProvider(dataset, 0, 3, 4, **settings)
@dataproviders.decorators.dataprovider_factory(
"genomic-region-dict", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.genomic_region_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory("interval", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dataprovider(self, dataset, **settings):
return dataproviders.dataset.IntervalDataProvider(dataset, 0, 3, 4, 6, 2, **settings)
@dataproviders.decorators.dataprovider_factory("interval-dict", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.interval_dataprovider(dataset, **settings)
class Gff3(Gff):
"""Tab delimited data in Gff3 format"""
edam_format = "format_1975"
file_ext = "gff3"
valid_gff3_strand = ["+", "-", ".", "?"]
valid_gff3_phase = Gff.valid_gff_frame
column_names = ["Seqid", "Source", "Type", "Start", "End", "Score", "Strand", "Phase", "Attributes"]
track_type = Interval.track_type
MetadataElement(
name="column_types",
default=["str", "str", "str", "int", "int", "float", "str", "int", "list"],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
)
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Gff.__init__(self, **kwd)
def set_meta(self, dataset, overwrite=True, **kwd):
self.set_attribute_metadata(dataset)
i = 0
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
valid_start = False
valid_end = False
if len(elems) == 9:
try:
start = int(elems[3])
valid_start = True
except Exception:
if elems[3] == ".":
valid_start = True
try:
end = int(elems[4])
valid_end = True
except Exception:
if elems[4] == ".":
valid_end = True
strand = elems[6]
phase = elems[7]
if (
valid_start
and valid_end
and start < end
and strand in self.valid_gff3_strand
and phase in self.valid_gff3_phase
):
break
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in GFF version 3 format
GFF 3 format:
1) adds a mechanism for representing more than one level
of hierarchical grouping of features and subfeatures.
2) separates the ideas of group membership and feature name/id
3) constrains the feature type field to be taken from a controlled
vocabulary.
4) allows a single feature, such as an exon, to belong to more than
one group at a time.
5) provides an explicit convention for pairwise alignments
6) provides an explicit convention for features that occupy disjunct regions
The format consists of 9 columns, separated by tabs (NOT spaces).
Undefined fields are replaced with the "." character, as described in the original GFF spec.
For complete details see http://song.sourceforge.net/gff3.shtml
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test.gff' )
>>> Gff3().sniff( fname )
False
>>> fname = get_test_fname( 'test.gtf' )
>>> Gff3().sniff( fname )
False
>>> fname = get_test_fname('gff.gff3')
>>> Gff3().sniff( fname )
True
>>> fname = get_test_fname( 'grch37.75.gtf' )
>>> Gff3().sniff( fname )
False
"""
if len(get_headers(file_prefix, "\t", count=2)) < 2:
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t"):
if not hdr or hdr == [""]:
continue
hdr0_parts = hdr[0].split()
if hdr0_parts[0] == "##gff-version":
return hdr0_parts[1].startswith("3")
# The gff-version header comment may have been stripped, so inspect the data
if hdr[0].startswith("#"):
continue
if len(hdr) != 9:
return False
try:
int(hdr[3])
except Exception:
if hdr[3] != ".":
return False
try:
int(hdr[4])
except Exception:
if hdr[4] != ".":
return False
if hdr[5] != ".":
try:
float(hdr[5])
except Exception:
return False
if hdr[6] not in self.valid_gff3_strand:
return False
if hdr[7] not in self.valid_gff3_phase:
return False
parse_gff3_attributes(hdr[8])
found_valid_lines = True
return found_valid_lines
except Exception:
return False
class Gtf(Gff):
"""Tab delimited data in Gtf format"""
edam_format = "format_2306"
file_ext = "gtf"
column_names = ["Seqname", "Source", "Feature", "Start", "End", "Score", "Strand", "Frame", "Attributes"]
track_type = Interval.track_type
MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="column_types",
default=["str", "str", "str", "int", "int", "float", "str", "int", "list"],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
)
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in gtf format
GTF lines have nine required fields that must be tab-separated. The first eight GTF fields are the same as GFF.
The group field has been expanded into a list of attributes. Each attribute consists of a type/value pair.
Attributes must end in a semi-colon, and be separated from any following attribute by exactly one space.
The attribute list must begin with the two mandatory attributes:
gene_id value - A globally unique identifier for the genomic source of the sequence.
transcript_id value - A globally unique identifier for the predicted transcript.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format4
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( '1.bed' )
>>> Gtf().sniff( fname )
False
>>> fname = get_test_fname( 'test.gff' )
>>> Gtf().sniff( fname )
False
>>> fname = get_test_fname( 'test.gtf' )
>>> Gtf().sniff( fname )
True
>>> fname = get_test_fname( 'grch37.75.gtf' )
>>> Gtf().sniff( fname )
True
"""
if len(get_headers(file_prefix, "\t", count=2)) < 2:
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t"):
if not hdr or hdr == [""]:
continue
hdr0_parts = hdr[0].split()
if hdr0_parts[0] == "##gff-version" and not hdr0_parts[1].startswith("2"):
return False
# The gff-version header comment may have been stripped, so inspect the data
if hdr[0].startswith("#"):
continue
if len(hdr) != 9:
return False
try:
int(hdr[3])
int(hdr[4])
except Exception:
return False
if hdr[5] != ".":
try:
float(hdr[5])
except Exception:
return False
if hdr[6] not in data.valid_strand:
return False
if hdr[7] not in self.valid_gff_frame:
return False
# Check attributes for gene_id (transcript_id is also mandatory
# but not for genes)
attributes = parse_gff_attributes(hdr[8])
if "gene_id" not in attributes:
return False
found_valid_lines = True
return found_valid_lines
except Exception:
return False
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Wiggle(Tabular, _RemoteCallMixin):
"""Tab delimited data in wiggle format"""
edam_format = "format_3005"
file_ext = "wig"
track_type = "LineTrack"
data_sources = {"data": "bigwig", "index": "bigwig"}
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
self.add_display_app("gbrowse", "display in Gbrowse", "as_gbrowse_display_file", "gbrowse_links")
def get_estimated_display_viewport(self, dataset):
"""Return a chrom, start, stop tuple for viewing a file."""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max(viewport_feature_count, 500) # maximum number of lines to check; includes comment lines
if self.displayable(dataset):
try:
chrom = None
start = sys.maxsize
end = 0
span = 1
step = None
with open(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
try:
if line.startswith("browser"):
chr_info = line.rstrip("\n\r").split()[-1]
chrom, coords = chr_info.split(":")
start, end = map(int, coords.split("-"))
break # use the browser line
# variableStep chrom=chr20
if line and (
line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")
):
if chrom is not None:
break # different chrom or different section of the chrom
chrom = line.rstrip("\n\r").split("chrom=")[1].split()[0]
if "span=" in line:
span = int(line.rstrip("\n\r").split("span=")[1].split()[0])
if "step=" in line:
step = int(line.rstrip("\n\r").split("step=")[1].split()[0])
start = int(line.rstrip("\n\r").split("start=")[1].split()[0])
else:
fields = line.rstrip("\n\r").split()
if fields:
if step is not None:
if not end:
end = start + span
else:
end += step
else:
start = min(int(fields[0]), start)
end = max(end, int(fields[0]) + span)
viewport_feature_count -= 1
except Exception:
pass
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if chrom is not None:
return (chrom, str(start), str(end)) # Necessary to return strings?
except Exception:
log.exception("Unexpected error")
return (None, None, None) # could not determine viewport
def gbrowse_links(self, dataset, type, app, base_url):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("gbrowse", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("gbrowse"):
if chrom.startswith("chr") and len(chrom) > 3:
chrom = chrom[3:]
redirect_url = quote_plus(f"{site_url}/?q={chrom}:{start}..{stop}&eurl=%s")
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def ucsc_links(self, dataset, type, app, base_url):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("ucsc"):
redirect_url = quote_plus(
f"{site_url}db={dataset.dbkey}&position={chrom}:{start}-{stop}&hgt.customText=%s"
)
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, skipchars=["track", "#"])
def set_meta(self, dataset, overwrite=True, **kwd):
max_data_lines = None
i = 0
for i, line in enumerate(open(dataset.file_name)): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
try:
# variableStep format is nucleotide position\tvalue\n,
# fixedStep is value\n
# "Wiggle track data values can be integer or real, positive or negative values"
float(elems[0])
break
except Exception:
# We are either in the track definition line or in a declaration line
pass
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
# we'll arbitrarily only use the first 100 data lines in this wig file to calculate tabular attributes (column types)
# this should be sufficient, except when we have mixed wig track types (bed, variable, fixed),
# but those cases are not a single table that would have consistent column definitions
# optional metadata values set in Tabular class will be 'None'
max_data_lines = 100
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i, max_data_lines=max_data_lines)
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in wiggle format
The .wig format is line-oriented. Wiggle data is preceded by a track definition line,
which adds a number of options for controlling the default display of this track.
Following the track definition line is the track data, which can be entered in several
different formats.
The track definition line begins with the word 'track' followed by the track type.
The track type with version is REQUIRED, and it currently must be wiggle_0. For example,
track type=wiggle_0...
For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'interv1.bed' )
>>> Wiggle().sniff( fname )
False
>>> fname = get_test_fname( 'wiggle.wig' )
>>> Wiggle().sniff( fname )
True
"""
try:
headers = iter_headers(file_prefix, None)
for hdr in headers:
if len(hdr) > 1 and hdr[0] == "track" and hdr[1].startswith("type=wiggle"):
return True
return False
except Exception:
return False
def get_track_resolution(self, dataset, start, end):
region_size = end - start
# Determine appropriate resolution to plot ~1000 points
resolution = math.ceil(10 ** math.ceil(math.log10(region_size / 1000)))
# Restrict to valid range
resolution = min(resolution, 100000)
resolution = max(resolution, 1)
return resolution
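# Worked example (hypothetical window): for a 250,000 bp region,
# 250000 / 1000 = 250, ceil(log10(250)) = 3, so resolution = 10**3 = 1000,
# which already lies inside the clamped [1, 100000] range.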
# ------------- Dataproviders
@dataproviders.decorators.dataprovider_factory("wiggle", dataproviders.dataset.WiggleDataProvider.settings)
def wiggle_dataprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory("wiggle-dict", dataproviders.dataset.WiggleDataProvider.settings)
def wiggle_dict_dataprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
settings["named_columns"] = True
return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)
@build_sniff_from_prefix
class CustomTrack(Tabular):
"""UCSC CustomTrack"""
edam_format = "format_3588"
file_ext = "customtrack"
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display app"""
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
def set_meta(self, dataset, overwrite=True, **kwd):
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=1)
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, skipchars=["track", "#"])
def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
"""Return a chrom, start, stop tuple for viewing a file."""
# FIXME: only BED and WIG custom tracks are currently supported
# As per previously existing behavior, the viewport will only cover the first intervals found
max_line_count = 100 # maximum number of lines to check; includes comment lines
variable_step_wig = False
chrom = None
span = 1
if self.displayable(dataset):
try:
with open(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
if not line.startswith("#"):
try:
if variable_step_wig:
fields = line.rstrip().split()
if len(fields) == 2:
start = int(fields[0])
return (chrom, str(start), str(start + span))
elif line and (
line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")
):
chrom = line.rstrip("\n\r").split("chrom=")[1].split()[0]
if "span=" in line:
span = int(line.rstrip("\n\r").split("span=")[1].split()[0])
if "start=" in line:
start = int(line.rstrip("\n\r").split("start=")[1].split()[0])
return (chrom, str(start), str(start + span))
else:
variable_step_wig = True
else:
fields = line.rstrip().split("\t")
if len(fields) >= 3:
chrom = fields[0]
start = int(fields[1])
end = int(fields[2])
return (chrom, str(start), str(end))
except Exception:
# most likely a non-integer field has been encountered for start / stop
continue
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not max_line_count:
# exceeded viewport or total line count to check
break
except Exception:
log.exception("Unexpected error")
return (None, None, None) # could not determine viewport
def ucsc_links(self, dataset, type, app, base_url):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("ucsc"):
internal_url = f"{app.url_for(controller="dataset", dataset_id=dataset.id, action="display_at", filename="ucsc_" + site_name)}"
display_url = quote_plus(
"%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, app.url_for(controller="root"), dataset.id, type)
)
redirect_url = quote_plus(
f"{site_url}db={dataset.dbkey}&position={chrom}:{start}-{stop}&hgt.customText=%s"
)
link = f"{internal_url}?redirect_url={redirect_url}&display_url={display_url}"
ret_val.append((site_name, link))
return ret_val
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in customtrack format.
CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking
something like this.
track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'complete.bed' )
>>> CustomTrack().sniff( fname )
False
>>> fname = get_test_fname( 'ucsc.customtrack' )
>>> CustomTrack().sniff( fname )
True
"""
headers = iter_headers(file_prefix, None)
found_at_least_one_track = False
first_line = True
for hdr in headers:
if first_line:
first_line = False
try:
if hdr[0].startswith("track"):
color_found = False
visibility_found = False
for elem in hdr[1:]:
if elem.startswith("color"):
color_found = True
if elem.startswith("visibility"):
visibility_found = True
if color_found and visibility_found:
break
if not color_found or not visibility_found:
return False
else:
return False
except Exception:
return False
else:
try:
if hdr[0] and not hdr[0].startswith("#"):
if len(hdr) < 3:
return False
try:
int(hdr[1])
int(hdr[2])
except Exception:
return False
found_at_least_one_track = True
except Exception:
return False
return found_at_least_one_track
class ENCODEPeak(Interval):
"""
Human ENCODE peak format. There are both broad and narrow peak formats.
Formats are very similar; narrow peak has an additional column, though.
Broad peak ( http://genome.ucsc.edu/FAQ/FAQformat#format13 ):
This format is used to provide called regions of signal enrichment based
on pooled, normalized (interpreted) data. It is a BED6+3 format.
Narrow peak ( http://genome.ucsc.edu/FAQ/FAQformat#format12 ):
This format is used to provide called peaks of signal enrichment based on
pooled, normalized (interpreted) data. It is a BED6+4 format.
"""
edam_format = "format_3612"
file_ext = "encodepeak"
column_names = ["Chrom", "Start", "End", "Name", "Score", "Strand", "SignalValue", "pValue", "qValue", "Peak"]
data_sources = {"data": "tabix", "index": "bigwig"}
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def sniff(self, filename):
return False
class ChromatinInteractions(Interval):
"""
Chromatin interactions obtained from 3C/5C/Hi-C experiments.
"""
file_ext = "chrint"
track_type = "DiagonalHeatmapTrack"
data_sources = {"data": "tabix", "index": "bigwig"}
column_names = ["Chrom1", "Start1", "End1", "Chrom2", "Start2", "End2", "Value"]
MetadataElement(name="chrom1Col", default=1, desc="Chrom1 column", param=metadata.ColumnParameter)
MetadataElement(name="start1Col", default=2, desc="Start1 column", param=metadata.ColumnParameter)
MetadataElement(name="end1Col", default=3, desc="End1 column", param=metadata.ColumnParameter)
MetadataElement(name="chrom2Col", default=4, desc="Chrom2 column", param=metadata.ColumnParameter)
MetadataElement(name="start2Col", default=5, desc="Start2 column", param=metadata.ColumnParameter)
MetadataElement(name="end2Col", default=6, desc="End2 column", param=metadata.ColumnParameter)
MetadataElement(name="valueCol", default=7, desc="Value column", param=metadata.ColumnParameter)
MetadataElement(name="columns", default=7, desc="Number of columns", readonly=True, visible=False)
def sniff(self, filename):
return False
@build_sniff_from_prefix
class ScIdx(Tabular):
"""
ScIdx files are 1-based and consist of strand-specific coordinate counts.
They always have 5 columns, and the first row is the column labels:
'chrom', 'index', 'forward', 'reverse', 'value'.
Each line following the first consists of data:
chromosome name (type str), peak index (type int), Forward strand peak
count (type int), Reverse strand peak count (type int) and value (type int).
The value of the 5th 'value' column is the sum of the forward and reverse
peak count values.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('cntrl_hg19.scidx')
>>> ScIdx().sniff(fname)
True
>>> Bed().sniff(fname)
False
>>> fname = get_test_fname('empty.txt')
>>> ScIdx().sniff(fname)
False
"""
file_ext = "scidx"
MetadataElement(name="columns", default=0, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="column_types",
default=[],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
no_value=[],
)
def __init__(self, **kwd):
"""
Initialize scidx datatype.
"""
Tabular.__init__(self, **kwd)
# The first line of the dataset already carries these labels as a header;
# keep matching column names here for the tabular display.
self.column_names = ["chrom", "index", "forward", "reverse", "value"]
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Checks for 'scidx-ness.'
"""
count = 0
for count, line in enumerate(iter_headers(file_prefix, "\t")):
# The first line is always a comment like this:
# 2015-11-23 20:18:56.51;input.bam;READ1
if count == 0:
if not line[0].startswith("#"):
return False
# The 2nd line is always a specific header
elif count == 1:
if line != ["chrom", "index", "forward", "reverse", "value"]:
return False
# data line columns 2:5 need to be integers and
# the fwd and rev columns need to sum to the value column
else:
if len(line) != 5:
return False
if not line[1].isdigit():
return False
if int(line[2]) + int(line[3]) != int(line[4]):
return False
# just check one data line
break
# at least the comment and header are required
if count >= 1:
return True
return False
if __name__ == "__main__":
import doctest
doctest.testmod(sys.modules[__name__])
"""
Interval datatypes
"""
import logging
import math
import sys
import tempfile
from urllib.parse import quote_plus
from bx.intervals.io import (
GenomicIntervalReader,
ParseError,
)
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.datatypes.data import DatatypeValidation
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import (
build_sniff_from_prefix,
FilePrefix,
get_headers,
iter_headers,
)
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.util.gff_util import (
parse_gff3_attributes,
parse_gff_attributes,
)
from galaxy.util import compression_utils
from . import (
data,
dataproviders,
)
log = logging.getLogger(__name__)
# Contains the meta columns and the words that map to them; list aliases on the
# right side of the : in decreasing order of priority
alias_spec = {
"chromCol": ["chrom", "CHROMOSOME", "CHROM", "Chromosome Name"],
"startCol": ["start", "START", "chromStart", "txStart", "Start Position (bp)"],
"endCol": ["end", "END", "STOP", "chromEnd", "txEnd", "End Position (bp)"],
"strandCol": ["strand", "STRAND", "Strand"],
"nameCol": [
"name",
"NAME",
"Name",
"name2",
"NAME2",
"Name2",
"Ensembl Gene ID",
"Ensembl Transcript ID",
"Ensembl Peptide ID",
],
}
# a little faster lookup
alias_helper = {}
for key, value in alias_spec.items():
for elem in value:
alias_helper[elem] = key
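# Example of how these aliases are used (see Interval.set_meta below): a header
# comment such as
#
#   #chrom  txStart  txEnd  name  STRAND
#
# would set chromCol=1, startCol=2, endCol=3, nameCol=4 and strandCol=5, because
# each label appears in alias_spec under the corresponding metadata element.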
# Constants for configuring viewport generation: If a line is greater than
# VIEWPORT_MAX_READS_PER_LINE * VIEWPORT_READLINE_BUFFER_SIZE bytes in size,
# then we will not generate a viewport for that dataset
VIEWPORT_READLINE_BUFFER_SIZE = 1048576 # 1MB
VIEWPORT_MAX_READS_PER_LINE = 10
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Interval(Tabular):
"""Tab delimited data containing interval information"""
edam_data = "data_3002"
edam_format = "format_3475"
file_ext = "interval"
line_class = "region"
track_type = "FeatureTrack"
data_sources = {"data": "tabix", "index": "bigwig"}
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(
name="nameCol",
desc="Name/Identifier column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display apps"""
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
def init_meta(self, dataset, copy_from=None):
Tabular.init_meta(self, dataset, copy_from=copy_from)
def set_meta(self, dataset, overwrite=True, first_line_is_header=False, **kwd):
"""Tries to guess from the line the location number of the column for the chromosome, region start-end and strand"""
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=0)
if dataset.has_data():
empty_line_count = 0
num_check_lines = 100 # only check up to this many non empty lines
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh):
line = line.rstrip("\r\n")
if line:
if first_line_is_header or line[0] == "#":
self.init_meta(dataset)
line = line.strip("#")
elems = line.split("\t")
for meta_name, header_list in alias_spec.items():
for header_val in header_list:
if header_val in elems:
# found highest priority header to meta_name
setattr(dataset.metadata, meta_name, elems.index(header_val) + 1)
break # next meta_name
break # Our metadata is set, so break out of the outer loop
else:
# Header lines in Interval files are optional. For example, BED is Interval but has no header.
# We'll make a best guess at the location of the metadata columns.
elems = line.split("\t")
if len(elems) > 2:
if overwrite or not dataset.metadata.element_is_set("chromCol"):
dataset.metadata.chromCol = 1
try:
int(elems[1])
if overwrite or not dataset.metadata.element_is_set("startCol"):
dataset.metadata.startCol = 2
except Exception:
pass # Metadata default will be used
try:
int(elems[2])
if overwrite or not dataset.metadata.element_is_set("endCol"):
dataset.metadata.endCol = 3
except Exception:
pass # Metadata default will be used
# we no longer want to guess that this column is the 'name'; nameCol must now be set manually for interval files
# we will still guess at the strand, as we can make a more educated guess
# if len( elems ) > 3:
# try:
# int( elems[3] )
# except Exception:
# if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
# dataset.metadata.nameCol = 4
if len(elems) < 6 or elems[5] not in data.valid_strand:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 6
break
if (i - empty_line_count) > num_check_lines:
break # Our metadata is set or we examined 100 non-empty lines, so break out of the outer loop
else:
empty_line_count += 1
def displayable(self, dataset):
try:
return (
dataset.has_data()
and dataset.state == dataset.states.OK
and dataset.metadata.columns > 0
and dataset.metadata.data_lines != 0
and dataset.metadata.chromCol
and dataset.metadata.startCol
and dataset.metadata.endCol
)
except Exception:
return False
def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
"""Return a chrom, start, stop tuple for viewing a file."""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max(viewport_feature_count, 500) # maximum number of lines to check; includes comment lines
if not self.displayable(dataset):
return (None, None, None)
try:
# If column indexes were not passed, determine them from metadata
if chrom_col is None:
chrom_col = int(dataset.metadata.chromCol) - 1
if start_col is None:
start_col = int(dataset.metadata.startCol) - 1
if end_col is None:
end_col = int(dataset.metadata.endCol) - 1
# Scan lines of file to find a reasonable chromosome and range
chrom = None
start = sys.maxsize
end = 0
max_col = max(chrom_col, start_col, end_col)
with compression_utils.get_fileobj(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
# Skip comment lines
if not line.startswith("#"):
try:
fields = line.rstrip().split("\t")
if len(fields) > max_col:
if chrom is None or chrom == fields[chrom_col]:
start = min(start, int(fields[start_col]))
end = max(end, int(fields[end_col]))
# Set chrom last, in case start and end are not integers
chrom = fields[chrom_col]
viewport_feature_count -= 1
except Exception:
# Most likely a non-integer field has been encountered
# for start / stop. Just ignore and make sure we finish
# reading the line and decrementing the counters.
pass
# Make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if chrom is not None:
return (chrom, str(start), str(end)) # Necessary to return strings?
except Exception:
# Unexpected error, possibly missing metadata
log.exception("Exception caught attempting to generate viewport for dataset '%d'", dataset.id)
return (None, None, None)
def as_ucsc_display_file(self, dataset, **kwd):
"""Returns file contents with only the bed data"""
with tempfile.NamedTemporaryFile(delete=False, mode="w") as fh:
c, s, e, t, n = (
dataset.metadata.chromCol,
dataset.metadata.startCol,
dataset.metadata.endCol,
dataset.metadata.strandCol or 0,
dataset.metadata.nameCol or 0,
)
c, s, e, t, n = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1, int(n) - 1
if t >= 0: # strand column (should) exists
for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
strand = "+"
name = "region_%i" % i
if n >= 0 and n < len(elems):
name = elems[n]
if t < len(elems):
strand = elems[t]
tmp = [elems[c], elems[s], elems[e], name, "0", strand]
fh.write("%s\n" % "\t".join(tmp))
elif n >= 0: # name column (should) exists
for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
name = "region_%i" % i
if n >= 0 and n < len(elems):
name = elems[n]
tmp = [elems[c], elems[s], elems[e], name]
fh.write("%s\n" % "\t".join(tmp))
else:
for elems in compression_utils.file_iter(dataset.file_name):
tmp = [elems[c], elems[s], elems[e]]
fh.write("%s\n" % "\t".join(tmp))
return compression_utils.get_fileobj(fh.name, mode="rb")
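# Sketch of the temporary file written above (hypothetical input row): with
# chromCol/startCol/endCol/nameCol/strandCol metadata of 1/2/3/4/6, the line
#
#   chr7  127471196  127472363  Pos1  0  +
#
# is emitted as "chr7  127471196  127472363  Pos1  0  +" (tab-separated), with
# the score column hard-coded to "0" and name/strand falling back to
# "region_<i>" and "+" when their columns are unset.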
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(
dataset,
column_parameter_alias={
"chromCol": "Chrom",
"startCol": "Start",
"endCol": "End",
"strandCol": "Strand",
"nameCol": "Name",
},
)
def ucsc_links(self, dataset, type, app, base_url):
"""
Generate links to UCSC genome browser sites based on the dbkey
and content of dataset.
"""
# Filter UCSC sites to only those that are supported by this build and
# enabled.
valid_sites = [
(name, url)
for name, url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey)
if name in app.datatypes_registry.get_display_sites("ucsc")
]
if not valid_sites:
return []
# If there are any valid sites, we need to generate the estimated
# viewport
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is None:
return []
# Accumulate links for valid sites
ret_val = []
for site_name, site_url in valid_sites:
internal_url = app.url_for(
controller="dataset", dataset_id=dataset.id, action="display_at", filename="ucsc_" + site_name
)
display_url = quote_plus(
"%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, app.url_for(controller="root"), dataset.id, type)
)
redirect_url = quote_plus(f"{site_url}db={dataset.dbkey}&position={chrom}:{start}-{stop}&hgt.customText=%s")
link = f"{internal_url}?redirect_url={redirect_url}&display_url={display_url}"
ret_val.append((site_name, link))
return ret_val
def validate(self, dataset, **kwd):
"""Validate an interval file using the bx GenomicIntervalReader"""
c, s, e, t = (
dataset.metadata.chromCol,
dataset.metadata.startCol,
dataset.metadata.endCol,
dataset.metadata.strandCol,
)
c, s, e, t = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1
with compression_utils.get_fileobj(dataset.file_name, "r") as infile:
reader = GenomicIntervalReader(infile, chrom_col=c, start_col=s, end_col=e, strand_col=t)
while True:
try:
next(reader)
except ParseError as e:
return DatatypeValidation.invalid(util.unicodify(e))
except StopIteration:
return DatatypeValidation.valid()
def repair_methods(self, dataset):
"""Return options for removing errors along with a description"""
return [("lines", "Remove erroneous lines")]
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Checks for 'intervalness'
This format is mostly used by Galaxy itself. Valid interval files should include
a valid header comment, but this seems to be loosely regulated.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_space.txt' )
>>> Interval().sniff( fname )
False
>>> fname = get_test_fname( 'interval.interval' )
>>> Interval().sniff( fname )
True
"""
found_valid_lines = False
try:
headers = iter_headers(file_prefix, "\t", comment_designator="#")
# If we got here, we already know the file is_column_based and is not bed,
# so we'll just look for some valid data.
for hdr in headers:
if hdr:
if len(hdr) < 3:
return False
# Assume chrom start and end are in column positions 1 and 2
# respectively ( for 0 based columns )
int(hdr[1])
int(hdr[2])
found_valid_lines = True
except Exception:
return False
return found_valid_lines
def get_track_resolution(self, dataset, start, end):
return None
# ------------- Dataproviders
@dataproviders.decorators.dataprovider_factory(
"genomic-region", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dataprovider(self, dataset, **settings):
return dataproviders.dataset.GenomicRegionDataProvider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory(
"genomic-region-dict", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.genomic_region_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory("interval", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dataprovider(self, dataset, **settings):
return dataproviders.dataset.IntervalDataProvider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory("interval-dict", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.interval_dataprovider(dataset, **settings)
class BedGraph(Interval):
"""Tab delimited chrom/start/end/datavalue dataset"""
edam_format = "format_3583"
file_ext = "bedgraph"
track_type = "LineTrack"
data_sources = {"data": "bigwig", "index": "bigwig"}
def as_ucsc_display_file(self, dataset, **kwd):
"""
Returns file contents as is with no modifications.
TODO: this is a functional stub and will need to be enhanced moving forward to provide additional support for bedgraph.
"""
return open(dataset.file_name, "rb")
def get_estimated_display_viewport(self, dataset, chrom_col=0, start_col=1, end_col=2):
"""
Set viewport based on dataset's first 100 lines.
"""
return Interval.get_estimated_display_viewport(
self, dataset, chrom_col=chrom_col, start_col=start_col, end_col=end_col
)
class Bed(Interval):
"""Tab delimited data in BED format"""
edam_format = "format_3003"
file_ext = "bed"
data_sources = {"data": "tabix", "index": "bigwig", "feature_search": "fli"}
track_type = Interval.track_type
check_required_metadata = True
column_names = [
"Chrom",
"Start",
"End",
"Name",
"Score",
"Strand",
"ThickStart",
"ThickEnd",
"ItemRGB",
"BlockCount",
"BlockSizes",
"BlockStarts",
]
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="viz_filter_cols",
desc="Score column for visualization",
default=[4],
param=metadata.ColumnParameter,
optional=True,
multiple=True,
)
# Do we need to repeat these? They should be the same as those inherited from the Interval type.
def set_meta(self, dataset, overwrite=True, **kwd):
"""Sets the metadata information for datasets previously determined to be in bed format."""
if dataset.has_data():
i = 0
for i, line in enumerate(open(dataset.file_name)): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
if len(elems) > 2:
if len(elems) > 3:
if overwrite or not dataset.metadata.element_is_set("nameCol"):
dataset.metadata.nameCol = 4
if len(elems) < 6:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set("strandCol"):
dataset.metadata.strandCol = 6
break
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def as_ucsc_display_file(self, dataset, **kwd):
"""Returns file contents with only the bed data. If bed 6+, treat as interval."""
for line in open(dataset.file_name):
line = line.strip()
if line == "" or line.startswith("#"):
continue
fields = line.split("\t")
"""check to see if this file doesn't conform to strict genome browser accepted bed"""
try:
if len(fields) > 12:
return Interval.as_ucsc_display_file(self, dataset) # too many fields
if len(fields) > 6:
int(fields[6])
if len(fields) > 7:
int(fields[7])
if len(fields) > 8:
if int(fields[8]) != 0:
return Interval.as_ucsc_display_file(self, dataset)
if len(fields) > 9:
int(fields[9])
if len(fields) > 10:
fields2 = (
fields[10].rstrip(",").split(",")
) # remove trailing comma and split on comma
for field in fields2:
int(field)
if len(fields) > 11:
fields2 = (
fields[11].rstrip(",").split(",")
) # remove trailing comma and split on comma
for field in fields2:
int(field)
except Exception:
return Interval.as_ucsc_display_file(self, dataset)
# only check first line for proper form
break
try:
return open(dataset.file_name, "rb")
except Exception:
return "This item contains no content"
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Checks for 'bedness'
BED lines have three required fields and nine additional optional fields.
The number of fields per line must be consistent throughout any single set of data in
an annotation track. The order of the optional fields is binding: lower-numbered
fields must always be populated if higher-numbered fields are used. The data types of
the 12 columns are:
1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_tab.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'interv1.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'complete.bed' )
>>> Bed().sniff( fname )
True
"""
if not get_headers(file_prefix, "\t", comment_designator="#", count=1):
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t", comment_designator="#"):
if not hdr or hdr == [""]:
continue
if len(hdr) < 3 or len(hdr) > 12:
return False
try:
int(hdr[1])
int(hdr[2])
except Exception:
return False
if len(hdr) > 4:
# hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this.
# hdr[4] is an int, 'score', a score between 0 and 1000.
try:
if int(hdr[4]) < 0 or int(hdr[4]) > 1000:
return False
except Exception:
return False
if len(hdr) > 5:
# hdr[5] is strand
if hdr[5] not in data.valid_strand:
return False
if len(hdr) > 6:
# hdr[6] is thickStart, the starting position at which the feature is drawn thickly.
try:
int(hdr[6])
except Exception:
return False
if len(hdr) > 7:
# hdr[7] is thickEnd, the ending position at which the feature is drawn thickly
try:
int(hdr[7])
except Exception:
return False
if len(hdr) > 8:
# hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0)
try:
int(hdr[8])
except Exception:
try:
hdr[8].split(",")
except Exception:
return False
if len(hdr) > 9:
# hdr[9] is blockCount, the number of blocks (exons) in the BED line.
try:
block_count = int(hdr[9])
except Exception:
return False
if len(hdr) > 10:
# hdr[10] is blockSizes - A comma-separated list of the block sizes.
# Sometimes the block_sizes and block_starts lists end in extra commas
try:
block_sizes = hdr[10].rstrip(",").split(",")
except Exception:
return False
if len(hdr) > 11:
# hdr[11] is blockStarts - A comma-separated list of block starts.
try:
block_starts = hdr[11].rstrip(",").split(",")
except Exception:
return False
if len(block_sizes) != block_count or len(block_starts) != block_count:
return False
found_valid_lines = True
return found_valid_lines
except Exception:
return False
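# Illustrative sketch (editor's addition, not part of the Galaxy source): the sniffer
# above accepts tab-separated lines of 3 to 12 fields whose start/end parse as ints,
# whose optional score lies in 0-1000 and whose strand is valid. A hypothetical
# self-check against a throw-away file could look like this:
def _example_bed_sniff():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".bed", delete=False) as tmp:
        tmp.write("chr1\t100\t200\tfeature1\t500\t+\n")
        fname = tmp.name
    return Bed().sniff(fname)  # expected to be True for this minimal BED6 line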
class ProBed(Bed):
"""Tab delimited data in proBED format - adaptation of BED for proteomics data."""
edam_format = "format_3827"
file_ext = "probed"
column_names = [
"Chrom",
"Start",
"End",
"Name",
"Score",
"Strand",
"ThickStart",
"ThickEnd",
"ItemRGB",
"BlockCount",
"BlockSizes",
"BlockStarts",
"ProteinAccession",
"PeptideSequence",
"Uniqueness",
"GenomeReferenceVersion",
"PsmScore",
"Fdr",
"Modifications",
"Charge",
"ExpMassToCharge",
"CalcMassToCharge",
"PsmRank",
"DatasetID",
"Uri",
]
class BedStrict(Bed):
"""Tab delimited data in strict BED format - no non-standard columns allowed"""
edam_format = "format_3584"
file_ext = "bedstrict"
# no user change of datatype allowed
allow_datatype_change = False
# Read only metadata elements
MetadataElement(name="chromCol", default=1, desc="Chrom column", readonly=True, param=metadata.MetadataParameter)
MetadataElement(
name="startCol", default=2, desc="Start column", readonly=True, param=metadata.MetadataParameter
) # TODO: start and end should be able to be set to these or the proper thick[start/end]?
MetadataElement(name="endCol", default=3, desc="End column", readonly=True, param=metadata.MetadataParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
readonly=True,
param=metadata.MetadataParameter,
no_value=0,
optional=True,
)
MetadataElement(
name="nameCol",
desc="Name/Identifier column (click box & select)",
readonly=True,
param=metadata.MetadataParameter,
no_value=0,
optional=True,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
Tabular.__init__(self, **kwd)
self.clear_display_apps() # only new style display applications for this datatype
def set_meta(self, dataset, overwrite=True, **kwd):
Tabular.set_meta(self, dataset, overwrite=overwrite, **kwd) # need column count first
if dataset.metadata.columns >= 4:
dataset.metadata.nameCol = 4
if dataset.metadata.columns >= 6:
dataset.metadata.strandCol = 6
def sniff(self, filename):
return False # NOTE: This would require aggressively validating the entire file
class Bed6(BedStrict):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 6"""
edam_format = "format_3585"
file_ext = "bed6"
class Bed12(BedStrict):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 12"""
edam_format = "format_3586"
file_ext = "bed12"
class _RemoteCallMixin:
def _get_remote_call_url(self, redirect_url, site_name, dataset, type, app, base_url):
"""Retrieve the URL to call out to an external site and retrieve data.
This routes our external URL through a local galaxy instance which makes
the data available, followed by redirecting to the remote site with a
link back to the available information.
"""
internal_url = f"{app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename=f'{type}_{site_name}')}"
base_url = app.config.get("display_at_callback", base_url)
display_url = quote_plus(
"%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, app.url_for(controller="root"), dataset.id, type)
)
link = f"{internal_url}?redirect_url={redirect_url}&display_url={display_url}"
return link
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Gff(Tabular, _RemoteCallMixin):
"""Tab delimited data in Gff format"""
edam_data = "data_1255"
edam_format = "format_2305"
file_ext = "gff"
valid_gff_frame = [".", "0", "1", "2"]
column_names = ["Seqname", "Source", "Feature", "Start", "End", "Score", "Strand", "Frame", "Group"]
data_sources = {"data": "interval_index", "index": "bigwig", "feature_search": "fli"}
track_type = Interval.track_type
MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="column_types",
default=["str", "str", "str", "int", "int", "int", "str", "str", "str"],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
)
MetadataElement(name="attributes", default=0, desc="Number of attributes", readonly=True, visible=False, no_value=0)
MetadataElement(
name="attribute_types",
default={},
desc="Attribute types",
param=metadata.DictParameter,
readonly=True,
visible=False,
no_value=[],
)
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
self.add_display_app("gbrowse", "display in Gbrowse", "as_gbrowse_display_file", "gbrowse_links")
def set_attribute_metadata(self, dataset):
"""
Sets metadata elements for dataset's attributes.
"""
# Use first N lines to set metadata for dataset attributes. Attributes
# not found in the first N lines will not have metadata.
num_lines = 200
attribute_types = {}
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh):
if line and not line.startswith("#"):
elems = line.split("\t")
if len(elems) == 9:
try:
# Loop through attributes to set types.
for name, value in parse_gff_attributes(elems[8]).items():
# Default type is string.
value_type = "str"
try:
# Try int.
int(value)
value_type = "int"
except ValueError:
try:
# Try float.
float(value)
value_type = "float"
except ValueError:
pass
attribute_types[name] = value_type
except Exception:
pass
if i + 1 == num_lines:
break
# Set attribute metadata and then set additional metadata.
dataset.metadata.attribute_types = attribute_types
dataset.metadata.attributes = len(attribute_types)
def set_meta(self, dataset, overwrite=True, **kwd):
self.set_attribute_metadata(dataset)
i = 0
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
if len(elems) == 9:
try:
int(elems[3])
int(elems[4])
break
except Exception:
pass
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
def get_estimated_display_viewport(self, dataset):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
formats. This function should correctly handle both...
"""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max(viewport_feature_count, 500) # maximum number of lines to check; includes comment lines
if self.displayable(dataset):
try:
seqid = None
start = sys.maxsize
stop = 0
with compression_utils.get_fileobj(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
try:
if line.startswith("##sequence-region"): # ##sequence-region IV 6000000 6030000
elems = line.rstrip("\n\r").split()
if len(elems) > 3:
# line looks like:
# sequence-region ctg123 1 1497228
seqid = elems[1] # IV
start = int(elems[2]) # 6000000
stop = int(elems[3]) # 6030000
break # use location declared in file
elif len(elems) == 2 and elems[1].find("..") > 0:
# line looks like this:
# sequence-region X:120000..140000
elems = elems[1].split(":")
seqid = elems[0]
start = int(elems[1].split("..")[0])
stop = int(elems[1].split("..")[1])
break # use location declared in file
else:
log.debug(f"line ({line}) uses an unsupported ##sequence-region definition.")
# break #no break, if bad definition, we try another line
elif line.startswith("browser position"):
# Allow UCSC style browser and track info in the GFF file
pos_info = line.split()[-1]
seqid, startend = pos_info.split(":")
start, stop = map(int, startend.split("-"))
break # use location declared in file
elif not line.startswith(("#", "track", "browser")):
viewport_feature_count -= 1
elems = line.rstrip("\n\r").split("\t")
if len(elems) > 3:
if not seqid:
# We can only set the viewport for a single chromosome
seqid = elems[0]
if seqid == elems[0]:
# Make sure we have not spanned chromosomes
start = min(start, int(elems[3]))
stop = max(stop, int(elems[4]))
except Exception:
# most likely start/stop is not an int or not enough fields
pass
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if seqid is not None:
return (seqid, str(start), str(stop)) # Necessary to return strings?
except Exception:
log.exception("Unexpected error")
return (None, None, None) # could not determine viewport
def ucsc_links(self, dataset, type, app, base_url):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport(dataset)
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("ucsc"):
redirect_url = quote_plus(
f"{site_url}db={dataset.dbkey}&position={seqid}:{start}-{stop}&hgt.customText=%s"
)
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def gbrowse_links(self, dataset, type, app, base_url):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport(dataset)
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("gbrowse", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("gbrowse"):
if seqid.startswith("chr") and len(seqid) > 3:
seqid = seqid[3:]
redirect_url = quote_plus(f"{site_url}/?q={seqid}:{start}..{stop}&eurl=%s")
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in gff format
GFF lines have nine required fields that must be tab-separated.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('gff.gff3')
>>> Gff().sniff( fname )
False
>>> fname = get_test_fname('test.gff')
>>> Gff().sniff( fname )
True
"""
if len(get_headers(file_prefix, "\t", count=2)) < 2:
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t"):
if not hdr or hdr == [""]:
continue
hdr0_parts = hdr[0].split()
if hdr0_parts[0] == "##gff-version":
return hdr0_parts[1].startswith("2")
# The gff-version header comment may have been stripped, so inspect the data
if hdr[0].startswith("#"):
continue
if len(hdr) != 9:
return False
try:
int(hdr[3])
int(hdr[4])
except Exception:
return False
if hdr[5] != ".":
try:
float(hdr[5])
except Exception:
return False
if hdr[6] not in data.valid_strand:
return False
if hdr[7] not in self.valid_gff_frame:
return False
found_valid_lines = True
return found_valid_lines
except Exception:
return False
# ------------- Dataproviders
# redefine bc super is Tabular
@dataproviders.decorators.dataprovider_factory(
"genomic-region", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dataprovider(self, dataset, **settings):
return dataproviders.dataset.GenomicRegionDataProvider(dataset, 0, 3, 4, **settings)
@dataproviders.decorators.dataprovider_factory(
"genomic-region-dict", dataproviders.dataset.GenomicRegionDataProvider.settings
)
def genomic_region_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.genomic_region_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory("interval", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dataprovider(self, dataset, **settings):
return dataproviders.dataset.IntervalDataProvider(dataset, 0, 3, 4, 6, 2, **settings)
@dataproviders.decorators.dataprovider_factory("interval-dict", dataproviders.dataset.IntervalDataProvider.settings)
def interval_dict_dataprovider(self, dataset, **settings):
settings["named_columns"] = True
return self.interval_dataprovider(dataset, **settings)
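# Illustrative sketch (editor's addition): Gff.sniff_prefix above either trusts an
# explicit "##gff-version 2" pragma or validates nine tab-separated columns with
# integer start/end, a numeric (or ".") score, a valid strand and a valid frame.
# A hypothetical GFF2 record that satisfies the per-column checks:
_EXAMPLE_GFF_LINE = "\t".join(
    ["chrI", "curated", "exon", "1000", "2000", ".", "+", ".", "Transcript B0273.1"]
)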
class Gff3(Gff):
"""Tab delimited data in Gff3 format"""
edam_format = "format_1975"
file_ext = "gff3"
valid_gff3_strand = ["+", "-", ".", "?"]
valid_gff3_phase = Gff.valid_gff_frame
column_names = ["Seqid", "Source", "Type", "Start", "End", "Score", "Strand", "Phase", "Attributes"]
track_type = Interval.track_type
MetadataElement(
name="column_types",
default=["str", "str", "str", "int", "int", "float", "str", "int", "list"],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
)
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Gff.__init__(self, **kwd)
def set_meta(self, dataset, overwrite=True, **kwd):
self.set_attribute_metadata(dataset)
i = 0
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
valid_start = False
valid_end = False
if len(elems) == 9:
try:
start = int(elems[3])
valid_start = True
except Exception:
if elems[3] == ".":
valid_start = True
try:
end = int(elems[4])
valid_end = True
except Exception:
if elems[4] == ".":
valid_end = True
strand = elems[6]
phase = elems[7]
if (
valid_start
and valid_end
and start < end
and strand in self.valid_gff3_strand
and phase in self.valid_gff3_phase
):
break
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in GFF version 3 format
GFF 3 format:
1) adds a mechanism for representing more than one level
of hierarchical grouping of features and subfeatures.
2) separates the ideas of group membership and feature name/id
3) constrains the feature type field to be taken from a controlled
vocabulary.
4) allows a single feature, such as an exon, to belong to more than
one group at a time.
5) provides an explicit convention for pairwise alignments
6) provides an explicit convention for features that occupy disjunct regions
The format consists of 9 columns, separated by tabs (NOT spaces).
Undefined fields are replaced with the "." character, as described in the original GFF spec.
For complete details see http://song.sourceforge.net/gff3.shtml
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test.gff' )
>>> Gff3().sniff( fname )
False
>>> fname = get_test_fname( 'test.gtf' )
>>> Gff3().sniff( fname )
False
>>> fname = get_test_fname('gff.gff3')
>>> Gff3().sniff( fname )
True
>>> fname = get_test_fname( 'grch37.75.gtf' )
>>> Gff3().sniff( fname )
False
"""
if len(get_headers(file_prefix, "\t", count=2)) < 2:
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t"):
if not hdr or hdr == [""]:
continue
hdr0_parts = hdr[0].split()
if hdr0_parts[0] == "##gff-version":
return hdr0_parts[1].startswith("3")
# The gff-version header comment may have been stripped, so inspect the data
if hdr[0].startswith("#"):
continue
if len(hdr) != 9:
return False
try:
int(hdr[3])
except Exception:
if hdr[3] != ".":
return False
try:
int(hdr[4])
except Exception:
if hdr[4] != ".":
return False
if hdr[5] != ".":
try:
float(hdr[5])
except Exception:
return False
if hdr[6] not in self.valid_gff3_strand:
return False
if hdr[7] not in self.valid_gff3_phase:
return False
parse_gff3_attributes(hdr[8])
found_valid_lines = True
return found_valid_lines
except Exception:
return False
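# Illustrative sketch (editor's addition): GFF3 differs mainly in the ninth column,
# which holds tag=value attributes handled by parse_gff3_attributes, and in allowing
# "." for undefined start/end. A hypothetical record that passes the column checks in
# Gff3.sniff_prefix:
_EXAMPLE_GFF3_LINE = "\t".join(
    ["ctg123", "example", "mRNA", "1300", "9000", ".", "+", ".",
     "ID=mRNA0001;Parent=gene0001"]
)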
class Gtf(Gff):
"""Tab delimited data in Gtf format"""
edam_format = "format_2306"
file_ext = "gtf"
column_names = ["Seqname", "Source", "Feature", "Start", "End", "Score", "Strand", "Frame", "Attributes"]
track_type = Interval.track_type
MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="column_types",
default=["str", "str", "str", "int", "int", "float", "str", "int", "list"],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
)
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in gtf format
GTF lines have nine required fields that must be tab-separated. The first eight GTF fields are the same as GFF.
The group field has been expanded into a list of attributes. Each attribute consists of a type/value pair.
Attributes must end in a semi-colon, and be separated from any following attribute by exactly one space.
The attribute list must begin with the two mandatory attributes:
gene_id value - A globally unique identifier for the genomic source of the sequence.
transcript_id value - A globally unique identifier for the predicted transcript.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format4
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( '1.bed' )
>>> Gtf().sniff( fname )
False
>>> fname = get_test_fname( 'test.gff' )
>>> Gtf().sniff( fname )
False
>>> fname = get_test_fname( 'test.gtf' )
>>> Gtf().sniff( fname )
True
>>> fname = get_test_fname( 'grch37.75.gtf' )
>>> Gtf().sniff( fname )
True
"""
if len(get_headers(file_prefix, "\t", count=2)) < 2:
return False
try:
found_valid_lines = False
for hdr in iter_headers(file_prefix, "\t"):
if not hdr or hdr == [""]:
continue
hdr0_parts = hdr[0].split()
if hdr0_parts[0] == "##gff-version" and not hdr0_parts[1].startswith("2"):
return False
# The gff-version header comment may have been stripped, so inspect the data
if hdr[0].startswith("#"):
continue
if len(hdr) != 9:
return False
try:
int(hdr[3])
int(hdr[4])
except Exception:
return False
if hdr[5] != ".":
try:
float(hdr[5])
except Exception:
return False
if hdr[6] not in data.valid_strand:
return False
if hdr[7] not in self.valid_gff_frame:
return False
# Check attributes for gene_id (transcript_id is also mandatory
# but not for genes)
attributes = parse_gff_attributes(hdr[8])
if "gene_id" not in attributes:
return False
found_valid_lines = True
return found_valid_lines
except Exception:
return False
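# Illustrative sketch (editor's addition): Gtf.sniff_prefix above additionally requires
# the attribute column to carry a gene_id key. A hypothetical record that would pass:
_EXAMPLE_GTF_LINE = "\t".join(
    ["chr1", "example", "exon", "11869", "12227", ".", "+", ".",
     'gene_id "ENSG00000223972"; transcript_id "ENST00000456328";']
)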
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Wiggle(Tabular, _RemoteCallMixin):
"""Tab delimited data in wiggle format"""
edam_format = "format_3005"
file_ext = "wig"
track_type = "LineTrack"
data_sources = {"data": "bigwig", "index": "bigwig"}
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
self.add_display_app("gbrowse", "display in Gbrowse", "as_gbrowse_display_file", "gbrowse_links")
def get_estimated_display_viewport(self, dataset):
"""Return a chrom, start, stop tuple for viewing a file."""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max(viewport_feature_count, 500) # maximum number of lines to check; includes comment lines
if self.displayable(dataset):
try:
chrom = None
start = sys.maxsize
end = 0
span = 1
step = None
with open(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
try:
if line.startswith("browser"):
chr_info = line.rstrip("\n\r").split()[-1]
chrom, coords = chr_info.split(":")
start, end = map(int, coords.split("-"))
break # use the browser line
# variableStep chrom=chr20
if line and (
line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")
):
if chrom is not None:
break # different chrom or different section of the chrom
chrom = line.rstrip("\n\r").split("chrom=")[1].split()[0]
if "span=" in line:
span = int(line.rstrip("\n\r").split("span=")[1].split()[0])
if "step=" in line:
step = int(line.rstrip("\n\r").split("step=")[1].split()[0])
start = int(line.rstrip("\n\r").split("start=")[1].split()[0])
else:
fields = line.rstrip("\n\r").split()
if fields:
if step is not None:
if not end:
end = start + span
else:
end += step
else:
start = min(int(fields[0]), start)
end = max(end, int(fields[0]) + span)
viewport_feature_count -= 1
except Exception:
pass
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if chrom is not None:
return (chrom, str(start), str(end)) # Necessary to return strings?
except Exception:
log.exception("Unexpected error")
return (None, None, None) # could not determine viewport
def gbrowse_links(self, dataset, type, app, base_url):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("gbrowse", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("gbrowse"):
if chrom.startswith("chr") and len(chrom) > 3:
chrom = chrom[3:]
redirect_url = quote_plus(f"{site_url}/?q={chrom}:{start}..{stop}&eurl=%s")
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def ucsc_links(self, dataset, type, app, base_url):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("ucsc"):
redirect_url = quote_plus(
f"{site_url}db={dataset.dbkey}&position={chrom}:{start}-{stop}&hgt.customText=%s"
)
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, skipchars=["track", "#"])
def set_meta(self, dataset, overwrite=True, **kwd):
max_data_lines = None
i = 0
for i, line in enumerate(open(dataset.file_name)): # noqa: B007
line = line.rstrip("\r\n")
if line and not line.startswith("#"):
elems = line.split("\t")
try:
# variableStep format is nucleotide position\tvalue\n,
# fixedStep is value\n
# "Wiggle track data values can be integer or real, positive or negative values"
float(elems[0])
break
except Exception:
# We are either in the track definition line or in a declaration line
pass
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
# we'll arbitrarily only use the first 100 data lines in this wig file to calculate tabular attributes (column types)
# this should be sufficient, except when we have mixed wig track types (bed, variable, fixed),
# but those cases are not a single table that would have consistent column definitions
# optional metadata values set in Tabular class will be 'None'
max_data_lines = 100
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i, max_data_lines=max_data_lines)
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in wiggle format
The .wig format is line-oriented. Wiggle data is preceded by a track definition line,
which adds a number of options for controlling the default display of this track.
Following the track definition line is the track data, which can be entered in several
different formats.
The track definition line begins with the word 'track' followed by the track type.
The track type with version is REQUIRED, and it currently must be wiggle_0. For example,
track type=wiggle_0...
For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'interv1.bed' )
>>> Wiggle().sniff( fname )
False
>>> fname = get_test_fname( 'wiggle.wig' )
>>> Wiggle().sniff( fname )
True
"""
try:
headers = iter_headers(file_prefix, None)
for hdr in headers:
if len(hdr) > 1 and hdr[0] == "track" and hdr[1].startswith("type=wiggle"):
return True
return False
except Exception:
return False
def get_track_resolution(self, dataset, start, end):
range = end - start
# Determine appropriate resolution to plot ~1000 points
resolution = math.ceil(10 ** math.ceil(math.log10(range / 1000)))
# Restrict to valid range
resolution = min(resolution, 100000)
resolution = max(resolution, 1)
return resolution
# ------------- Dataproviders
@dataproviders.decorators.dataprovider_factory("wiggle", dataproviders.dataset.WiggleDataProvider.settings)
def wiggle_dataprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory("wiggle-dict", dataproviders.dataset.WiggleDataProvider.settings)
def wiggle_dict_dataprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
settings["named_columns"] = True
return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)
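# Illustrative sketch (editor's addition): Wiggle.get_track_resolution above rounds up
# to a power of ten so that roughly 1000 points cover the requested region, clamped to
# [1, 100000]. A hypothetical check of that formula for a 2.5 Mb region:
def _example_wiggle_resolution():
    resolution = math.ceil(10 ** math.ceil(math.log10((2_600_000 - 100_000) / 1000)))
    return max(min(resolution, 100000), 1)  # -> 10000, i.e. one point per 10 kb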
@build_sniff_from_prefix
class CustomTrack(Tabular):
"""UCSC CustomTrack"""
edam_format = "format_3588"
file_ext = "customtrack"
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display app"""
Tabular.__init__(self, **kwd)
self.add_display_app("ucsc", "display at UCSC", "as_ucsc_display_file", "ucsc_links")
def set_meta(self, dataset, overwrite=True, **kwd):
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=1)
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, skipchars=["track", "#"])
def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
"""Return a chrom, start, stop tuple for viewing a file."""
# FIXME: only BED and WIG custom tracks are currently supported
# As per previously existing behavior, viewport will only be over the first intervals
max_line_count = 100 # maximum number of lines to check; includes comment lines
variable_step_wig = False
chrom = None
span = 1
if self.displayable(dataset):
try:
with open(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
if not line.startswith("#"):
try:
if variable_step_wig:
fields = line.rstrip().split()
if len(fields) == 2:
start = int(fields[0])
return (chrom, str(start), str(start + span))
elif line and (
line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")
):
chrom = line.rstrip("\n\r").split("chrom=")[1].split()[0]
if "span=" in line:
span = int(line.rstrip("\n\r").split("span=")[1].split()[0])
if "start=" in line:
start = int(line.rstrip("\n\r").split("start=")[1].split()[0])
return (chrom, str(start), str(start + span))
else:
variable_step_wig = True
else:
fields = line.rstrip().split("\t")
if len(fields) >= 3:
chrom = fields[0]
start = int(fields[1])
end = int(fields[2])
return (chrom, str(start), str(end))
except Exception:
# most likely a non-integer field has been encountered for start / stop
continue
# make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip("\n\r") == line:
assert readline_count > 0, Exception(
f"Viewport readline count exceeded for dataset {dataset.id}."
)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not max_line_count:
# exceeded viewport or total line count to check
break
except Exception:
log.exception("Unexpected error")
return (None, None, None) # could not determine viewport
def ucsc_links(self, dataset, type, app, base_url):
ret_val = []
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build("ucsc", dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites("ucsc"):
internal_url = f"{app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name)}"
display_url = quote_plus(
"%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, app.url_for(controller="root"), dataset.id, type)
)
redirect_url = quote_plus(
f"{site_url}db={dataset.dbkey}&position={chrom}:{start}-{stop}&hgt.customText=%s"
)
link = f"{internal_url}?redirect_url={redirect_url}&display_url={display_url}"
ret_val.append((site_name, link))
return ret_val
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Determines whether the file is in customtrack format.
CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking
something like this.
track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'complete.bed' )
>>> CustomTrack().sniff( fname )
False
>>> fname = get_test_fname( 'ucsc.customtrack' )
>>> CustomTrack().sniff( fname )
True
"""
headers = iter_headers(file_prefix, None)
found_at_least_one_track = False
first_line = True
for hdr in headers:
if first_line:
first_line = False
try:
if hdr[0].startswith("track"):
color_found = False
visibility_found = False
for elem in hdr[1:]:
if elem.startswith("color"):
color_found = True
if elem.startswith("visibility"):
visibility_found = True
if color_found and visibility_found:
break
if not color_found or not visibility_found:
return False
else:
return False
except Exception:
return False
else:
try:
if hdr[0] and not hdr[0].startswith("#"):
if len(hdr) < 3:
return False
try:
int(hdr[1])
int(hdr[2])
except Exception:
return False
found_at_least_one_track = True
except Exception:
return False
return found_at_least_one_track
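# Illustrative sketch (editor's addition): CustomTrack.sniff_prefix above requires a
# first "track" line carrying both color and visibility attributes, followed by at
# least one bed/interval-style data line. A hypothetical two-line file it would accept:
_EXAMPLE_CUSTOMTRACK = (
    'track name="User Track" description="User Supplied Track (from Galaxy)" '
    "color=0,0,0 visibility=1\n"
    "chr7\t127475281\t127491632\n"
)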
class ENCODEPeak(Interval):
"""
Human ENCODE peak format. There are both broad and narrow peak formats.
Formats are very similar; narrow peak has an additional column, though.
Broad peak ( http://genome.ucsc.edu/FAQ/FAQformat#format13 ):
This format is used to provide called regions of signal enrichment based
on pooled, normalized (interpreted) data. It is a BED 6+3 format.
Narrow peak ( http://genome.ucsc.edu/FAQ/FAQformat#format12 ):
This format is used to provide called peaks of signal enrichment based on
pooled, normalized (interpreted) data. It is a BED6+4 format.
"""
edam_format = "format_3612"
file_ext = "encodepeak"
column_names = ["Chrom", "Start", "End", "Name", "Score", "Strand", "SignalValue", "pValue", "qValue", "Peak"]
data_sources = {"data": "tabix", "index": "bigwig"}
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(
name="strandCol",
desc="Strand column (click box & select)",
param=metadata.ColumnParameter,
optional=True,
no_value=0,
)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def sniff(self, filename):
return False
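# Illustrative sketch (editor's addition): a hypothetical narrowPeak (BED6+4) record
# laid out according to the column_names above; sniffing is deliberately disabled for
# this datatype, so the format is only ever assigned explicitly.
_EXAMPLE_NARROWPEAK_LINE = "\t".join(
    ["chr1", "9356548", "9356648", "peak1", "0", ".", "182.0", "5.09", "-1", "50"]
)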
class ChromatinInteractions(Interval):
"""
Chromatin interactions obtained from 3C/5C/Hi-C experiments.
"""
file_ext = "chrint"
track_type = "DiagonalHeatmapTrack"
data_sources = {"data": "tabix", "index": "bigwig"}
column_names = ["Chrom1", "Start1", "End1", "Chrom2", "Start2", "End2", "Value"]
MetadataElement(name="chrom1Col", default=1, desc="Chrom1 column", param=metadata.ColumnParameter)
MetadataElement(name="start1Col", default=2, desc="Start1 column", param=metadata.ColumnParameter)
MetadataElement(name="end1Col", default=3, desc="End1 column", param=metadata.ColumnParameter)
MetadataElement(name="chrom2Col", default=4, desc="Chrom2 column", param=metadata.ColumnParameter)
MetadataElement(name="start2Col", default=5, desc="Start2 column", param=metadata.ColumnParameter)
MetadataElement(name="end2Col", default=6, desc="End2 column", param=metadata.ColumnParameter)
MetadataElement(name="valueCol", default=7, desc="Value column", param=metadata.ColumnParameter)
MetadataElement(name="columns", default=7, desc="Number of columns", readonly=True, visible=False)
def sniff(self, filename):
return False
@build_sniff_from_prefix
class ScIdx(Tabular):
"""
ScIdx files are 1-based and consist of strand-specific coordinate counts.
They always have 5 columns, and the first row is the column labels:
'chrom', 'index', 'forward', 'reverse', 'value'.
Each line following the first consists of data:
chromosome name (type str), peak index (type int), forward strand peak
count (type int), reverse strand peak count (type int), and value (type int).
The value of the 5th 'value' column is the sum of the forward and reverse
peak count values.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('cntrl_hg19.scidx')
>>> ScIdx().sniff(fname)
True
>>> Bed().sniff(fname)
False
>>> fname = get_test_fname('empty.txt')
>>> ScIdx().sniff(fname)
False
"""
file_ext = "scidx"
MetadataElement(name="columns", default=0, desc="Number of columns", readonly=True, visible=False)
MetadataElement(
name="column_types",
default=[],
param=metadata.ColumnTypesParameter,
desc="Column types",
readonly=True,
visible=False,
no_value=[],
)
def __init__(self, **kwd):
"""
Initialize scidx datatype.
"""
Tabular.__init__(self, **kwd)
# Don't set column names since the first
# line of the dataset displays them.
self.column_names = ["chrom", "index", "forward", "reverse", "value"]
def sniff_prefix(self, file_prefix: FilePrefix):
"""
Checks for 'scidx-ness.'
"""
count = 0
for count, line in enumerate(iter_headers(file_prefix, "\t")):
# The first line is always a comment like this:
# 2015-11-23 20:18:56.51;input.bam;READ1
if count == 0:
if not line[0].startswith("#"):
return False
# The 2nd line is always a specific header
elif count == 1:
if line != ["chrom", "index", "forward", "reverse", "value"]:
return False
# data line columns 2:5 need to be integers and
# the fwd and rev column need to sum to value
else:
if len(line) != 5:
return False
if not line[1].isdigit():
return False
if int(line[2]) + int(line[3]) != int(line[4]):
return False
# just check one data line
break
# at least the comment and header are required
if count >= 1:
return True
return False
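# Illustrative sketch (editor's addition): per ScIdx.sniff_prefix above, a valid file
# starts with a "#" comment, then the fixed header row, then data rows whose forward
# and reverse counts sum to the value column. Hypothetical example content:
_EXAMPLE_SCIDX = (
    "# 2015-11-23 20:18:56.51;input.bam;READ1\n"
    "chrom\tindex\tforward\treverse\tvalue\n"
    "chr1\t10\t3\t2\t5\n"
)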
if __name__ == "__main__":
import doctest
doctest.testmod(sys.modules[__name__])
|
from __future__ import annotations
from pre_commit_hooks.check_yaml import yaml
def test_readme_contains_all_hooks():
with open('README.md', encoding='UTF-8') as f:
readme_contents = f.read()
with open('.pre-commit-hooks.yaml', encoding='UTF-8') as f:
hooks = yaml.load(f)
for hook in hooks:
assert f'`{hook['id']}`' in readme_contents
|
from __future__ import annotations
from pre_commit_hooks.check_yaml import yaml
def test_readme_contains_all_hooks():
with open('README.md', encoding='UTF-8') as f:
readme_contents = f.read()
with open('.pre-commit-hooks.yaml', encoding='UTF-8') as f:
hooks = yaml.load(f)
for hook in hooks:
assert f'`{hook["id"]}`' in readme_contents
|
from fpdf import FPDF
import os
from datetime import datetime
width, height = 595, 842
topic_size = 32
subtopic_size = 24
normal_size = 16
padding = 10
next_topic = "next topic"
next_subtopic = "next subtopic"
image_trigger = "insert screenshot"
def getImportantWord(line):
words = line.split(" ")
ind = words.index("is")
return " ".join(words[ind + 1:])
def isImportant(line, trigger):
return line.lower().find(trigger) != -1
def generate(transcripts, classID):
pdf = FPDF()
inp = []
if transcripts:
inp = transcripts
pdf.add_page()
# image_count = 1
now = datetime.now()
class_datestamp = f'Class Notes\n{classID} {now.strftime('%d/%m/%Y')}'
pdf.set_font("Helvetica", size=normal_size)
pdf.set_text_color(255)
pdf.set_fill_color(r=13, g=40, b=76)
pdf.multi_cell(w=0, txt=class_datestamp, align="C",
h=normal_size / 2, fill=True)
pdf.set_text_color(0)
pdf.ln(h=padding)
pdf.set_draw_color(r=13, g=40, b=76)
for line in inp:
line = line.strip()
if isImportant(line, next_topic):
pdf.ln(h=padding)
pdf.set_text_color(255)
pdf.set_font("Helvetica", style="B", size=topic_size)
pdf.set_fill_color(r=13, g=40, b=76)
pdf.multi_cell(
txt=getImportantWord(line),
w=pdf.get_string_width(getImportantWord(line) + " "),
align="L",
fill=True,
h=topic_size / 2,
)
pdf.set_text_color(0)
pdf.ln(h=padding)
elif isImportant(line, next_subtopic):
pdf.ln(h=padding)
pdf.set_text_color(r=13, g=40, b=76)
pdf.set_font("Helvetica", style="B", size=subtopic_size)
pdf.multi_cell(
txt=getImportantWord(line),
w=pdf.get_string_width(getImportantWord(line)) + 10,
align="L",
h=subtopic_size / 2,
)
pdf.set_text_color(0)
pdf.ln(h=padding)
elif isImportant(line, image_trigger):
pdf.set_font('Helvetica', size=topic_size)
pdf.set_fill_color(167, 197, 238)
pdf.set_text_color(r=13, g=40, b=76)
pdf.multi_cell(txt="\nScreenshot here.\n\n", w=0,
h=topic_size/2, align="C", fill=True, border=1)
pdf.cell(txt='', ln=1, w=0)
pdf.set_text_color(0)
pdf.ln(h=padding)
# image_path = os.path.join(
# "data", classID.lower(), f"image_{image_count}.png"
# )
# if os.path.exists(image_path):
# pdf.ln(h=padding)
# pdf.image(image_path, w=190)
# pdf.ln(h=padding)
# image_count += 1
else:
pdf.set_font("Helvetica", size=normal_size)
pdf.multi_cell(txt=line, w=0, h=normal_size / 2)
pdf.cell(txt="", ln=1, w=0)
return pdf.output(dest="S").encode("latin-1")
# def download_image():
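# Illustrative usage sketch (editor's addition): generate() above turns transcript
# lines into PDF bytes, promoting lines that contain the "next topic", "next subtopic"
# and "insert screenshot" trigger phrases. The transcript below is hypothetical:
def _example_generate():
    transcript = [
        "The next topic is Photosynthesis",
        "Light reactions happen in the thylakoid membranes.",
        "Insert screenshot of the diagram, please.",
    ]
    return generate(transcript, classID="BIO101")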
|
from fpdf import FPDF
import os
from datetime import datetime
width, height = 595, 842
topic_size = 32
subtopic_size = 24
normal_size = 16
padding = 10
next_topic = "next topic"
next_subtopic = "next subtopic"
image_trigger = "insert screenshot"
def getImportantWord(line):
words = line.split(" ")
ind = words.index("is")
return " ".join(words[ind + 1:])
def isImportant(line, trigger):
return line.lower().find(trigger) != -1
def generate(transcripts, classID):
pdf = FPDF()
inp = []
if transcripts:
inp = transcripts
pdf.add_page()
# image_count = 1
now = datetime.now()
class_datestamp = f'Class Notes\n{classID} {now.strftime("%d/%m/%Y")}'
pdf.set_font("Helvetica", size=normal_size)
pdf.set_text_color(255)
pdf.set_fill_color(r=13, g=40, b=76)
pdf.multi_cell(w=0, txt=class_datestamp, align="C",
h=normal_size / 2, fill=True)
pdf.set_text_color(0)
pdf.ln(h=padding)
pdf.set_draw_color(r=13, g=40, b=76)
for line in inp:
line = line.strip()
if isImportant(line, next_topic):
pdf.ln(h=padding)
pdf.set_text_color(255)
pdf.set_font("Helvetica", style="B", size=topic_size)
pdf.set_fill_color(r=13, g=40, b=76)
pdf.multi_cell(
txt=getImportantWord(line),
w=pdf.get_string_width(getImportantWord(line) + " "),
align="L",
fill=True,
h=topic_size / 2,
)
pdf.set_text_color(0)
pdf.ln(h=padding)
elif isImportant(line, next_subtopic):
pdf.ln(h=padding)
pdf.set_text_color(r=13, g=40, b=76)
pdf.set_font("Helvetica", style="B", size=subtopic_size)
pdf.multi_cell(
txt=getImportantWord(line),
w=pdf.get_string_width(getImportantWord(line)) + 10,
align="L",
h=subtopic_size / 2,
)
pdf.set_text_color(0)
pdf.ln(h=padding)
elif isImportant(line, image_trigger):
pdf.set_font('Helvetica', size=topic_size)
pdf.set_fill_color(167, 197, 238)
pdf.set_text_color(r=13, g=40, b=76)
pdf.multi_cell(txt="\nScreenshot here.\n\n", w=0,
h=topic_size/2, align="C", fill=True, border=1)
pdf.cell(txt='', ln=1, w=0)
pdf.set_text_color(0)
pdf.ln(h=padding)
# image_path = os.path.join(
# "data", classID.lower(), f"image_{image_count}.png"
# )
# if os.path.exists(image_path):
# pdf.ln(h=padding)
# pdf.image(image_path, w=190)
# pdf.ln(h=padding)
# image_count += 1
else:
pdf.set_font("Helvetica", size=normal_size)
pdf.multi_cell(txt=line, w=0, h=normal_size / 2)
pdf.cell(txt="", ln=1, w=0)
return pdf.output(dest="S").encode("latin-1")
# def download_image():
|
from psychopy import core
from bcipy.display.rsvp.mode.calibration import CalibrationDisplay
from bcipy.tasks.task import Task
from bcipy.helpers.triggers import _write_triggers_from_sequence_calibration
from bcipy.helpers.stimuli import random_rsvp_calibration_seq_gen, get_task_info
from bcipy.helpers.task import (
alphabet, trial_complete_message, get_user_input, pause_calibration)
class RSVPCalibrationTask(Task):
"""RSVP Calibration Task.
Calibration task performs an RSVP stimulus sequence
to elicit an ERP. Parameters will change how many stimuli
and for how long they present. Parameters also change
color and text / image inputs.
A task begins setting up variables --> initializing eeg -->
awaiting user input to start -->
setting up stimuli --> presenting sequences -->
saving data
PARAMETERS:
----------
win (PsychoPy Display Object)
daq (Data Acquisition Object)
parameters (Dictionary)
file_save (String)
"""
def __init__(self, win, daq, parameters, file_save):
super(RSVPCalibrationTask, self).__init__()
self.window = win
self.frame_rate = self.window.getActualFrameRate()
self.parameters = parameters
self.daq = daq
self.static_clock = core.StaticPeriod(screenHz=self.frame_rate)
self.experiment_clock = core.Clock()
self.buffer_val = parameters['task_buffer_len']
self.alp = alphabet(parameters)
self.rsvp = init_calibration_display_task(
self.parameters, self.window, self.daq,
self.static_clock, self.experiment_clock)
self.file_save = file_save
trigger_save_location = f"{self.file_save}/{parameters["trigger_file_name"]}"
self.trigger_file = open(trigger_save_location, 'w')
self.wait_screen_message = parameters['wait_screen_message']
self.wait_screen_message_color = parameters[
'wait_screen_message_color']
self.stim_number = parameters['stim_number']
self.stim_length = parameters['stim_length']
self.timing = [parameters['time_target'],
parameters['time_cross'],
parameters['time_flash']]
self.color = [parameters['target_letter_color'],
parameters['fixation_color'],
parameters['stim_color']]
self.task_info_color = parameters['task_color']
self.stimuli_height = parameters['stim_height']
self.is_txt_stim = parameters['is_txt_stim']
self.eeg_buffer = parameters['eeg_buffer_len']
self.enable_breaks = parameters['enable_breaks']
def execute(self):
self.logger.debug(f'Starting {self.name()}!')
run = True
# Check user input to make sure we should be going
if not get_user_input(self.rsvp, self.wait_screen_message,
self.wait_screen_message_color,
first_run=True):
run = False
# Begin the Experiment
while run:
# Get random sequence information given stimuli parameters
(ele_sti, timing_sti,
color_sti) = random_rsvp_calibration_seq_gen(
self.alp,
stim_number=self.stim_number,
stim_length=self.stim_length,
timing=self.timing,
is_txt=self.rsvp.is_txt_stim,
color=self.color)
(task_text, task_color) = get_task_info(self.stim_number,
self.task_info_color)
# Execute the RSVP sequences
for idx_o in range(len(task_text)):
# check user input to make sure we should be going
if not get_user_input(self.rsvp, self.wait_screen_message,
self.wait_screen_message_color):
break
# Take a break every number of trials defined
if self.enable_breaks:
pause_calibration(self.window, self.rsvp, idx_o,
self.parameters)
# update task state
self.rsvp.update_task_state(
text=task_text[idx_o],
color_list=task_color[idx_o])
# Draw and flip screen
self.rsvp.draw_static()
self.window.flip()
# Get height
self.rsvp.sti.height = self.stimuli_height
# Schedule a sequence
self.rsvp.stimuli_sequence = ele_sti[idx_o]
# check if text stimuli or not for color information
if self.is_txt_stim:
self.rsvp.stimuli_colors = color_sti[idx_o]
self.rsvp.stimuli_timing = timing_sti[idx_o]
# Wait for a time
core.wait(self.buffer_val)
# Do the sequence
last_sequence_timing = self.rsvp.do_sequence()
# Write triggers for the sequence
_write_triggers_from_sequence_calibration(
last_sequence_timing, self.trigger_file)
# Wait for a time
core.wait(self.buffer_val)
# Set run to False to stop looping
run = False
# Say Goodbye!
self.rsvp.text = trial_complete_message(
self.window, self.parameters)
self.rsvp.draw_static()
self.window.flip()
# Give the system time to process
core.wait(self.buffer_val)
if self.daq.is_calibrated:
_write_triggers_from_sequence_calibration(
['offset', self.daq.offset], self.trigger_file, offset=True)
# Close this session's trigger file and return some data
self.trigger_file.close()
# Wait some time before exiting so there is trailing eeg data saved
core.wait(self.eeg_buffer)
return self.file_save
def name(self):
return 'RSVP Calibration Task'
def init_calibration_display_task(
parameters, window, daq, static_clock, experiment_clock):
return CalibrationDisplay(
window,
static_clock,
experiment_clock,
daq.marker_writer,
info_text=parameters['info_text'],
info_color=parameters['info_color'],
info_pos=(parameters['text_pos_x'],
parameters['text_pos_y']),
info_height=parameters['info_height'],
info_font=parameters['info_font'],
task_color=[parameters['task_color']],
task_font=parameters['task_font'],
task_height=parameters['task_height'],
stim_font=parameters['stim_font'],
stim_pos=(parameters['stim_pos_x'],
parameters['stim_pos_y']),
stim_height=parameters['stim_height'],
stim_colors=[parameters['stim_color']* 10],
is_txt_stim=parameters['is_txt_stim'],
trigger_type=parameters['trigger_type'],
space_char=parameters['stim_space_char'])
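# Illustrative sketch (editor's addition): RSVPCalibrationTask and
# init_calibration_display_task above read their configuration from a flat parameters
# dict; the keys referenced in this module are collected here for reference:
_EXAMPLE_PARAMETER_KEYS = [
    'task_buffer_len', 'trigger_file_name', 'wait_screen_message',
    'wait_screen_message_color', 'stim_number', 'stim_length', 'time_target',
    'time_cross', 'time_flash', 'target_letter_color', 'fixation_color', 'stim_color',
    'task_color', 'stim_height', 'is_txt_stim', 'eeg_buffer_len', 'enable_breaks',
    'info_text', 'info_color', 'text_pos_x', 'text_pos_y', 'info_height', 'info_font',
    'task_font', 'task_height', 'stim_font', 'stim_pos_x', 'stim_pos_y',
    'trigger_type', 'stim_space_char',
]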
|
from psychopy import core
from bcipy.display.rsvp.mode.calibration import CalibrationDisplay
from bcipy.tasks.task import Task
from bcipy.helpers.triggers import _write_triggers_from_sequence_calibration
from bcipy.helpers.stimuli import random_rsvp_calibration_seq_gen, get_task_info
from bcipy.helpers.task import (
alphabet, trial_complete_message, get_user_input, pause_calibration)
class RSVPCalibrationTask(Task):
"""RSVP Calibration Task.
Calibration task performs an RSVP stimulus sequence
to elicit an ERP. Parameters will change how many stimuli
and for how long they present. Parameters also change
color and text / image inputs.
A task begins setting up variables --> initializing eeg -->
awaiting user input to start -->
setting up stimuli --> presenting sequences -->
saving data
PARAMETERS:
----------
win (PsychoPy Display Object)
daq (Data Acquisition Object)
parameters (Dictionary)
file_save (String)
"""
def __init__(self, win, daq, parameters, file_save):
super(RSVPCalibrationTask, self).__init__()
self.window = win
self.frame_rate = self.window.getActualFrameRate()
self.parameters = parameters
self.daq = daq
self.static_clock = core.StaticPeriod(screenHz=self.frame_rate)
self.experiment_clock = core.Clock()
self.buffer_val = parameters['task_buffer_len']
self.alp = alphabet(parameters)
self.rsvp = init_calibration_display_task(
self.parameters, self.window, self.daq,
self.static_clock, self.experiment_clock)
self.file_save = file_save
trigger_save_location = f"{self.file_save}/{parameters['trigger_file_name']}"
self.trigger_file = open(trigger_save_location, 'w')
self.wait_screen_message = parameters['wait_screen_message']
self.wait_screen_message_color = parameters[
'wait_screen_message_color']
self.stim_number = parameters['stim_number']
self.stim_length = parameters['stim_length']
self.timing = [parameters['time_target'],
parameters['time_cross'],
parameters['time_flash']]
self.color = [parameters['target_letter_color'],
parameters['fixation_color'],
parameters['stim_color']]
self.task_info_color = parameters['task_color']
self.stimuli_height = parameters['stim_height']
self.is_txt_stim = parameters['is_txt_stim']
self.eeg_buffer = parameters['eeg_buffer_len']
self.enable_breaks = parameters['enable_breaks']
def execute(self):
self.logger.debug(f'Starting {self.name()}!')
run = True
# Check user input to make sure we should be going
if not get_user_input(self.rsvp, self.wait_screen_message,
self.wait_screen_message_color,
first_run=True):
run = False
# Begin the Experiment
while run:
# Get random sequence information given stimuli parameters
(ele_sti, timing_sti,
color_sti) = random_rsvp_calibration_seq_gen(
self.alp,
stim_number=self.stim_number,
stim_length=self.stim_length,
timing=self.timing,
is_txt=self.rsvp.is_txt_stim,
color=self.color)
(task_text, task_color) = get_task_info(self.stim_number,
self.task_info_color)
# Execute the RSVP sequences
for idx_o in range(len(task_text)):
# check user input to make sure we should be going
if not get_user_input(self.rsvp, self.wait_screen_message,
self.wait_screen_message_color):
break
# Take a break every number of trials defined
if self.enable_breaks:
pause_calibration(self.window, self.rsvp, idx_o,
self.parameters)
# update task state
self.rsvp.update_task_state(
text=task_text[idx_o],
color_list=task_color[idx_o])
# Draw and flip screen
self.rsvp.draw_static()
self.window.flip()
# Get height
self.rsvp.sti.height = self.stimuli_height
# Schedule a sequence
self.rsvp.stimuli_sequence = ele_sti[idx_o]
# check if text stimuli or not for color information
if self.is_txt_stim:
self.rsvp.stimuli_colors = color_sti[idx_o]
self.rsvp.stimuli_timing = timing_sti[idx_o]
# Wait for a time
core.wait(self.buffer_val)
# Do the sequence
last_sequence_timing = self.rsvp.do_sequence()
# Write triggers for the sequence
_write_triggers_from_sequence_calibration(
last_sequence_timing, self.trigger_file)
# Wait for a time
core.wait(self.buffer_val)
# Set run to False to stop looping
run = False
# Say Goodbye!
self.rsvp.text = trial_complete_message(
self.window, self.parameters)
self.rsvp.draw_static()
self.window.flip()
# Give the system time to process
core.wait(self.buffer_val)
if self.daq.is_calibrated:
_write_triggers_from_sequence_calibration(
['offset', self.daq.offset], self.trigger_file, offset=True)
# Close this session's trigger file and return some data
self.trigger_file.close()
# Wait some time before exiting so there is trailing eeg data saved
core.wait(self.eeg_buffer)
return self.file_save
def name(self):
return 'RSVP Calibration Task'
def init_calibration_display_task(
parameters, window, daq, static_clock, experiment_clock):
return CalibrationDisplay(
window,
static_clock,
experiment_clock,
daq.marker_writer,
info_text=parameters['info_text'],
info_color=parameters['info_color'],
info_pos=(parameters['text_pos_x'],
parameters['text_pos_y']),
info_height=parameters['info_height'],
info_font=parameters['info_font'],
task_color=[parameters['task_color']],
task_font=parameters['task_font'],
task_height=parameters['task_height'],
stim_font=parameters['stim_font'],
stim_pos=(parameters['stim_pos_x'],
parameters['stim_pos_y']),
stim_height=parameters['stim_height'],
stim_colors=[parameters['stim_color']* 10],
is_txt_stim=parameters['is_txt_stim'],
trigger_type=parameters['trigger_type'],
space_char=parameters['stim_space_char'])
|
"""
@author: Gabriele Girelli
@contact: [email protected]
"""
import argparse
from collections import defaultdict
from czifile import CziFile # type: ignore
import logging
from logging import Logger, getLogger
from nd2reader import ND2Reader # type: ignore
from nd2reader.parser import Parser as ND2Parser # type: ignore
import numpy as np # type: ignore
from radiantkit import stat
from radiantkit.string import TIFFNameTemplate as TNTemplate
import re
import six # type: ignore
from typing import DefaultDict, Iterable, List, Optional, Tuple
import warnings
import xml.etree.ElementTree as ET
class ND2Reader2(ND2Reader):
_xy_resolution: float
_z_resolution: DefaultDict[float, int]
_dtype: str
def __init__(self, filename):
super(ND2Reader2, self).__init__(filename)
self._set_xy_resolution()
self._set_z_resolution()
self._set_proposed_dtype()
def log_details(self, logger: Logger = getLogger()) -> None:
logger.info(f"Input: '{self.filename}'")
logger.info(
"".join(
[
f"Found {self.field_count()} field(s) of view, ",
f"with {self.channel_count()} channel(s).",
]
)
)
logger.info(f"Channels: {list(self.get_channel_names())}.")
if self.is3D():
logger.info(
"".join(
[
f"XYZ size: {self.sizes["x"]} x ",
f"{self.sizes["y"]} x {self.sizes["z"]}",
]
)
)
logger.info(f"XY resolution: {self.xy_resolution:.3f} um")
logger.info(f"Delta Z value(s): {list(self._z_resolution.keys())} um")
else:
logger.info(f"XY size: {self.sizes["x"]} x {self.sizes["y"]}")
logger.info(f"XY resolution: {self.xy_resolution} um")
logger.info(
f"Format: '{self.dtype}' [{self.pixel_type_tag}:{self.bits_per_pixel}]"
)
@property
def xy_resolution(self) -> float:
return self._xy_resolution
@property
def z_resolution(self) -> List[Tuple[float, int]]:
return list(self._z_resolution.items())
@property
def z_resolution_mode(self) -> float:
return stat.get_hist_mode(list(self._z_resolution.items()))
@property
def pixel_type_tag(self) -> int:
return self.parser._raw_metadata.image_attributes[six.b("SLxImageAttributes")][
six.b("ePixelType")
]
@property
def bits_per_pixel(self) -> int:
return self.parser._raw_metadata.image_attributes[six.b("SLxImageAttributes")][
six.b("uiBpcInMemory")
]
@property
def dtype(self) -> str:
return self._dtype
def _set_xy_resolution(self):
self._xy_resolution = self.metadata["pixel_microns"]
if 0 == self._xy_resolution:
logging.warning("XY resolution set to 0! (possibly incorrect obj. setup)")
def _set_z_resolution(self):
self._z_resolution: DefaultDict[float, int] = defaultdict(lambda: 0)
for field_id in range(self.field_count()):
for delta_z in self.get_field_resolutionZ(field_id):
self._z_resolution[delta_z] += 1
def _set_proposed_dtype(self) -> None:
dtype_tag: DefaultDict = defaultdict(lambda: "float")
dtype_tag[1] = "uint"
dtype_tag[2] = "int"
dtype = f"{dtype_tag[self.pixel_type_tag]}{self.bits_per_pixel}"
supported_dtypes = (
"uint8",
"uint16",
"uint32",
"int8",
"int16",
"int32",
"float8",
"float16",
"float32",
)
self._dtype = "float64" if dtype not in supported_dtypes else dtype
def field_count(self) -> int:
if "v" not in self.axes:
return 1
return self.sizes["v"]
def isLive(self) -> bool:
if "t" in self.axes:
return 1 < self.sizes["t"]
return False
def is3D(self) -> bool:
return "z" in self.axes
def has_multi_channels(self) -> bool:
if "c" in self.axes:
return 1 < self.channel_count()
return False
def get_channel_names(self) -> Iterable[str]:
for channel in self.metadata["channels"]:
yield channel.lower()
def channel_count(self) -> int:
if "c" not in self.sizes:
n = 1
else:
n = self.sizes["c"]
assert len(list(self.get_channel_names())) == n, "channel count mismatch."
return n
def set_axes_for_bundling(self):
if self.is3D():
self.bundle_axes = "zyxc" if self.has_multi_channels() else "zyx"
else:
self.bundle_axes = "yxc" if "c" in self.axes else "yx"
def get_Z_loop_step(self, parser) -> Tuple[int, float, str]:
image_text_info = parser._raw_metadata.image_text_info[
six.b("SLxImageTextInfo")
]
metadata_fields = [x for x in image_text_info.values() if b"Z Stack Loop" in x]
if 0 == len(metadata_fields):
return (0, np.nan, "")
metadata = metadata_fields[0]
parsed = re.search(
"Z Stack Loop: ([0-9]+)\r\n- Step: ([0-9,\\.]+) ([^\r\n]*)",
metadata.decode(),
)
if parsed is None:
return (0, np.nan, "")
parsed_fields = parsed.groups()
return (
int(parsed_fields[0]),
float(parsed_fields[1].replace(",", ".")),
parsed_fields[2],
)
def get_field_resolutionZ(self, field_id: int) -> List[float]:
with open(self.filename, "rb") as ND2H:
parser = ND2Parser(ND2H)
z_fields, z_step, z_unit = self.get_Z_loop_step(parser)
if 0 != z_fields:
return [z_step]
Zdata = np.array(parser._raw_metadata.z_data)
Zlevels = np.array(parser.metadata["z_levels"]).astype("int")
Zlevels = Zlevels + len(Zlevels) * field_id
Zdata = Zdata[Zlevels]
return np.round(np.diff(Zdata), 3).tolist()
def select_channels(self, channels: List[str]) -> List[str]:
return [
c.lower() for c in channels if c.lower() in list(self.get_channel_names())
]
def get_tiff_path(
self, template: TNTemplate, channel_id: int, field_id: int
) -> str:
d = dict(
channel_name=self.metadata["channels"][channel_id].lower(),
channel_id=f"{(channel_id+1):03d}",
series_id=f"{(field_id+1):03d}",
dimensions=len(self.bundle_axes),
axes_order="".join(self.bundle_axes),
)
return f"{template.safe_substitute(d)}.tiff"
class CziFile2(CziFile):
__pixels: Optional[np.ndarray] = None
axes: str
def __init__(self, filename):
super(CziFile2, self).__init__(filename)
@property
def pixels(self) -> np.ndarray:
if self.__pixels is None:
with warnings.catch_warnings(record=True):
self.__pixels = self.asarray()
return self.__pixels
def log_details(self, logger: Logger = getLogger()) -> None:
logger.info(f"Input: '{self._fh.name}'")
logger.info(
"".join(
[
f"Found {self.field_count()} field(s) of view, ",
f"with {self.channel_count()} channel(s).",
]
)
)
logger.info(f"Channels: {list(self.get_channel_names())}.")
x_size = self.pixels.shape[self.axes.index("X")]
y_size = self.pixels.shape[self.axes.index("Y")]
if self.is3D:
z_size = self.pixels.shape[self.axes.index("Z")]
logger.info(f"XYZ size: {x_size} x {y_size} x {z_size}")
else:
logger.info(f"XY size: {x_size} x {y_size}")
for axis_name, axis_resolution in self.get_resolution().items():
logger.info(f"{axis_name} resolution: {axis_resolution*1e6:.3f} um")
def field_count(self) -> int:
if "S" not in self.axes:
return 1
return self.pixels.shape[self.axes.index("S")]
def isLive(self) -> bool:
if "T" in self.axes:
return 1 < self.shape[self.axes.index("T")]
return False
def is3D(self) -> bool:
return "Z" in self.axes
def has_multi_channels(self) -> bool:
if "C" in self.axes:
return 1 < self.channel_count()
return False
def get_channel_names(self) -> Iterable[str]:
channel_path = "Metadata/DisplaySetting/Channels/Channel/DyeName"
for x in ET.fromstring(self.metadata()).findall(channel_path):
if x.text is None:
continue
yield x.text.replace(" ", "").lower()
def channel_count(self) -> int:
if "C" not in self.axes:
n = 1
else:
n = self.pixels.shape[self.axes.index("C")]
assert len(list(self.get_channel_names())) == n, "channel count mismatch."
return n
def get_axis_resolution(self, axis: str) -> float:
resolution_path = "Metadata/Scaling/Items/Distance"
for x in ET.fromstring(self.metadata()).findall(resolution_path):
if x.attrib["Id"] == axis:
if x[0].text is not None:
return float(x[0].text)
return 1
def get_resolution(self) -> dict:
resolution = {}
for axis in "XYZ":
resolution[axis] = self.get_axis_resolution(axis)
return resolution
def squeeze_axes(self, skip: str) -> None:
axes = list(self.axes)
for axis in axes:
axis_id = axes.index(axis)
if axis in skip:
continue
self.__pixels = np.squeeze(self.pixels, axis_id)
self.shape = self.pixels.shape
axes.pop(axis_id)
self.axes = "".join(axes)
def reorder_axes(self, bundle_axes: str) -> None:
if self.axes == bundle_axes:
return
bundle_axes_list = [a for a in bundle_axes if a in self.axes]
assert len(bundle_axes_list) == len(self.axes)
assert all([axis in self.axes for axis in bundle_axes_list])
self.__pixels = np.moveaxis(
self.pixels,
range(len(self.axes)),
[bundle_axes_list.index(axis) for axis in self.axes],
)
self.shape = self.pixels.shape
self.axes = "".join(bundle_axes_list)
def get_channel_pixels(
self, args: argparse.Namespace, field_id: Optional[int] = None
) -> Iterable[Tuple[np.ndarray, int]]:
if field_id is not None:
field = self.pixels[field_id, :]
else:
assert "C" == self.axes[0]
field = self.pixels
for channel_id in range(self.channel_count()):
yield (field[channel_id], channel_id)
def select_channels(self, channels: List[str]) -> List[str]:
return [
c.lower() for c in channels if c.lower() in list(self.get_channel_names())
]
def get_tiff_path(
self, template: TNTemplate, channel_id: int, field_id: int
) -> str:
d = dict(
channel_name=list(self.get_channel_names())[channel_id],
channel_id=f"{(channel_id+1):03d}",
series_id=f"{(field_id+1):03d}",
dimensions=3,
axes_order="ZYX",
)
return f"{template.safe_substitute(d)}.tiff"
|
"""
@author: Gabriele Girelli
@contact: [email protected]
"""
import argparse
from collections import defaultdict
from czifile import CziFile # type: ignore
import logging
from logging import Logger, getLogger
from nd2reader import ND2Reader # type: ignore
from nd2reader.parser import Parser as ND2Parser # type: ignore
import numpy as np # type: ignore
from radiantkit import stat
from radiantkit.string import TIFFNameTemplate as TNTemplate
import re
import six # type: ignore
from typing import DefaultDict, Iterable, List, Optional, Tuple
import warnings
import xml.etree.ElementTree as ET
class ND2Reader2(ND2Reader):
_xy_resolution: float
_z_resolution: DefaultDict[float, int]
_dtype: str
def __init__(self, filename):
super(ND2Reader2, self).__init__(filename)
self._set_xy_resolution()
self._set_z_resolution()
self._set_proposed_dtype()
def log_details(self, logger: Logger = getLogger()) -> None:
logger.info(f"Input: '{self.filename}'")
logger.info(
"".join(
[
f"Found {self.field_count()} field(s) of view, ",
f"with {self.channel_count()} channel(s).",
]
)
)
logger.info(f"Channels: {list(self.get_channel_names())}.")
if self.is3D():
logger.info(
"".join(
[
f"XYZ size: {self.sizes['x']} x ",
f"{self.sizes['y']} x {self.sizes['z']}",
]
)
)
logger.info(f"XY resolution: {self.xy_resolution:.3f} um")
logger.info(f"Delta Z value(s): {list(self._z_resolution.keys())} um")
else:
logger.info(f"XY size: {self.sizes['x']} x {self.sizes['y']}")
logger.info(f"XY resolution: {self.xy_resolution} um")
logger.info(
f"Format: '{self.dtype}' [{self.pixel_type_tag}:{self.bits_per_pixel}]"
)
@property
def xy_resolution(self) -> float:
return self._xy_resolution
@property
def z_resolution(self) -> List[Tuple[float, int]]:
return list(self._z_resolution.items())
@property
def z_resolution_mode(self) -> float:
return stat.get_hist_mode(list(self._z_resolution.items()))
@property
def pixel_type_tag(self) -> int:
return self.parser._raw_metadata.image_attributes[six.b("SLxImageAttributes")][
six.b("ePixelType")
]
@property
def bits_per_pixel(self) -> int:
return self.parser._raw_metadata.image_attributes[six.b("SLxImageAttributes")][
six.b("uiBpcInMemory")
]
@property
def dtype(self) -> str:
return self._dtype
def _set_xy_resolution(self):
self._xy_resolution = self.metadata["pixel_microns"]
if 0 == self._xy_resolution:
logging.warning("XY resolution set to 0! (possibly incorrect obj. setup)")
def _set_z_resolution(self):
self._z_resolution: DefaultDict[float, int] = defaultdict(lambda: 0)
for field_id in range(self.field_count()):
for delta_z in self.get_field_resolutionZ(field_id):
self._z_resolution[delta_z] += 1
def _set_proposed_dtype(self) -> None:
dtype_tag: DefaultDict = defaultdict(lambda: "float")
dtype_tag[1] = "uint"
dtype_tag[2] = "int"
dtype = f"{dtype_tag[self.pixel_type_tag]}{self.bits_per_pixel}"
supported_dtypes = (
"uint8",
"uint16",
"uint32",
"int8",
"int16",
"int32",
"float8",
"float16",
"float32",
)
self._dtype = "float64" if dtype not in supported_dtypes else dtype
def field_count(self) -> int:
if "v" not in self.axes:
return 1
return self.sizes["v"]
def isLive(self) -> bool:
if "t" in self.axes:
return 1 < self.sizes["t"]
return False
def is3D(self) -> bool:
return "z" in self.axes
def has_multi_channels(self) -> bool:
if "c" in self.axes:
return 1 < self.channel_count()
return False
def get_channel_names(self) -> Iterable[str]:
for channel in self.metadata["channels"]:
yield channel.lower()
def channel_count(self) -> int:
if "c" not in self.sizes:
n = 1
else:
n = self.sizes["c"]
assert len(list(self.get_channel_names())) == n, "channel count mismatch."
return n
def set_axes_for_bundling(self):
if self.is3D():
self.bundle_axes = "zyxc" if self.has_multi_channels() else "zyx"
else:
self.bundle_axes = "yxc" if "c" in self.axes else "yx"
def get_Z_loop_step(self, parser) -> Tuple[int, float, str]:
image_text_info = parser._raw_metadata.image_text_info[
six.b("SLxImageTextInfo")
]
metadata_fields = [x for x in image_text_info.values() if b"Z Stack Loop" in x]
if 0 == len(metadata_fields):
return (0, np.nan, "")
metadata = metadata_fields[0]
parsed = re.search(
"Z Stack Loop: ([0-9]+)\r\n- Step: ([0-9,\\.]+) ([^\r\n]*)",
metadata.decode(),
)
if parsed is None:
return (0, np.nan, "")
parsed_fields = parsed.groups()
return (
int(parsed_fields[0]),
float(parsed_fields[1].replace(",", ".")),
parsed_fields[2],
)
def get_field_resolutionZ(self, field_id: int) -> List[float]:
with open(self.filename, "rb") as ND2H:
parser = ND2Parser(ND2H)
z_fields, z_step, z_unit = self.get_Z_loop_step(parser)
if 0 != z_fields:
return [z_step]
Zdata = np.array(parser._raw_metadata.z_data)
Zlevels = np.array(parser.metadata["z_levels"]).astype("int")
Zlevels = Zlevels + len(Zlevels) * field_id
Zdata = Zdata[Zlevels]
return np.round(np.diff(Zdata), 3).tolist()
def select_channels(self, channels: List[str]) -> List[str]:
return [
c.lower() for c in channels if c.lower() in list(self.get_channel_names())
]
def get_tiff_path(
self, template: TNTemplate, channel_id: int, field_id: int
) -> str:
d = dict(
channel_name=self.metadata["channels"][channel_id].lower(),
channel_id=f"{(channel_id+1):03d}",
series_id=f"{(field_id+1):03d}",
dimensions=len(self.bundle_axes),
axes_order="".join(self.bundle_axes),
)
return f"{template.safe_substitute(d)}.tiff"
class CziFile2(CziFile):
__pixels: Optional[np.ndarray] = None
axes: str
def __init__(self, filename):
super(CziFile2, self).__init__(filename)
@property
def pixels(self) -> np.ndarray:
if self.__pixels is None:
with warnings.catch_warnings(record=True):
self.__pixels = self.asarray()
return self.__pixels
def log_details(self, logger: Logger = getLogger()) -> None:
logger.info(f"Input: '{self._fh.name}'")
logger.info(
"".join(
[
f"Found {self.field_count()} field(s) of view, ",
f"with {self.channel_count()} channel(s).",
]
)
)
logger.info(f"Channels: {list(self.get_channel_names())}.")
x_size = self.pixels.shape[self.axes.index("X")]
y_size = self.pixels.shape[self.axes.index("Y")]
if self.is3D:
z_size = self.pixels.shape[self.axes.index("Z")]
logger.info(f"XYZ size: {x_size} x {y_size} x {z_size}")
else:
logger.info(f"XY size: {x_size} x {y_size}")
for axis_name, axis_resolution in self.get_resolution().items():
logger.info(f"{axis_name} resolution: {axis_resolution*1e6:.3f} um")
def field_count(self) -> int:
if "S" not in self.axes:
return 1
return self.pixels.shape[self.axes.index("S")]
def isLive(self) -> bool:
if "T" in self.axes:
return 1 < self.shape[self.axes.index("T")]
return False
def is3D(self) -> bool:
return "Z" in self.axes
def has_multi_channels(self) -> bool:
if "C" in self.axes:
return 1 < self.channel_count()
return False
def get_channel_names(self) -> Iterable[str]:
channel_path = "Metadata/DisplaySetting/Channels/Channel/DyeName"
for x in ET.fromstring(self.metadata()).findall(channel_path):
if x.text is None:
continue
yield x.text.replace(" ", "").lower()
def channel_count(self) -> int:
if "C" not in self.axes:
n = 1
else:
n = self.pixels.shape[self.axes.index("C")]
assert len(list(self.get_channel_names())) == n, "channel count mismatch."
return n
def get_axis_resolution(self, axis: str) -> float:
resolution_path = "Metadata/Scaling/Items/Distance"
for x in ET.fromstring(self.metadata()).findall(resolution_path):
if x.attrib["Id"] == axis:
if x[0].text is not None:
return float(x[0].text)
return 1
def get_resolution(self) -> dict:
resolution = {}
for axis in "XYZ":
resolution[axis] = self.get_axis_resolution(axis)
return resolution
def squeeze_axes(self, skip: str) -> None:
axes = list(self.axes)
for axis in axes:
axis_id = axes.index(axis)
if axis in skip:
continue
self.__pixels = np.squeeze(self.pixels, axis_id)
self.shape = self.pixels.shape
axes.pop(axis_id)
self.axes = "".join(axes)
def reorder_axes(self, bundle_axes: str) -> None:
if self.axes == bundle_axes:
return
bundle_axes_list = [a for a in bundle_axes if a in self.axes]
assert len(bundle_axes_list) == len(self.axes)
assert all([axis in self.axes for axis in bundle_axes_list])
self.__pixels = np.moveaxis(
self.pixels,
range(len(self.axes)),
[bundle_axes_list.index(axis) for axis in self.axes],
)
self.shape = self.pixels.shape
self.axes = "".join(bundle_axes_list)
def get_channel_pixels(
self, args: argparse.Namespace, field_id: Optional[int] = None
) -> Iterable[Tuple[np.ndarray, int]]:
if field_id is not None:
field = self.pixels[field_id, :]
else:
assert "C" == self.axes[0]
field = self.pixels
for channel_id in range(self.channel_count()):
yield (field[channel_id], channel_id)
def select_channels(self, channels: List[str]) -> List[str]:
return [
c.lower() for c in channels if c.lower() in list(self.get_channel_names())
]
def get_tiff_path(
self, template: TNTemplate, channel_id: int, field_id: int
) -> str:
d = dict(
channel_name=list(self.get_channel_names())[channel_id],
channel_id=f"{(channel_id+1):03d}",
series_id=f"{(field_id+1):03d}",
dimensions=3,
axes_order="ZYX",
)
return f"{template.safe_substitute(d)}.tiff"
|
import datetime
from src import const
from src.commands import BaseCmd
from src.config import bc
from src.message import Msg
from src.reminder import Reminder
from src.utils import Util
class ReminderCommands(BaseCmd):
def bind(self):
bc.commands.register_command(__name__, self.get_classname(), "addreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "updreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "listreminder",
permission=const.Permission.USER.value, subcommand=True)
bc.commands.register_command(__name__, self.get_classname(), "delreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "remindme",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "remindwme",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "repeatreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "skipreminder",
permission=const.Permission.USER.value, subcommand=False)
@staticmethod
async def _addreminder(message, command, silent=False):
"""Print message at particular time
Examples:
!addreminder 2020-01-01 00:00 Happy new year!
!addreminder today 08:00 Wake up
!addreminder tomorrow 08:00 Wake up
!addreminder 2d 08:00 Wake up <- 2 days
!addreminder 1w 08:00 Wake up <- 1 week
!addreminder in 1w5d10h5m Test reminder
!addreminder in 1w Test reminder 2
!addreminder in 5h10m Test reminder 3
"""
if not await Util.check_args_count(message, command, silent, min=4):
return
        # !addreminder in <weeks>w<days>d<hours>h<minutes>m
if command[1] == "in":
time = command[2]
text = ' '.join(command[3:])
r = const.REMINDER_IN_REGEX.match(time)
if r is None:
                await Msg.response(
                    message, ("Provide relative time in the following format: "
                              "<weeks>w<days>d<hours>h<minutes>m. "
                              "All parts except one are optional"), silent)
                return
weeks = int(r.group(2)) if r.group(2) is not None else 0
days = int(r.group(4)) if r.group(4) is not None else 0
hours = int(r.group(6)) if r.group(6) is not None else 0
minutes = int(r.group(8)) if r.group(8) is not None else 0
time = (datetime.datetime.now() + datetime.timedelta(
weeks=weeks, days=days, hours=hours, minutes=minutes)).strftime(const.REMINDER_TIME_FORMAT)
id_ = bc.config.ids["reminder"]
bc.config.reminders[id_] = Reminder(str(time), text, message.channel.id)
bc.config.ids["reminder"] += 1
await Msg.response(message, f"Reminder '{text}' with id {id_} added at {time}", silent)
return
date = command[1]
time = command[2]
if command[1] == "today":
date = datetime.datetime.strftime(datetime.datetime.now(), const.REMINDER_DATE_FORMAT)
elif command[1] == "tomorrow":
date = datetime.datetime.strftime(
datetime.datetime.now() + datetime.timedelta(days=1), const.REMINDER_DATE_FORMAT)
elif command[1].endswith("d"):
days_amount = command[1][:-1]
days_amount = await Util.parse_int(
message, days_amount, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if days_amount is None:
return
date = datetime.datetime.strftime(
datetime.datetime.now() + datetime.timedelta(days=days_amount), const.REMINDER_DATE_FORMAT)
elif command[1].endswith("w"):
weeks_amount = command[1][:-1]
weeks_amount = await Util.parse_int(
message, weeks_amount, "You need to specify amount of weeks before 'w'. Example: 2w for 2 weeks",
silent)
if weeks_amount is None:
return
date = datetime.datetime.strftime(
datetime.datetime.now() + datetime.timedelta(days=weeks_amount * 7), const.REMINDER_DATE_FORMAT)
time = date + ' ' + time
try:
time = datetime.datetime.strptime(time, const.REMINDER_TIME_FORMAT).strftime(const.REMINDER_TIME_FORMAT)
except ValueError:
await Msg.response(message, f"{time} does not match format {const.REMINDER_TIME_FORMAT}\n"
"More information about format: <https://strftime.org/>", silent)
return
text = ' '.join(command[3:])
id_ = bc.config.ids["reminder"]
bc.config.reminders[id_] = Reminder(str(time), text, message.channel.id)
bc.config.ids["reminder"] += 1
await Msg.response(message, f"Reminder '{text}' with id {id_} added at {time}", silent)
@staticmethod
async def _updreminder(message, command, silent=False):
"""Update reminder by index
Example: !updreminder 0 2020-01-01 00:00 Happy new year!"""
if not await Util.check_args_count(message, command, silent, min=5):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
time = command[2] + ' ' + command[3]
try:
time = datetime.datetime.strptime(time, const.REMINDER_TIME_FORMAT).strftime(const.REMINDER_TIME_FORMAT)
except ValueError:
await Msg.response(message, f"{time} does not match format {const.REMINDER_TIME_FORMAT}\n"
"More information about format: <https://strftime.org/>", silent)
return
text = ' '.join(command[4:])
bc.config.reminders[index] = Reminder(str(time), text, message.channel.id)
await Msg.response(
message, f"Successfully updated reminder {index}: '{text}' at {time}", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _listreminder(message, command, silent=False):
"""Print list of reminders
Example: !listreminder"""
if not await Util.check_args_count(message, command, silent, min=1, max=2):
return
reminder_list = []
for index, reminder in bc.config.reminders.items():
reminder_list.append(
(reminder.time,
f"{index} - {reminder.time}"
f"{f" in <#{reminder.channel_id}>" if message.channel.id != reminder.channel_id else ""}"
f" -> {reminder.message}"
f"{f" (repeats every {reminder.repeat_after} minutes)" if reminder.repeat_after else ""}"))
reminder_list.sort()
result = '\n'.join([x[1] for x in reminder_list])
if result:
await Msg.response(message, result, silent)
else:
await Msg.response(message, "No reminders found!", silent)
return result
@staticmethod
async def _delreminder(message, command, silent=False):
"""Delete reminder by index
Example: !delreminder 0"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
bc.config.reminders.pop(index)
await Msg.response(message, "Successfully deleted reminder!", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _remindme(message, command, silent=False):
"""Ask bot to ping you when it sends reminder
Example: !remindme 1"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
bc.config.reminders[index].ping_users.append(message.author.mention)
await Msg.response(message, f"You will be mentioned when reminder {index} is sent", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _remindwme(message, command, silent=False):
"""Ask bot to send direct message you when it sends reminder
Example: !remindwme 1"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
bc.config.reminders[index].whisper_users.append(message.author.id)
await Msg.response(
message, f"You will be notified in direct messages when reminder {index} is sent", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _repeatreminder(message, command, silent=False):
"""Make reminder repeating with particular period
Examples:
!repeatreminder 1 1
!repeatreminder 1 1h
!repeatreminder 1 1d
!repeatreminder 1 1w
Note: number without postfix is translated to minutes"""
if not await Util.check_args_count(message, command, silent, min=3, max=3):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index not in bc.config.reminders.keys():
await Msg.response(message, "Invalid index of reminder!", silent)
return
if command[2].endswith("h"):
duration = command[2][:-1]
duration = await Util.parse_int(
message, duration, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if duration is None:
return
duration *= 60
elif command[2].endswith("d"):
duration = command[2][:-1]
duration = await Util.parse_int(
message, duration, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if duration is None:
return
duration *= 1440
elif command[2].endswith("w"):
duration = command[2][:-1]
duration = await Util.parse_int(
message, duration, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if duration is None:
return
duration *= 10080
else:
duration = await Util.parse_int(
message, command[2],
f"Third parameter for '{command[0]}' should be duration of period between reminders", silent)
if duration is None:
return
if duration < 0:
await Msg.response(message, "Duration should be positive or zero (to disable repetition)!", silent)
return
bc.config.reminders[index].repeat_after = duration
await Msg.response(message, f"Reminder {index} will be repeated every {duration} minutes!", silent)
@staticmethod
async def _skipreminder(message, command, silent=False):
"""Skip next instance of recurring (repeating) reminder
Example: !skipreminder 1
Note: only recurring (repeating) reminders are affected by this command"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index not in bc.config.reminders.keys():
await Msg.response(message, "Invalid index of reminder!", silent)
return
if bc.config.reminders[index].repeat_after == 0:
await Msg.response(message, "This reminder is not recurring!", silent)
return
rem = bc.config.reminders[index]
new_time = datetime.datetime.strftime(
datetime.datetime.strptime(rem.time, const.REMINDER_TIME_FORMAT) +
datetime.timedelta(minutes=rem.repeat_after), const.REMINDER_TIME_FORMAT)
id_ = bc.config.ids["reminder"]
bc.config.reminders[id_] = Reminder(str(new_time), rem.message, message.channel.id)
bc.config.reminders[id_].repeat_after = rem.repeat_after
bc.config.ids["reminder"] += 1
bc.config.reminders.pop(index)
await Msg.response(
message, f"Skipped reminder {index} at {rem.time}, "
f"next reminder {id_} will be at {bc.config.reminders[id_].time}", silent)
|
import datetime
from src import const
from src.commands import BaseCmd
from src.config import bc
from src.message import Msg
from src.reminder import Reminder
from src.utils import Util
class ReminderCommands(BaseCmd):
def bind(self):
bc.commands.register_command(__name__, self.get_classname(), "addreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "updreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "listreminder",
permission=const.Permission.USER.value, subcommand=True)
bc.commands.register_command(__name__, self.get_classname(), "delreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "remindme",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "remindwme",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "repeatreminder",
permission=const.Permission.USER.value, subcommand=False)
bc.commands.register_command(__name__, self.get_classname(), "skipreminder",
permission=const.Permission.USER.value, subcommand=False)
@staticmethod
async def _addreminder(message, command, silent=False):
"""Print message at particular time
Examples:
!addreminder 2020-01-01 00:00 Happy new year!
!addreminder today 08:00 Wake up
!addreminder tomorrow 08:00 Wake up
!addreminder 2d 08:00 Wake up <- 2 days
!addreminder 1w 08:00 Wake up <- 1 week
!addreminder in 1w5d10h5m Test reminder
!addreminder in 1w Test reminder 2
!addreminder in 5h10m Test reminder 3
"""
if not await Util.check_args_count(message, command, silent, min=4):
return
        # !addreminder in <weeks>w<days>d<hours>h<minutes>m
if command[1] == "in":
time = command[2]
text = ' '.join(command[3:])
r = const.REMINDER_IN_REGEX.match(time)
if r is None:
                await Msg.response(
                    message, ("Provide relative time in the following format: "
                              "<weeks>w<days>d<hours>h<minutes>m. "
                              "All parts except one are optional"), silent)
                return
weeks = int(r.group(2)) if r.group(2) is not None else 0
days = int(r.group(4)) if r.group(4) is not None else 0
hours = int(r.group(6)) if r.group(6) is not None else 0
minutes = int(r.group(8)) if r.group(8) is not None else 0
time = (datetime.datetime.now() + datetime.timedelta(
weeks=weeks, days=days, hours=hours, minutes=minutes)).strftime(const.REMINDER_TIME_FORMAT)
id_ = bc.config.ids["reminder"]
bc.config.reminders[id_] = Reminder(str(time), text, message.channel.id)
bc.config.ids["reminder"] += 1
await Msg.response(message, f"Reminder '{text}' with id {id_} added at {time}", silent)
return
date = command[1]
time = command[2]
if command[1] == "today":
date = datetime.datetime.strftime(datetime.datetime.now(), const.REMINDER_DATE_FORMAT)
elif command[1] == "tomorrow":
date = datetime.datetime.strftime(
datetime.datetime.now() + datetime.timedelta(days=1), const.REMINDER_DATE_FORMAT)
elif command[1].endswith("d"):
days_amount = command[1][:-1]
days_amount = await Util.parse_int(
message, days_amount, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if days_amount is None:
return
date = datetime.datetime.strftime(
datetime.datetime.now() + datetime.timedelta(days=days_amount), const.REMINDER_DATE_FORMAT)
elif command[1].endswith("w"):
weeks_amount = command[1][:-1]
weeks_amount = await Util.parse_int(
message, weeks_amount, "You need to specify amount of weeks before 'w'. Example: 2w for 2 weeks",
silent)
if weeks_amount is None:
return
date = datetime.datetime.strftime(
datetime.datetime.now() + datetime.timedelta(days=weeks_amount * 7), const.REMINDER_DATE_FORMAT)
time = date + ' ' + time
try:
time = datetime.datetime.strptime(time, const.REMINDER_TIME_FORMAT).strftime(const.REMINDER_TIME_FORMAT)
except ValueError:
await Msg.response(message, f"{time} does not match format {const.REMINDER_TIME_FORMAT}\n"
"More information about format: <https://strftime.org/>", silent)
return
text = ' '.join(command[3:])
id_ = bc.config.ids["reminder"]
bc.config.reminders[id_] = Reminder(str(time), text, message.channel.id)
bc.config.ids["reminder"] += 1
await Msg.response(message, f"Reminder '{text}' with id {id_} added at {time}", silent)
@staticmethod
async def _updreminder(message, command, silent=False):
"""Update reminder by index
Example: !updreminder 0 2020-01-01 00:00 Happy new year!"""
if not await Util.check_args_count(message, command, silent, min=5):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
time = command[2] + ' ' + command[3]
try:
time = datetime.datetime.strptime(time, const.REMINDER_TIME_FORMAT).strftime(const.REMINDER_TIME_FORMAT)
except ValueError:
await Msg.response(message, f"{time} does not match format {const.REMINDER_TIME_FORMAT}\n"
"More information about format: <https://strftime.org/>", silent)
return
text = ' '.join(command[4:])
bc.config.reminders[index] = Reminder(str(time), text, message.channel.id)
await Msg.response(
message, f"Successfully updated reminder {index}: '{text}' at {time}", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _listreminder(message, command, silent=False):
"""Print list of reminders
Example: !listreminder"""
if not await Util.check_args_count(message, command, silent, min=1, max=2):
return
reminder_list = []
for index, reminder in bc.config.reminders.items():
reminder_list.append(
(reminder.time,
f"{index} - {reminder.time}"
f"{f' in <#{reminder.channel_id}>' if message.channel.id != reminder.channel_id else ''}"
f" -> {reminder.message}"
f"{f' (repeats every {reminder.repeat_after} minutes)' if reminder.repeat_after else ''}"))
reminder_list.sort()
result = '\n'.join([x[1] for x in reminder_list])
if result:
await Msg.response(message, result, silent)
else:
await Msg.response(message, "No reminders found!", silent)
return result
@staticmethod
async def _delreminder(message, command, silent=False):
"""Delete reminder by index
Example: !delreminder 0"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
bc.config.reminders.pop(index)
await Msg.response(message, "Successfully deleted reminder!", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _remindme(message, command, silent=False):
"""Ask bot to ping you when it sends reminder
Example: !remindme 1"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
bc.config.reminders[index].ping_users.append(message.author.mention)
await Msg.response(message, f"You will be mentioned when reminder {index} is sent", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _remindwme(message, command, silent=False):
"""Ask bot to send direct message you when it sends reminder
Example: !remindwme 1"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index in bc.config.reminders.keys():
bc.config.reminders[index].whisper_users.append(message.author.id)
await Msg.response(
message, f"You will be notified in direct messages when reminder {index} is sent", silent)
else:
await Msg.response(message, "Invalid index of reminder!", silent)
@staticmethod
async def _repeatreminder(message, command, silent=False):
"""Make reminder repeating with particular period
Examples:
!repeatreminder 1 1
!repeatreminder 1 1h
!repeatreminder 1 1d
!repeatreminder 1 1w
Note: number without postfix is translated to minutes"""
if not await Util.check_args_count(message, command, silent, min=3, max=3):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index not in bc.config.reminders.keys():
await Msg.response(message, "Invalid index of reminder!", silent)
return
if command[2].endswith("h"):
duration = command[2][:-1]
duration = await Util.parse_int(
message, duration, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if duration is None:
return
duration *= 60
elif command[2].endswith("d"):
duration = command[2][:-1]
duration = await Util.parse_int(
message, duration, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if duration is None:
return
duration *= 1440
elif command[2].endswith("w"):
duration = command[2][:-1]
duration = await Util.parse_int(
message, duration, "You need to specify amount of days before 'd'. Example: 3d for 3 days", silent)
if duration is None:
return
duration *= 10080
else:
duration = await Util.parse_int(
message, command[2],
f"Third parameter for '{command[0]}' should be duration of period between reminders", silent)
if duration is None:
return
if duration < 0:
await Msg.response(message, "Duration should be positive or zero (to disable repetition)!", silent)
return
bc.config.reminders[index].repeat_after = duration
await Msg.response(message, f"Reminder {index} will be repeated every {duration} minutes!", silent)
@staticmethod
async def _skipreminder(message, command, silent=False):
"""Skip next instance of recurring (repeating) reminder
Example: !skipreminder 1
Note: only recurring (repeating) reminders are affected by this command"""
if not await Util.check_args_count(message, command, silent, min=2, max=2):
return
index = await Util.parse_int(
message, command[1], f"Second parameter for '{command[0]}' should be an index of reminder", silent)
if index is None:
return
if index not in bc.config.reminders.keys():
await Msg.response(message, "Invalid index of reminder!", silent)
return
if bc.config.reminders[index].repeat_after == 0:
await Msg.response(message, "This reminder is not recurring!", silent)
return
rem = bc.config.reminders[index]
new_time = datetime.datetime.strftime(
datetime.datetime.strptime(rem.time, const.REMINDER_TIME_FORMAT) +
datetime.timedelta(minutes=rem.repeat_after), const.REMINDER_TIME_FORMAT)
id_ = bc.config.ids["reminder"]
bc.config.reminders[id_] = Reminder(str(new_time), rem.message, message.channel.id)
bc.config.reminders[id_].repeat_after = rem.repeat_after
bc.config.ids["reminder"] += 1
bc.config.reminders.pop(index)
await Msg.response(
message, f"Skipped reminder {index} at {rem.time}, "
f"next reminder {id_} will be at {bc.config.reminders[id_].time}", silent)
|