python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datasets
import numpy as np
from lm_eval.base import rf
from ..metrics import mean
from .common import HFTask
class each:
def __init__(self, f):
self.f = f
def __rrshift__(self, other):
return list(map(self.f, other))
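# Illustrative note (added for exposition, not part of the original harness):
# `each` overloads the reflected right-shift operator, so an iterable can be
# "piped" through a mapping function, e.g.
#     [1, 2, 3] >> each(lambda x: x * 2)   # -> [2, 4, 6]
# `_collate_data` below relies on this to turn the per-question items that HF
# returns for one article into a single passage-level document.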
class RACE(HFTask):
VERSION = 0
DATASET_PATH = "race"
DATASET_NAME = "high"
cache = {}
letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def _collate_data(self, set):
if set in self.cache:
return self.cache[set]
# One big issue with HF's implementation of this dataset: it creates a
# separate document for each question, whereas the GPT-3 paper builds one
# document per passage (grouping all of that passage's questions together).
r = collections.defaultdict(list)
for item in datasets.load_dataset(path=self.DATASET_PATH, name=self.DATASET_NAME)[set]:
r[item["article"]].append(item)
res = list(
r.values()
>> each(
lambda x: {
"article": x[0]["article"],
"problems": x
>> each(lambda y: {"question": y["question"], "answer": y["answer"], "options": y["options"],}),
}
)
)
self.cache[set] = res
return res
def training_docs(self):
return self._collate_data("train")
def validation_docs(self):
return self._collate_data("validation")
def test_docs(self):
return self._collate_data("test")
def fewshot_description(self):
# TODO: figure out description
return ""
@classmethod
def get_answer_option(cls, problem):
answer = cls.letter_to_num[problem["answer"]]
return problem["options"][answer]
@classmethod
def last_problem(cls, doc):
return doc["problems"][-1]
def doc_to_text(self, doc):
text = "Article: " + doc["article"] + "\n\n"
for problem in doc["problems"][:-1]:
question = "Question: " + problem["question"] + "\n"
answer = "Answer: " + self.get_answer_option(problem) + "\n"
text += question + answer
text += "Question: " + self.last_problem(doc)["question"] + "\n" + "Answer:"
return text
def doc_to_target(self, doc):
return " " + self.get_answer_option(self.last_problem(doc))
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
problem = self.last_problem(doc)
ll_choices = [rf.loglikelihood(ctx, " " + problem["options"][i])[0] for i in range(4)] + [
rf.loglikelihood("Answer:", " " + problem["options"][i])[0] for i in range(4)
]
return ll_choices
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
gold = self.letter_to_num[self.last_problem(doc)["answer"]]
context_conditional_ll = results[:4]
context_free_ll = results[4:]
assert len(context_free_ll) == len(context_conditional_ll)
ll_gain = [ccl - cfl for ccl, cfl in zip(context_conditional_ll, context_free_ll)]
pred = np.argmax(ll_gain)
pred_raw_ll = np.argmax(results[:4])
return {
"acc": int(pred_raw_ll == gold),
"acc_norm": int(pred == gold),
}
def serialize_results(self, doc, results):
gold = self.letter_to_num[self.last_problem(doc)["answer"]]
context_conditional_ll = results[:4]
context_free_ll = results[4:]
assert len(context_free_ll) == len(context_conditional_ll)
ll_gain = [ccl - cfl for ccl, cfl in zip(context_conditional_ll, context_free_ll)]
pred = np.argmax(ll_gain)
return {
"format": self.doc_to_text(doc),
"gold_choice": self.last_problem(doc)["options"][gold],
"model_choice": self.last_problem(doc)["options"][pred],
"choices (ll, ull, ll_gain)": dict(
zip(self.last_problem(doc)["options"], zip(results[:4], results[4:], ll_gain))
),
}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {
"acc": mean,
"acc_norm": mean,
}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {
"acc": True,
"acc_norm": True,
}
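# Illustrative sketch (added for exposition, with made-up log-likelihoods):
# `process_results` above scores `acc` from the raw context-conditional
# log-likelihoods and `acc_norm` from their gain over the context-free
# "Answer:"-conditioned log-likelihoods returned by `construct_requests`.
def _race_scoring_example():
    results = [
        -12.1, -10.4, -11.0, -13.2,  # logP(option_i | article + questions)
        -9.0, -5.1, -8.7, -9.9,  # logP(option_i | "Answer:")
    ]
    pred_raw_ll = int(np.argmax(results[:4]))  # -> 1, drives `acc`
    ll_gain = [c - u for c, u in zip(results[:4], results[4:])]
    pred = int(np.argmax(ll_gain))  # -> 2, drives `acc_norm`
    return pred_raw_ll, pred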
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/race.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pprint
from . import hellaswag, lambada, piqa, prompt, race, superglue, wikitext, winogrande
from .common import HFTask
########################################
# Translation tasks
########################################
# 6 total
gpt3_translation_benchmarks = {
"wmt14": ["en-fr", "fr-en"], # French
"wmt16": ["en-ro", "ro-en", "de-en", "en-de"], # German, Romanian
}
########################################
# All tasks
########################################
TASK_REGISTRY = {
"lambada": lambada.LAMBADA,
"boolq": superglue.BoolQ,
"piqa": piqa.PiQA,
"hellaswag": hellaswag.HellaSwag,
"race": race.RACE,
"wikitext2": wikitext.WikiText,
"wikitext103": wikitext.WikiText103,
"winogrande": winogrande.Winogrande,
}
PROMPT_TASK_REGISTRY = {"prompt": prompt.Prompt}
ALL_TASKS = sorted(list(TASK_REGISTRY))
def get_task(task_name):
if task_name in TASK_REGISTRY:
return TASK_REGISTRY[task_name]
print("Available tasks:")
pprint(TASK_REGISTRY)
raise KeyError(f"Missing task {task_name}")
def get_task_dict(task_name_list, cache_dir):
return {task_name: get_task(task_name)(cache_dir) for task_name in task_name_list}
def get_prompt_task(task_name):
if task_name in PROMPT_TASK_REGISTRY:
return PROMPT_TASK_REGISTRY[task_name]
print("Available tasks:")
pprint(PROMPT_TASK_REGISTRY)
raise KeyError(f"Missing task {task_name}")
def get_prompt_task_dict(task_name_list, **kwargs):
return {task_name: get_prompt_task(task_name)(**kwargs) for task_name in task_name_list}
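# Illustrative usage sketch (assumes this package is importable and the HF
# datasets can be downloaded into `cache_dir`, which is an example path):
def _example_build_tasks(cache_dir="/tmp/hf_cache"):
    tasks = get_task_dict(["boolq", "race"], cache_dir)
    for name, task in tasks.items():
        print(name, type(task).__name__, "has test docs:", task.has_test_docs())
    return tasks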
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from lm_eval.base import MultipleChoiceTask
from .common import HFTask
class HellaSwag(HFTask, MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "hellaswag"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
@classmethod
def preprocess(cls, text):
text = text.strip()
# NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag.
text = text.replace(" [title]", ". ")
text = re.sub("\\[.*?\\]", "", text)
text = text.replace("  ", " ")  # collapse double spaces left by the removals above
return text
def _convert_standard(self, doc):
ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
out_doc = {
"query": self.preprocess(doc["activity_label"] + ": " + ctx),
"choices": [self.preprocess(ending) for ending in doc["endings"]],
"gold": int(doc["label"]),
}
return out_doc
def fewshot_description(self):
return (
"Label for the relevant action: Sentences describing the "
"context, with an incomplete sentence trailing\nanswer that "
"plausibly completes the situation."
)
def doc_to_text(self, doc):
return doc["query"]
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/hellaswag.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets
from ..base import Task
class HFTask(Task):
DATASET_PATH = None
DATASET_NAME = None
def __init__(self, cache_dir=""):
self.data = None
self.cache_dir = cache_dir
super().__init__()
def download(self):
self.data = datasets.load_dataset(path=self.DATASET_PATH, name=self.DATASET_NAME, cache_dir=self.cache_dir)
def has_training_docs(self):
"""Whether the task has a training set"""
return True if "train" in self.data.keys() else False
def has_validation_docs(self):
"""Whether the task has a validation set"""
return True if "validation" in self.data.keys() else False
def has_test_docs(self):
"""Whether the task has a test set"""
return True if "test" in self.data.keys() else False
def _convert_standard(self, doc):
return doc
def training_docs(self):
# Cache training for faster few-shot.
# If data is too large to fit in memory, override this method.
if self.has_training_docs():
if self._training_docs is None:
self._training_docs = list(map(self._convert_standard, self.data["train"]))
return self._training_docs
def validation_docs(self):
if self.has_validation_docs():
return map(self._convert_standard, self.data["validation"])
def test_docs(self):
if self.has_test_docs():
return map(self._convert_standard, self.data["test"])
def yesno(x):
if x:
return "yes"
else:
return "no"
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/common.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To-do:
- WSC requires free-form generation
- ReCoRD
"""
import numpy as np
import sklearn
import transformers.data.metrics.squad_metrics as squad_metrics
from lm_eval.base import rf
from ..metrics import acc_all, mean, metric_max_over_ground_truths
from ..utils import general_detokenize
from .common import HFTask, yesno
class BoolQ(HFTask):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "boolq"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def fewshot_description(self):
# TODO: figure out actual description
return "Read the following passages and answer each question with a yes or a no."
def doc_to_text(self, doc):
return f"{doc['passage']}\nQuestion: {doc['question']}\nAnswer:"
def doc_to_target(self, doc):
return " " + yesno(doc["label"])
def construct_requests(self, doc, ctx):
ll_yes, *_ = rf.loglikelihood(ctx, " yes")
ll_no, *_ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
acc = 1.0 if (ll_yes > ll_no) == gold else 0.0
return {"acc": acc}
def serialize_results(self, doc, results):
ll_yes, ll_no = results
return {
"format": self.doc_to_text(doc) + " {yes/no}",
"model_choice": "yes" if ll_yes > ll_no else "no",
"gold_choice": "yes" if doc["label"] else "no",
"likelihoods": {"yes": ll_yes, "no": ll_no},
}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
class CommitmentBank(HFTask):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "cb"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def fewshot_description(self):
# TODO: figure out actual description
return (
"Given a premise and a hypothesis, classify whether the author of the premise is committed"
"to the truth of the hypothesis. The three possible labels are true, false or neither."
)
def doc_to_text(self, doc):
return "{}\nQuestion: {}. True, False or Neither?\nAnswer:".format(doc["premise"], doc["hypothesis"],)
def doc_to_target(self, doc):
# True = entailment
# False = contradiction
# Neither = neutral
return " {}".format({0: "True", 1: "Neither", 2: "False"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_true, _, _, _ = rf.loglikelihood(ctx, " True")
ll_neither, _, _, _ = rf.loglikelihood(ctx, " Neither")
ll_false, _, _, _ = rf.loglikelihood(ctx, " False")
return ll_true, ll_neither, ll_false
def process_results(self, doc, results):
gold = doc["label"]
pred = np.argmax(results)
acc = 1.0 if pred == gold else 0.0
return {"acc": acc, "f1": (pred, gold)}
def serialize_results(self, doc, results):
return {
"gold_choice": doc["label"],
"model_output": results,
"question": doc["premise"] + "\nQuestion: " + doc["hypothesis"] + ". True, False or Neither?\nAnswer:",
}
def higher_is_better(self):
return {"acc": True, "f1": True}
@classmethod
def cb_multi_fi(cls, items):
preds, golds = zip(*items)
preds = np.array(preds)
golds = np.array(golds)
f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0)
f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1)
f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2)
avg_f1 = mean([f11, f12, f13])
return avg_f1
def aggregation(self):
return {
"acc": mean,
"f1": self.cb_multi_fi,
}
class Copa(HFTask):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "copa"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def fewshot_description(self):
# TODO: figure out actual description
return (
"Given a premise and one alternative with a causal relation to the premise and another without,"
"choose the more plausible alternative"
)
def doc_to_text(self, doc):
# Drop the period
connector = {"cause": "because", "effect": "therefore",}[doc["question"]]
return doc["premise"].strip()[:-1] + f" {connector}"
def doc_to_target(self, doc):
correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]
# Connect the sentences
return " " + self.convert_choice(correct_choice)
def construct_requests(self, doc, ctx):
choice1 = " " + self.convert_choice(doc["choice1"])
choice2 = " " + self.convert_choice(doc["choice2"])
ll_choice1, _, _, _ = rf.loglikelihood(ctx, choice1)
ll_choice2, _, _, _ = rf.loglikelihood(ctx, choice2)
return ll_choice1, ll_choice2
def process_results(self, doc, results):
gold = doc["label"]
pred = np.argmax(results)
acc = 1.0 if pred == gold else 0.0
return {"acc": acc}
def serialize_results(self, doc, results):
return {
"gold_choice": doc["label"],
"model_output": results,
"premise": doc["premise"],
}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
@staticmethod
def convert_choice(choice):
return choice[0].lower() + choice[1:]
class MultiRC(HFTask):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "multirc"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def fewshot_description(self):
# TODO: figure out actual description
return "READING COMPREHENSION ANSWER KEY"
def doc_to_text(self, doc):
return f"{doc['paragraph']}\nQuestion: {doc['question']}\nAnswer:"
def doc_to_target(self, doc):
return " " + self.format_answer(answer=doc["answer"], label=doc["label"])
@staticmethod
def format_answer(answer, label):
label_str = "yes" if label else "no"
return f"{label_str}, {answer}"
def construct_requests(self, doc, ctx):
true_choice = self.format_answer(answer=doc["answer"], label=True)
false_choice = self.format_answer(answer=doc["answer"], label=False)
ll_true_choice, _, _, _ = rf.loglikelihood(ctx, f" {true_choice}")
ll_false_choice, _, _, _ = rf.loglikelihood(ctx, f" {false_choice}")
return ll_true_choice, ll_false_choice
def process_results(self, doc, results):
pred = np.argmax(results)
return {"acc": (pred, doc)}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": acc_all}
class ReCoRD(HFTask):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "record"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def fewshot_description(self):
# TODO: figure out actual description
return ""
def training_docs(self):
# In ReCoRD, each doc manifests multiple "examples" in the context of few shot example packing.
# Each doc consists of multiple answer candidates, each of which is scored yes/no.
if self._training_docs is None:
self._training_docs = []
for doc in self.data["train"]:
self._training_docs.append(self._process_doc(doc))
return self._training_docs
def validation_docs(self):
# See: training_docs
for doc in self.data["validation"]:
yield self._process_doc(doc)
@classmethod
def _process_doc(cls, doc):
return {
"passage": doc["passage"],
"query": doc["query"],
"entities": sorted(list(set(doc["entities"]))),
"answers": sorted(list(set(doc["answers"]))),
}
def doc_to_text(self, doc):
initial_text, *highlights = doc["passage"].strip().split("\n@highlight\n")
text = initial_text + "\n\n"
for highlight in highlights:
text += f" - {highlight}.\n"
return text
@classmethod
def format_answer(cls, query, entity):
return f" - {query}".replace("@placeholder", entity)
def doc_to_target(self, doc):
# We only output the first correct entity in a doc
return self.format_answer(query=doc["query"], entity=doc["answers"][0])
def construct_requests(self, doc, ctx):
requests = [
rf.loglikelihood(ctx, self.format_answer(query=doc["query"], entity=entity)) for entity in doc["entities"]
]
return requests
def process_results(self, doc, results):
# ReCoRD's evaluation is actually deceptively simple:
# - Pick the maximum likelihood prediction entity
# - Evaluate the accuracy and token F1 PER EXAMPLE
# - Average over all examples
max_idx = np.argmax(np.array([result[0] for result in results]))
prediction = doc["entities"][max_idx]
gold_label_set = doc["answers"]
f1 = metric_max_over_ground_truths(squad_metrics.compute_f1, prediction, gold_label_set)
em = metric_max_over_ground_truths(squad_metrics.compute_exact, prediction, gold_label_set)
return {
"f1": f1,
"em": em,
}
def higher_is_better(self):
return {
"f1": True,
"em": True,
}
def aggregation(self):
return {
"f1": mean,
"em": mean,
}
class WordsInContext(HFTask):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "wic"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def fewshot_description(self):
# TODO: figure out actual description
return ""
def doc_to_text(self, doc):
return (
"Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the"
" two sentences above?\nAnswer:".format(
doc["sentence1"], doc["sentence2"], doc["sentence1"][doc["start1"] : doc["end1"]],
)
)
def doc_to_target(self, doc):
return " {}".format({0: "no", 1: "yes"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_yes, _, _, _ = rf.loglikelihood(ctx, " yes")
ll_no, _, _, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
acc = 1.0 if (ll_yes > ll_no) == gold else 0.0
return {"acc": acc}
def serialize_results(self, doc, results):
return {
"gold_choice": doc["label"],
"model_output": results,
"question": "Sentence 1: "
+ doc["sentence1"]
+ "\nSentence 2: "
+ doc["sentence2"]
+ "\nQuestion: Is the word "
+ doc["sentence1"][doc["start1"] : doc["end1"]]
+ " used in the same way in the two sentences above?\nAnswer:",
}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
class SGWinogradSchemaChallenge(HFTask):
VERSION = 0
# Note: This implementation differs from Fig G.32 because this is the SuperGLUE,
# binary version of the task.
DATASET_PATH = "super_glue"
DATASET_NAME = "wsc"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self.has_training_docs():
if self._training_docs is None:
# GPT-3 Paper's format only uses positive examples for fewshot "training"
self._training_docs = [doc for doc in self.data["train"] if doc["label"]]
return self._training_docs
def fewshot_description(self):
return (
"Final Exam with Answer Key\n"
"Instructions: Please carefully read the following passages. "
"For each passage, you must identify which noun the pronoun marked in *bold*"
" refers to.\n====="
)
def doc_to_text(self, doc):
raw_passage = doc["text"]
# NOTE: HuggingFace span indices are word-based not character-based.
pre = " ".join(raw_passage.split()[: doc["span2_index"]])
post = raw_passage[len(pre) + len(doc["span2_text"]) + 1 :]
passage = general_detokenize(pre + " *{}*".format(doc["span2_text"]) + post)
noun = doc["span1_text"]
pronoun = doc["span2_text"]
text = (
f"Passage: {passage}\n"
+ f'Question: In the passage above, does the pronoun "*{pronoun}*" refer to "*{noun}*"?\n'
+ "Answer:"
)
return text
def doc_to_target(self, doc):
return " " + yesno(doc["label"])
def construct_requests(self, doc, ctx):
ll_yes, _, _, _ = rf.loglikelihood(ctx, " yes")
ll_no, _, _, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
acc = 1.0 if (ll_yes > ll_no) == gold else 0.0
return {"acc": acc}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/superglue.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import torch
from best_download import download_file
from lm_eval.base import PerplexityTask
from lm_eval.utils import sh
def wikitext_detokenizer(string):
# contractions
string = string.replace("s '", "s'")
string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
# number separators
string = string.replace(" @-@ ", "-")
string = string.replace(" @,@ ", ",")
string = string.replace(" @.@ ", ".")
# punctuation # GEO: TODO: What if string ends with punctuation? (e.g. "bla .") Isn't replace(" .", ".") more general?
string = string.replace(" : ", ": ")
string = string.replace(" ; ", "; ")
string = string.replace(" . ", ". ")
string = string.replace(" ! ", "! ")
string = string.replace(" ? ", "? ")
string = string.replace(" , ", ", ")
# double brackets
string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string)
string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string)
string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string)
string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string)
string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
# miscellaneous
string = string.replace("= = = =", "====")
string = string.replace("= = =", "===")
string = string.replace("= =", "==")
string = string.replace(" " + chr(176) + " ", chr(176))
string = string.replace(" \n", "\n")
string = string.replace("\n ", "\n")
string = string.replace(" N ", " 1 ")
string = string.replace(" 's", "'s")
return string
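# Illustrative example (made-up sentence, added for exposition):
#     wikitext_detokenizer("In 1 @,@ 500 BC , the state @-@ run mine opened .")
#     -> "In 1,500 BC, the state-run mine opened ."
# Note the trailing " ." survives because only " . " (with a trailing space)
# is rewritten -- exactly the caveat flagged in the TODO above.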
class WikiText(PerplexityTask):
VERSION = 0
def __init__(self, cache_dir=""):
self.cache_dir = cache_dir
super().__init__()
def download(self):
cache_dir = (
self.cache_dir
if self.cache_dir
else os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "data")
)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
if not os.path.exists(cache_dir + "/wikitext/wikitext-2-raw/wiki.valid.raw"):
os.makedirs(cache_dir + "/wikitext", exist_ok=True)
download_file(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip",
local_file=cache_dir + "/wikitext/wikitext-2-raw-v1.zip",
expected_checksum="ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11",
)
sh(f"cd {cache_dir}/wikitext && unzip wikitext-2-raw-v1.zip")
if torch.distributed.is_initialized():
torch.distributed.barrier()
self.cache_dir = cache_dir
def fewshot_description(self):
# TODO: figure out fewshot description
return ""
def has_validation_docs(self):
return True
def has_train_docs(self):
return True
def has_test_docs(self):
return True
def docs_for_split(self, split):
ret = []
for line in (
open(self.cache_dir + f"/wikitext/wikitext-2-raw/wiki.{split}.raw", encoding='utf-8').read().split("\n")
):
rline = line.replace("= = =", "===").replace("= =", "==").strip()
if rline.startswith("= ") and rline.strip().endswith(" ="):
s = "\n".join(ret)
if s.strip():
yield s
ret = []
ret.append(line)
yield "\n".join(ret)
def validation_docs(self):
return self.docs_for_split("valid")
def train_docs(self):
return self.docs_for_split("train")
def test_docs(self):
return self.docs_for_split("test")
def doc_to_target(self, doc):
return wikitext_detokenizer(doc)
def count_words(self, doc):
# count number of words in *original doc before detokenization*
return len(re.split(r"\s+", doc))
class WikiText103(WikiText):
def download(self):
cache_dir = (
self.cache_dir
if self.cache_dir
else os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "data")
)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
if not os.path.exists(cache_dir + "/wikitext/wikitext-103-raw/wiki.valid.raw"):
os.makedirs(cache_dir + "/wikitext", exist_ok=True)
download_file(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip",
local_file=cache_dir + "/wikitext/wikitext-103-raw-v1.zip",
)
sh(f"cd {cache_dir}/wikitext && unzip wikitext-103-raw-v1.zip")
if torch.distributed.is_initialized():
torch.distributed.barrier()
self.cache_dir = cache_dir
def docs_for_split(self, split):
ret = []
for line in (
open(self.cache_dir + f"/wikitext/wikitext-103-raw/wiki.{split}.raw", encoding='utf-8').read().split("\n")
):
rline = line.replace("= = =", "===").replace("= =", "==").strip()
if rline.startswith("= ") and rline.strip().endswith(" ="):
s = "\n".join(ret)
if s.strip():
yield s
ret = []
ret.append(line)
yield "\n".join(ret)
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/wikitext.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lm_eval.base import Task, rf
from lm_eval.metrics import mean, perplexity
from nemo.collections.nlp.data.language_modeling.megatron.gpt_prompt_learning_dataset import GPTPromptLearningDataset
class Prompt(Task):
VERSION = 0
def __init__(self, model, dataset_paths, disable_special_tokens=False):
super().__init__()
self.tokenizer = model.tokenizer
self.disable_special_tokens = disable_special_tokens
self.prompt_dataset = GPTPromptLearningDataset(
data=dataset_paths,
tokenizer=model.tokenizer,
virtual_prompt_source=model.virtual_prompt_source,
task_templates=model.task_templates,
pseudo_tokens=model.pseudo_tokens,
pad_token_id=model.pad_token_id,
max_seq_length=2048,
min_seq_length=1,
add_bos=False,
add_eos=True,
for_train=True,
)
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
pass
def validation_docs(self):
for example in self.prompt_dataset:
task_id, input_ids, answer_start_idx = example
if self.disable_special_tokens:
context = self.tokenizer.ids_to_text(input_ids[:answer_start_idx])
else:
context = self.tokenizer.tokens_to_text(self.tokenizer.ids_to_tokens(input_ids[:answer_start_idx]))
doc = {
"task_id": task_id,
"context": context,
"target": self.tokenizer.ids_to_text(input_ids[answer_start_idx:]),
}
yield doc
def test_docs(self):
pass
def fewshot_context(self, doc, num_fewshot, provide_description, rnd, filter_shot_examples=False, **kwargs):
"""Construct and format full prompt string for a given sample, optionally including description and shot examples
:param doc: document object corresponding to the sample under examination
:param num_fewshot: number of examples to be included in the prompt
:param provide_description: (bool), whether to prepend natural language description
:param rnd: initialized random number generator object, e.g. rnd = random.Random(1337)
:param filter_shot_examples: If True, certain samples will be excluded from the prompt context,
based on the member function `filter_shots`
:return: (shot_ids, context_str): tuple of (iterable of shot example IDs, string corresponding to the context/prompt)
"""
raw_description = self.fewshot_description()
description = (raw_description + "\n===\n\n") if provide_description and raw_description else ""
if num_fewshot == 0:
labeled_examples = ""
shot_ids = []
else:
raise NotImplementedError("No support for fewshots in prompt model evaluation.")
example = self.doc_to_text(doc) # the document of interest, main part of the prompt
prompt_str = description + labeled_examples + example # the formatted prompt string
return shot_ids, prompt_str
def doc_to_text(self, doc):
return doc["context"]
def doc_to_target(self, doc):
return doc["target"]
def fewshot_description(self):
# TODO: figure out description
return ""
def construct_requests(self, doc, ctx):
ll, is_greedy, greedy_toks, cont_toks = rf.loglikelihood(ctx, self.doc_to_target(doc), doc["task_id"])
return ll, is_greedy, greedy_toks, cont_toks
def process_results(self, doc, results):
ll, is_greedy, *_ = results
return {"ppl": ll, "acc": int(is_greedy)}
def serialize_results(self, doc, results):
*_, greedy_toks, cont_toks = results
return {
"prompt": self.doc_to_text(doc),
"gold_answer": [x.replace("Ġ", " ") for x in cont_toks],
"model_answer": [x.replace("Ġ", " ") for x in greedy_toks],
}
def aggregation(self):
return {"ppl": perplexity, "acc": mean}
def higher_is_better(self):
return {"ppl": False, "acc": True}
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/prompt.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lm_eval.base import MultipleChoiceTask
from .common import HFTask
class PiQA(HFTask, MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "piqa"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def fewshot_description(self):
# TODO: figure out fewshot description
return ""
def _convert_standard(self, doc):
out_doc = {
"goal": doc["goal"],
"choices": [doc["sol1"], doc["sol2"]],
"gold": doc["label"],
}
return out_doc
def doc_to_text(self, doc):
return doc["goal"]
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/piqa.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from omegaconf import OmegaConf, open_dict
import torch
import tqdm
from megatron.core import parallel_state
from lm_eval import utils
from lm_eval.base import LM
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.modules.common.text_generation_utils import generate, get_computeprob_response
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.model_utils import inject_model_parallel_rank
from pytorch_lightning.trainer.trainer import Trainer
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataloader import default_collate
class RequestDataset(Dataset):
def __init__(self, requests, tokenizer, max_length=2048) -> None:
super().__init__()
self.requests = requests
self.tokenizer = tokenizer
self.max_length = max_length
def __len__(self):
return len(self.requests)
def __getitem__(self, index):
context, continuation = self.requests[index]
context_enc = self.tokenizer.text_to_ids(context) if isinstance(context, str) else context
continuation_enc = self.tokenizer.text_to_ids(continuation) if isinstance(continuation, str) else continuation
if isinstance(self.tokenizer, SentencePieceTokenizer):
continuation_enc = continuation_enc[1:]
# sanity check
assert len(context_enc) > 0
assert len(continuation_enc) > 0
assert len(continuation_enc) <= self.max_length
conti_len = len(continuation_enc)
inp_enc = torch.tensor((context_enc + continuation_enc)[-(self.max_length + 1) :])
return inp_enc, conti_len
def setup_trainer_and_model(args):
"""Setup model and optimizer."""
torch.set_grad_enabled(False)
assert args.nemo_model is not None or (
args.checkpoint_folder is not None and args.checkpoint_name is not None
), "Path to checkpoints is required."
# cast precision to int if 32 or 16
if args.precision in ["32", "16"]:
args.precision = int(args.precision)
model_parallel_size = args.tensor_model_parallel_size * args.pipeline_model_parallel_size
num_nodes = max(model_parallel_size // 8, 1)
num_gpus = min(model_parallel_size, 8)
trainer = Trainer(
strategy=NLPDDPStrategy(), devices=num_gpus, num_nodes=num_nodes, precision=args.precision, accelerator='gpu'
)
app_state = AppState()
if args.tensor_model_parallel_size > 1 or args.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = args.tensor_model_parallel_size * args.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
*_,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=args.tensor_model_parallel_size,
pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
)
if args.nemo_model is not None and args.nemo_model != "None":
logging.info(f"**** Loading checkpoint from nemo model: {args.nemo_model}")
save_restore_connector = NLPSaveRestoreConnector()
if os.path.isdir(args.nemo_model):
save_restore_connector._model_extracted_dir = args.nemo_model
pretrained_cfg = MegatronGPTModel.restore_from(
restore_path=args.nemo_model,
trainer=trainer,
return_config=True,
save_restore_connector=save_restore_connector,
)
OmegaConf.set_struct(pretrained_cfg, True)
with open_dict(pretrained_cfg):
pretrained_cfg.sequence_parallel = False
pretrained_cfg.activations_checkpoint_granularity = None
pretrained_cfg.activations_checkpoint_method = None
pretrained_cfg.precision = trainer.precision
if trainer.precision == "16":
pretrained_cfg.megatron_amp_O2 = False
model = MegatronGPTModel.restore_from(
restore_path=args.nemo_model,
trainer=trainer,
override_config_path=pretrained_cfg,
save_restore_connector=save_restore_connector,
map_location=f'cuda:{trainer.local_rank}',
)
else:
if args.tensor_model_parallel_size > 1 or args.pipeline_model_parallel_size > 1:
app_state.pipeline_model_parallel_size = args.pipeline_model_parallel_size
app_state.tensor_model_parallel_size = args.tensor_model_parallel_size
logging.info(f"**** Loading checkpoint from {args.checkpoint_folder} - {args.checkpoint_name}")
# inject model parallel rank
checkpoint_path = inject_model_parallel_rank(os.path.join(args.checkpoint_folder, args.checkpoint_name))
model = MegatronGPTModel.load_from_checkpoint(checkpoint_path, hparams_file=args.hparams_file, trainer=trainer)
model.freeze()
return trainer, model
def DDP_initialize(model):
if parallel_state.is_unitialized():
def dummy():
return
if model.trainer.strategy.launcher is not None:
model.trainer.strategy.launcher.launch(dummy, trainer=model.trainer)
model.trainer.strategy.setup_environment()
if model.cfg.get('transformer_engine', False) and model.cfg.get('tensor_model_parallel_size', 1) > 1:
logging.info(f'Setting up transformer engine modules for tensor parallelism.')
if model.cfg.get('megatron_amp_O2', False):  # default must be a bool; the string 'False' would be truthy
# when using O2 additional module key is added that casts the weights
for layer in model.model.module.language_model.encoder.layers:
layer.set_tensor_parallel_group(parallel_state.get_tensor_model_parallel_group())
else:
for layer in model.model.language_model.encoder.layers:
layer.set_tensor_parallel_group(parallel_state.get_tensor_model_parallel_group())
class NeMo_GPT3LM_TP_PP(LM):
def __init__(self, args, truncate=False, batch_size=1):
super().__init__()
# get nemo megatron
logging.info(f"**** Building GPT model ...")
self.trainer, self.model = setup_trainer_and_model(args)
self.tokenizer = self.model.tokenizer
self.model.eval()
self.max_length = self.model.cfg.get("max_position_embeddings")
self.pad_id = self.tokenizer.pad_id
self.eos_id = self.tokenizer.eos_id
self.truncate = truncate
self.batch_size = batch_size
# initialize DDP and move model to GPU
DDP_initialize(self.model)
self.model = self.model.cuda()
@classmethod
def create_from_arg_string(cls, arg_string, additional_config={}):
args = utils.simple_parse_args_string(arg_string)
args2 = {k: v for k, v in additional_config.items() if v is not None}
return cls(args, **args2)
def loglikelihood(self, requests):
return self._loglikelihood(requests)
"""
request: (context, continuation)
how this all works:
CTX CONT
inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
gpt2 \ \
logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the [:, -len(continuation_enc):, :self.VOCAB_SIZE] slice
cont_toks 4 5 6 7 8 9
when too long to fit in context, truncate from the left
"""
def _loglikelihood(self, requests):
def pad_collate(batch):
eos_id = self.eos_id
tokens = [item[0] for item in batch]
conti_lens = [item[1] for item in batch]
lens = [len(token) - 1 for token in tokens] # fake delete last token by reducing input len
max_len = max(lens)
extra_pad_len = 0
if max_len % 8 != 0:
extra_pad_len = 8 - (max_len % 8)
max_len += extra_pad_len
# extra_pad_len = 2048 - max_len
# max_len += extra_pad_len
tokens_pad = pad_sequence(tokens, batch_first=False, padding_value=eos_id)
if extra_pad_len > 0:
extra_pad = torch.ones(extra_pad_len, len(batch)) * eos_id
extra_pad = extra_pad.type_as(tokens_pad)
tokens_pad = torch.vstack((tokens_pad, extra_pad))
# Add padding to all samples to adapt nemo generate api
new_batch = []
for token, lenn, conti_len in zip(tokens_pad.T, lens, conti_lens):
# (token, lenn, tokens_to_generate, compute_logprobs)
new_batch.append((token, max_len, lenn, conti_len))
new_batch = default_collate(new_batch)
return new_batch
def _collate(x):  # used to reorder requests and remove duplicates
"""
the negative sign on len(toks) sorts descending - this has a few advantages:
- time estimates will always be over- rather than under-estimates, which is more useful for planning
- walking through the sorted list, the first request in a batch is always the longest, so it determines the batch's padded context length; this simplifies the batching logic and, more importantly, makes automatic adaptive batching much easier to implement
- any OOMs will happen right away rather than near the end
"""
toks = x[0] + x[1]
return -len(toks), tuple(toks)
reord = utils.Reorderer(requests, _collate)
request_ds = RequestDataset(reord.get_reordered(), self.model.tokenizer, self.max_length)
request_dl = DataLoader(request_ds, collate_fn=pad_collate, batch_size=self.batch_size, shuffle=False)
def logits_to_results(batch, response):
input_token_ids_batch, _, lens, conti_lens = batch
batch_size = len(lens)
assert len(response['token_ids']) == batch_size, "Response's length not equal to batch size."
batch_res = []
for index in range(batch_size):
inp_len = lens[index]
conti_len = conti_lens[index]
inp_token_ids = input_token_ids_batch[index].tolist()[: inp_len + 1] # recover fake deleted token
response_token_ids = response['token_ids'][index][:inp_len]
assert response_token_ids == inp_token_ids[:-1], f"Mismatch in input tokens."
log_probs = response['full_logprob'][index][:inp_len] # torch.tensor
log_probs = log_probs[-conti_len:]
greedy_tokens = log_probs.argmax(dim=-1)
greedy_tokens = self.tokenizer.ids_to_tokens(greedy_tokens.cpu().numpy().tolist())
conti_token_ids = inp_token_ids[-conti_len:]
conti_tokens = self.tokenizer.ids_to_tokens(conti_token_ids)
max_equal = greedy_tokens == conti_tokens
log_probs = log_probs.cpu().to(torch.float32)
conti_enc = torch.tensor(self.tokenizer.tokens_to_ids(conti_tokens))
conti_probs = torch.gather(log_probs, 1, conti_enc.unsqueeze(-1)).squeeze(-1)
batch_res.append((float(conti_probs.sum()), bool(max_equal), greedy_tokens, conti_tokens))
return batch_res
res = []
for batch in tqdm.tqdm(request_dl):
# inputs = (token_ids, conti_lens)
inputs = (batch[0].cuda(), batch[1].cuda())
response = generate(
model=self.model,
inputs=inputs,
tokens_to_generate=1,
all_probs=True,
temperature=1.0,
add_BOS=False,
top_k=0,
top_p=0.9,
greedy=True,
compute_logprob=True,
repetition_penalty=1.0,
min_tokens_to_generate=0,
)
response = get_computeprob_response(self.tokenizer, response, inputs)
if is_global_rank_zero():
res.extend(logits_to_results(batch, response))
del inputs, response
return reord.get_original(res) if self.can_access_output() else None
def loglikelihood_rolling(self, requests):
loglikelihoods = []
len_rolling_token_windows = [0]
all_rolling_token_windows = []
for (string,) in requests:
rolling_token_windows = list(
map(
utils.make_disjoint_window,
utils.get_rolling_token_windows(
token_list=self.tokenizer.text_to_ids(string),
prefix_token=self.eos_id,
max_seq_len=self.max_length,
context_len=1,
),
)
)
len_rolling_token_windows.append(len(rolling_token_windows) + len_rolling_token_windows[-1])
all_rolling_token_windows.extend(rolling_token_windows)
string_nll = self._loglikelihood(all_rolling_token_windows)
if self.can_access_output():
string_nll = [x[0] for x in string_nll]
# discard is_greedy
for i in range(len(len_rolling_token_windows) - 1):
loglikelihoods.append(sum(string_nll[len_rolling_token_windows[i] : len_rolling_token_windows[i + 1]]))
return loglikelihoods
def greedy_until(self, requests):
raise NotImplementedError
def can_access_output(self):
return is_global_rank_zero()
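# Illustrative sketch (added for exposition; token ids are made up, and the
# real `pad_collate` above keeps batch_first=False and also carries
# conti_lens): samples are right-padded with EOS so the padded length is a
# multiple of 8, while `lens` records the true length minus one -- the "fake
# deleted" last token whose logit is never needed.
def _pad_collate_sketch(eos_id=50256):
    tokens = [torch.tensor([5, 6, 7, 8, 9]), torch.tensor([5, 6, 7])]
    lens = [len(t) - 1 for t in tokens]  # -> [4, 2]
    max_len = max(lens)  # -> 4
    if max_len % 8 != 0:
        max_len += 8 - (max_len % 8)  # -> 8
    tokens_pad = pad_sequence(tokens, batch_first=True, padding_value=eos_id)
    extra = torch.full((len(tokens), max_len - tokens_pad.shape[1]), eos_id)
    tokens_pad = torch.cat([tokens_pad, extra.type_as(tokens_pad)], dim=1)
    return tokens_pad, lens  # tokens_pad has shape (2, 8)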
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/models/nemo_gpt3.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lm_eval.base import LM
from . import dummy, nemo_gpt3, nemo_gpt3_prompt
MODEL_REGISTRY = {
"nemo-gpt3": nemo_gpt3.NeMo_GPT3LM_TP_PP,
"nemo-gpt3-prompt": nemo_gpt3_prompt.NeMo_GPT3_PROMPTLM,
"dummy": dummy.DummyLM,
}
def get_model(model_name: str) -> LM:
return MODEL_REGISTRY[model_name]
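# Illustrative usage sketch (added for exposition): the "dummy" backend needs
# no checkpoint, so it is handy for dry-running the harness plumbing.
def _example_dummy_model():
    lm = get_model("dummy")()
    # Each request is a (context, continuation) pair; the dummy backend just
    # returns a random log-likelihood and is_greedy=False for each.
    return lm.loglikelihood([("The capital of France is", " Paris")])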
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/models/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import tqdm
from lm_eval import utils
from lm_eval.base import LM
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.modules.common.text_generation_utils import generate, get_computeprob_response
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.get_rank import is_global_rank_zero
from pytorch_lightning.trainer.trainer import Trainer
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataloader import default_collate
from .nemo_gpt3 import DDP_initialize
class PromptRequestDataset(Dataset):
def __init__(self, requests, tokenizer) -> None:
super().__init__()
self.requests = requests
self.tokenizer = tokenizer
self.max_length = 2048
def __len__(self):
return len(self.requests)
def __getitem__(self, index):
context, continuation, task_id = self.requests[index]
context_enc = self.tokenizer.text_to_ids(context) if isinstance(context, str) else context
continuation_enc = self.tokenizer.text_to_ids(continuation) if isinstance(continuation, str) else continuation
# sanity check
assert len(context_enc) > 0, "encoded context length should be larger than 0"
assert len(continuation_enc) > 0, "target length should be larger than 0"
assert len(continuation_enc) <= self.max_length, "target length cannot be larger than max_length"
conti_len = len(continuation_enc)
inp_enc = torch.tensor((context_enc + continuation_enc)[-(self.max_length + 1) :])
return inp_enc, conti_len, task_id
def setup_trainer_and_model(args):
"""Setup model and optimizer."""
torch.set_grad_enabled(False)
assert args.nemo_model is not None or (
args.checkpoint_folder is not None and args.checkpoint_name is not None
), "Path to checkpoints is required."
# cast precision to int if 32 or 16
if args.precision in ["32", "16"]:
args.precision = int(args.precision)
model_parallel_size = args.tensor_model_parallel_size * args.pipeline_model_parallel_size
num_nodes = max(model_parallel_size // 8, 1)
num_gpus = min(model_parallel_size, 8)
trainer = Trainer(
strategy=NLPDDPStrategy(), devices=num_gpus, num_nodes=num_nodes, precision=args.precision, accelerator='gpu'
)
app_state = AppState()
if args.tensor_model_parallel_size > 1 or args.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = args.tensor_model_parallel_size * args.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
*_,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=args.tensor_model_parallel_size,
pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
)
if args.nemo_model is not None:
logging.info(f"**** Loading checkpoint from {args.nemo_model}")
model = MegatronGPTPromptLearningModel.restore_from(restore_path=args.nemo_model, trainer=trainer)
else:
raise NotImplementedError("Prompt models can only be loaded from .nemo checkpoints.")
model.freeze()
return trainer, model
class NeMo_GPT3_PROMPTLM(LM):
def __init__(self, args, truncate=False, batch_size=1):
super().__init__()
# get nemo megatron
logging.info(f"**** Building GPT Prompt model ...")
self.trainer, self.model = setup_trainer_and_model(args)
self.tokenizer = self.model.tokenizer
self.model.eval()
self.max_length = self.model.cfg.get("max_position_embeddings")
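# The check below assumes the GPT-2 BPE vocabulary, where "hello\n\nhello"
# encodes to ["hello", "\n", "\n", "hello"] = [31373, 198, 198, 31373].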
assert self.tokenizer.text_to_ids("hello\n\nhello") == [
31373,
198,
198,
31373,
], "Tokenizer text_to_ids is not working as expected."
self.truncate = truncate
self.batch_size = batch_size
# initialize DDP and move model to GPU
DDP_initialize(self.model)
self.model = self.model.cuda()
@classmethod
def create_from_arg_string(cls, arg_string, additional_config={}):
args = utils.simple_parse_args_string(arg_string)
args2 = {k: v for k, v in additional_config.items() if v is not None}
return cls(args, **args2)
def loglikelihood(self, requests):
return self._loglikelihood(requests)
"""
request: (context, continuation)
how this all works:
CTX CONT
inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
gpt2 \ \
logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the [:, -len(continuation_enc):, :self.VOCAB_SIZE] slice
cont_toks 4 5 6 7 8 9
when too long to fit in context, truncate from the left
"""
def _loglikelihood(self, requests):
def pad_collate(batch, eos_id=50256):
tokens, conti_lens, task_ids, *_ = map(list, zip(*batch))
lens = [len(token) - 1 for token in tokens] # fake delete last token by reducing input len
max_len = max(lens)
tokens_pad = pad_sequence(tokens, batch_first=False, padding_value=eos_id)
# Add padding to all samples to adapt nemo generate api
# tokens_pad = torch.cat((tokens_pad, torch.ones((1, len(tokens)), dtype=torch.int) * eos_id), 0)
new_batch = []
for token, lenn, conti_len, task_id in zip(tokens_pad.T, lens, conti_lens, task_ids):
new_batch.append((token, max_len, task_id, lenn, conti_len))
new_batch = default_collate(new_batch)
return new_batch
def _collate(x):  # used to reorder requests and remove duplicates
"""
the negative sign on len(toks) sorts descending - this has a few advantages:
- time estimates will always be over- rather than under-estimates, which is more useful for planning
- walking through the sorted list, the first request in a batch is always the longest, so it determines the batch's padded context length; this simplifies the batching logic and, more importantly, makes automatic adaptive batching much easier to implement
- any OOMs will happen right away rather than near the end
"""
toks = x[0] + x[1]
return -len(toks), tuple(toks)
reord = utils.Reorderer(requests, _collate)
request_ds = PromptRequestDataset(reord.get_reordered(), self.model.tokenizer)
request_dl = DataLoader(request_ds, collate_fn=pad_collate, batch_size=self.batch_size, shuffle=False)
def logits_to_results(batch, response):
input_token_ids_batch, _, _, lens, conti_lens = batch
batch_size = len(lens)
assert len(response["token_ids"]) == batch_size, "Response's length not equal to batch size."
batch_res = []
for index in range(batch_size):
inp_len = lens[index]
conti_len = conti_lens[index]
inp_token_ids = input_token_ids_batch[index].tolist()[: inp_len + 1] # recover fake deleted token
log_probs = response["full_logprob"][index][:inp_len] # torch.tensor
log_probs = log_probs[-conti_len:]
greedy_tokens = log_probs.argmax(dim=-1)
greedy_tokens = self.tokenizer.ids_to_tokens(greedy_tokens.cpu().numpy().tolist())
conti_token_ids = inp_token_ids[-conti_len:]
conti_tokens = self.tokenizer.ids_to_tokens(conti_token_ids)
max_equal = greedy_tokens == conti_tokens
log_probs = log_probs.cpu().to(torch.float32)
conti_enc = torch.tensor(self.tokenizer.tokens_to_ids(conti_tokens))
conti_probs = torch.gather(log_probs, 1, conti_enc.unsqueeze(-1)).squeeze(-1)
batch_res.append((float(conti_probs.sum()), bool(max_equal), greedy_tokens, conti_tokens))
return batch_res
res = []
for batch in tqdm.tqdm(request_dl):
# inputs = (token_ids, conti_lens)
inputs = (batch[0].cuda(), batch[1].cuda())
task_ids = torch.zeros((self.batch_size, 1), device='cuda')
response = generate(
model=self.model,
inputs=inputs,
task_ids=task_ids,
tokens_to_generate=1,
all_probs=True,
temperature=1.0,
add_BOS=False,
top_k=0,
top_p=0.9,
greedy=True,
compute_logprob=True,
repetition_penalty=1.0,
min_tokens_to_generate=0,
)
response = get_computeprob_response(self.tokenizer, response, inputs)
if is_global_rank_zero():
res.extend(logits_to_results(batch, response))
return reord.get_original(res) if self.can_access_output() else None
def loglikelihood_rolling(self, requests):
raise NotImplementedError
def greedy_until(self, requests):
raise NotImplementedError
def can_access_output(self):
return is_global_rank_zero()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/models/nemo_gpt3_prompt.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from lm_eval.base import LM
class DummyLM(LM):
def __init__(self):
pass
@classmethod
def create_from_arg_string(cls, arg_string):
return cls()
def loglikelihood(self, requests):
res = []
for _ in requests:
res.append((-random.random(), False))
return res
def greedy_until(self, requests):
res = []
for ctx, _ in requests:
res.append("lol")
assert ctx.strip() != ""
return res
def loglikelihood_rolling(self, requests):
res = []
for _ in requests:
res.append(-random.random())
return res
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/models/dummy.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multi-worker data preprocessing.
Example usage:
python preprocess.py \
--worker-mapping-file=<path/to/preprocess_mapping_file> \
--output-path=<output/path> \
--tokenizer-library <some_tokenizer_lib> \
--tokenizer-model <some_tokenizer_model> \
--dataset-impl mmap \
--workers 80 \
--apply-ftfy
"""
import argparse
import os
import subprocess
import time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Preprocess custom dataset", allow_abbrev=False)
parser.add_argument("--output-path", help="Path to store output bin files", required=True)
parser.add_argument("--worker-mapping-file", help="Decide which worker download which languages", required=True)
parser.add_argument(
"--workers-per-node",
default=int(os.environ.get("SLURM_NTASKS_PER_NODE", 1)),
help="Number of workers per node in preprocessing step",
type=int,
)
parser.add_argument("--bcp", action="store_true", help="Whether on BCP platform")
args, other_args = parser.parse_known_args()
workers_per_node = args.workers_per_node # local world size
if args.bcp:
global_rank = int(os.environ.get("OMPI_COMM_WORLD_RANK", 0))
task_id = global_rank // workers_per_node
rank = global_rank % workers_per_node
else: # on slurm based platforms
task_id = int(os.environ.get("SLURM_ARRAY_TASK_ID", 0))
rank = int(os.environ.get("LOCAL_RANK", 0))
with open(args.worker_mapping_file) as f:
mapping = f.readlines()
data_files = []
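# each line of the worker mapping file lists the comma-separated files assigned to one
# worker; this worker's line index is task_id * workers_per_node + rank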
if task_id * workers_per_node + rank < len(mapping):
data_files = mapping[task_id * workers_per_node + rank].strip().split(",")
print(f" ****** Task ID {task_id:02d} Rank {rank:02d} is preparing to preprocess {data_files}...")
os.makedirs(args.output_path, exist_ok=True)
start_time = time.time()
cmd = [
"python",
"/opt/NeMo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py",
]
for split in data_files:
if not split: # Remove empty split
continue
print(f" ****** Task ID {task_id:02d} Rank {rank:02d} starts to preprocess {os.path.basename(split)}...")
input_arg = ["--input", split]
output_arg = ["--output-prefix", os.path.join(args.output_path, os.path.basename(split))]
subprocess.check_call(cmd + input_arg + output_arg + other_args)
print(f" ****** Task ID {task_id:02d} Rank {rank:02d} finished preprocessing {os.path.basename(split)}...")
print(f" ****** Task ID {task_id:02d} Rank {rank:02d} time elapsed {(time.time() - start_time) / 60:.2f} min.")
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/custom_dataprep/preprocess.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/custom_dataprep/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dolly data preprocessing.
Example usage:
python preprocess.py --input=<path/to/data/file>
"""
import json
import numpy as np
from argparse import ArgumentParser
def to_jsonl(path_to_data):
print(f"Preprocessing data to jsonl format...")
output_path = f"{path_to_data.split('.')[0]}-output.jsonl"
with open(path_to_data, 'r') as f, open(output_path, 'w') as g:
for line in f:
line = json.loads(line)
context = line['context'].strip()
if context != "":
# Randomize context and instruction order.
context_first = np.random.randint(0, 2) == 0
if context_first:
instruction = line['instruction'].strip()
assert instruction != ""
input = f"{context}\n\n{instruction}"
output = line['response']
else:
instruction = line['instruction'].strip()
assert instruction != ""
input = f"{instruction}\n\n{context}"
output = line['response']
else:
input = line['instruction']
output = line['response']
g.write(json.dumps({'input': input, 'output': output, 'category': line['category']}) + '\n')
print(f"Data was successfully preprocessed and saved by {output_path} .")
def get_args():
parser = ArgumentParser()
parser.add_argument(
"--input", type=str, required=True, help="Path to jsonl dataset you want to prepare."
)
args = parser.parse_args()
return args
def main():
args = get_args()
path_to_data = args.input
to_jsonl(path_to_data)
if __name__ == '__main__':
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/dolly_dataprep/preprocess.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dolly data downloading.
Example usage:
python download.py \
--path_to_save=<path/to/save/dolly> \
--download_link=<link/to/download>
"""
import os
from argparse import ArgumentParser
default_link = "https://huggingface.co/datasets/databricks/databricks-dolly-15k/blob/main/databricks-dolly-15k.jsonl"
def get_file_name(link):
file_name = link.split('/')[-1]
return file_name
def get_args(default_link=default_link):
parser = ArgumentParser()
parser.add_argument(
"--path_to_save", type=str, required=True, help="Specify the path where to save the data."
)
parser.add_argument(
"--link_to_download", type=str, required=False, default=default_link , help="Specify the link where to download the data."
)
args = parser.parse_args()
return args
def main():
args = get_args()
path_to_save = args.path_to_save
link_to_download = args.link_to_download
file_name = get_file_name(link_to_download)
print(f"Downloading Dolly dataset {file_name} to {path_to_save} ...")
os.system(
f"cd {path_to_save} && "
f"wget {link_to_download}"
)
print(f"Dolly dataset {file_name} was successfully downloaded to {path_to_save} .")
if __name__ == '__main__':
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/dolly_dataprep/download.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/dolly_dataprep/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup multi-worker mapping file for mC4 preprocessing.
Example usage:
python setup_preprocess.py \
--c4-path=<path/to/c4> \
--soft-link-path=<path/to/save/softlinks> \
--languages='all' \
--node-array-size=20 \
--workers-per-node=8 \
--worker-mapping-file=<path/to/preprocess_mapping_file>
"""
import argparse
import glob
import math
import os
import shutil
from prepare import ALL_LANGS
def split_languages(c4_path, languages, max_split_size, soft_link_path, cleaned_en=False):
if languages == "all":
langs = ALL_LANGS
else:
langs = languages.split(",")
if soft_link_path is None:
soft_link_path = os.path.join(c4_path, "multilingual_soft_links")
os.makedirs(soft_link_path, exist_ok=True)
lang_splits_info = []
for lang in langs:
assert lang in ALL_LANGS, f"Language `{lang}` cannot be recognized."
if lang == "en" and cleaned_en:
file_list = sorted(glob.glob(os.path.join(c4_path, f"en/c4-train.*.json.gz")))
print(" ****** Using cleaned english data.")
else:
file_list = sorted(glob.glob(os.path.join(c4_path, f"multilingual/c4-{lang}.tfrecord-*.json.gz")))
file0 = file_list[0]
file_size = os.path.getsize(file0) * 1.0 / 1024 ** 3 # convert bytes to GB
num_files = len(file_list)
total_size = file_size * num_files
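# round the number of splits up to the next power of two so that each shard stays under
# max_split_size (sizes are in GB)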
num_splits = max(2 ** (math.ceil(math.log2(total_size / max_split_size))), 1)
assert num_files % num_splits == 0, f"Language `{lang}` cannot be split evenly."
for ind in range(num_splits):
lang_split = os.path.join(soft_link_path, "{:s}_{:03d}-{:03d}".format(lang, ind, num_splits))
os.makedirs(lang_split, exist_ok=True)
chunk_size = len(file_list) // num_splits # number of files in each split
file_chunk = file_list[ind * chunk_size : (ind + 1) * chunk_size]
for src in file_chunk:
dst = os.path.join(lang_split, os.path.basename(src))
if os.path.exists(dst):
os.remove(dst)
os.symlink(src, dst)
lang_splits_info.append((lang_split, file_size, chunk_size, total_size / num_splits))
print(f" ****** Prepare workers mapping to preprocess following language splits...")
for i, (lang_split, _, _, split_size) in enumerate(lang_splits_info):
print("{:>4d} {:>7.2f}GB {:s}".format(i + 1, split_size, lang_split))
return lang_splits_info
def distribute_lang_splits(lang_splits_info, avail_nodes, workers_per_node, max_split_size, worker_mapping_file):
avail_workers = avail_nodes * workers_per_node
distributed_splits = [[] for _ in range(avail_workers)]
distributed_size = [0] * avail_workers
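# greedy load balancing: sort the splits largest-first and always hand the next split to
# the currently least-loaded worker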
lang_splits_info.sort(key=lambda x: -x[-1])
for i, (lang_split, _, _, split_size) in enumerate(lang_splits_info):
min_ind = distributed_size.index(min(distributed_size))
distributed_splits[min_ind].append(lang_split)
distributed_size[min_ind] += split_size
zipped_lists = zip(distributed_size, distributed_splits)
sorted_pairs = sorted(zipped_lists)
tuples = zip(*sorted_pairs)
distributed_size, distributed_splits = [list(tuple) for tuple in tuples]
output = "\n".join([",".join(distributed_splits[i]) for i in range(avail_workers)])
with open(worker_mapping_file, "w") as file:
file.write(output)
print(f" ****** Workers mapping saved to {worker_mapping_file} ...")
for i in range(avail_workers):
print(
"{:>4d} {:>7.2f}GB {:s}".format(
i + 1, distributed_size[i], ",".join([os.path.basename(split) for split in distributed_splits[i]]),
)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Setup (m)C4 preprocessing")
parser.add_argument("--c4-path", help="Path to (m)C4 dataset repo folder", required=True)
parser.add_argument("--soft-link-path", help="Path to languages soft links for preprocessing")
parser.add_argument(
"--languages",
help="Specify the language list e.g. `en,es,zh,de,...` or "
"use `all` to preprocess all languages. All specified "
"languages have to be downloaded first",
required=True,
)
parser.add_argument("--node-array-size", help="Size of node array in download step", required=True, type=int)
parser.add_argument(
"--workers-per-node", default=8, help="Number of workers per node in preprocessing step", type=int,
)
parser.add_argument(
"--max-split-size",
default=70,
help="The language files are distributed in to smaller shards "
"for preprocessing, the size of each shard is less than "
"max-split-size. (unit in GB)",
type=int,
)
parser.add_argument("--worker-mapping-file", help="Where to save worker mapping file", required=True)
parser.add_argument(
"--cleaned-en",
action="store_true",
help="Whether to use cleaned C4 en dataset instead." "of uncleaned mC4 en",
)
args = parser.parse_args()
print(f" ****** Removing git lfs cache files in {args.c4_path} ...")
# Remove git lfs cached files
if os.path.exists(os.path.join(args.c4_path, ".git", "lfs")):
shutil.rmtree(os.path.join(args.c4_path, ".git", "lfs"))
lang_splits_info = split_languages(
args.c4_path, args.languages, args.max_split_size, args.soft_link_path, args.cleaned_en
)
distribute_lang_splits(
lang_splits_info, args.node_array_size, args.workers_per_node, args.max_split_size, args.worker_mapping_file,
)
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/mc4_dataprep/setup_preprocess.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multi-worker mC4 data preprocessing.
Example usage:
python preprocess.py \
--worker-mapping-file=<path/to/preprocess_mapping_file> \
--output-path=<output/path> \
--tokenizer-library <some_tokenizer_lib> \
--tokenizer-model <some_tokenizer_model> \
--dataset-impl mmap \
--workers 80 \
--preproc-folder \
--apply-ftfy
"""
import argparse
import os
import shutil
import subprocess
import time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Preprocess (m)C4", allow_abbrev=False)
parser.add_argument("--rm-downloaded", help="Whether to remove original downloaded data", action="store_true")
parser.add_argument("--output-path", help="Path to store output bin files", required=True)
parser.add_argument("--worker-mapping-file", help="Decide which worker download which languages", required=True)
parser.add_argument(
"--workers-per-node",
default=int(os.environ.get("SLURM_NTASKS_PER_NODE", 1)),
help="Number of workers per node in preprocessing step",
type=int,
)
parser.add_argument("--bcp", action="store_true", help="Whether on BCP platform")
args, other_args = parser.parse_known_args()
workers_per_node = args.workers_per_node # local world size
if args.bcp:
global_rank = int(os.environ.get("OMPI_COMM_WORLD_RANK", 0))
task_id = global_rank // workers_per_node
rank = global_rank % workers_per_node
else: # on slurm based platforms
task_id = int(os.environ.get("SLURM_ARRAY_TASK_ID", 0))
rank = int(os.environ.get("LOCAL_RANK", 0))
with open(args.worker_mapping_file) as f:
mapping = f.readlines()
lang_splits = []
if task_id * workers_per_node + rank < len(mapping):
lang_splits = mapping[task_id * workers_per_node + rank].strip().split(",")
print(" ****** Task ID {:02d} Rank {:02d} is preparing to preprocess {:}...".format(task_id, rank, lang_splits))
os.makedirs(args.output_path, exist_ok=True)
start_time = time.time()
cmd = [
"python",
"/opt/NeMo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py",
]
for split in lang_splits:
if not split: # Remove empty split
continue
print(
" ****** Task ID {:02d} Rank {:02d} starts to preprocess {:}...".format(
task_id, rank, os.path.basename(split)
)
)
input_arg = ["--input", split]
output_arg = ["--output-prefix", os.path.join(args.output_path, os.path.basename(split))]
subprocess.check_call(cmd + input_arg + output_arg + other_args)
print(
" ****** Task ID {:02d} Rank {:02d} finished preprocessing {:}...".format(
task_id, rank, os.path.basename(split)
)
)
print(
" ****** Task ID {:02d} Rank {:02d} time elapsed {:.2f} min.".format(
task_id, rank, (time.time() - start_time) / 60
)
)
if args.rm_downloaded:
for f in os.listdir(split):
os.remove(os.readlink(os.path.join(split, f)))
shutil.rmtree(split)
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/mc4_dataprep/preprocess.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Multi-worker mC4 data downloading.
Example usage:
python download.py \
--c4-path=<path/to/c4> \
--git-lfs-path=<path/to/git/lfs/folder> \
--worker-mapping-file=<path/to/download_mapping_file>
"""
import argparse
import os
import time
from prepare import LANG_SPLIT, setup_git_lfs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Download (m)C4")
parser.add_argument("--c4-path", help="Path to (m)C4 dataset repo folder", required=True)
parser.add_argument("--git-lfs-path", help="Path to git lfs", required=True)
parser.add_argument("--worker-mapping-file", help="Decide which worker download which languages", required=True)
parser.add_argument("--bcp", action="store_true", help="Whether on BCP platform")
args = parser.parse_args()
setup_git_lfs(args.git_lfs_path)
if args.bcp:
task_id = int(os.environ.get("OMPI_COMM_WORLD_RANK", 0)) # assume exec with mpirun
else: # on slurm based platforms
task_id = int(os.environ.get("SLURM_ARRAY_TASK_ID", 0))
with open(args.worker_mapping_file) as f:
mapping = f.readlines()
languages = mapping[task_id].strip().split(",")
print(" ****** Task ID {:02d} is preparing to download {:}...".format(task_id, languages))
lang_split_dict = {}
for lang in LANG_SPLIT:
splits = LANG_SPLIT[lang]
for split, pattern in splits:
lang_split_dict[split] = pattern
c4_path = args.c4_path
start_time = time.time()
for lang in languages:
print(" ****** Task ID {:02d} starts to download {:}...".format(task_id, lang))
if lang in lang_split_dict:
os.system(
f"cd {c4_path} && " f"git -c lfs.concurrenttransfers=20 lfs pull --include '{lang_split_dict[lang]}'"
)
else:
os.system(
f"cd {c4_path} && "
f"git -c lfs.concurrenttransfers=20 lfs pull --include 'multilingual/c4-{lang}.*.json.gz'"
)
print(" ****** Task ID {:02d} finished downloading {:}...".format(task_id, lang))
print(" ****** Task ID {:02d} time elapsed {:.2f} min.".format(task_id, (time.time() - start_time) / 60))
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/mc4_dataprep/download.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/mc4_dataprep/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Prepare mapping file to do multi-worker mC4 data downloading.
Example usage:
python prepare.py \
--data-path=<path/to/data/folder> \
--git-lfs-path=<path/to/git/lfs/folder> \
--languages='all' \
--node-array-size=20 \
--worker-mapping-file=<path/to/download_mapping_file>
"""
import argparse
import os
import subprocess
# fmt: off
ALL_LANGS = [
"af", "am", "ar", "az", "be", "bg", "bn", "ca", "co", "cs", "cy", "da", "de", "el", "en", "eo",
"es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "hi", "ht", "hu", "hy",
"id", "ig", "is", "it", "iw", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky", "la", "lb",
"lo", "lt", "lv", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "mt", "my", "ne", "nl", "no", "ny",
"pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "sm", "sn", "so", "sq", "sr", "st",
"su", "sv", "sw", "ta", "te", "tg", "th", "tr", "uk", "ur", "uz", "vi", "xh", "yi", "yo", "zh",
"zu", "ceb", "fil", "haw", "hmn",
]
# fmt: on
LANG_SPLIT = {
"en_cleaned": [("en_cleaned", "en/c4-train.*-of-01024.json.gz")],
"en": [
("en0", "multilingual/c4-en.tfrecord-0[01]*.json.gz"),
("en1", "multilingual/c4-en.tfrecord-0[23]*.json.gz"),
("en2", "multilingual/c4-en.tfrecord-0[45]*.json.gz"),
("en3", "multilingual/c4-en.tfrecord-0[67]*.json.gz"),
("en4", "multilingual/c4-en.tfrecord-0[89]*.json.gz"),
("en5", "multilingual/c4-en.tfrecord-1*.json.gz"),
],
"ru": [
("ru0", "multilingual/c4-ru.tfrecord-0[01]*.json.gz"),
("ru1", "multilingual/c4-ru.tfrecord-0[234]*.json.gz"),
],
}
def setup_git_lfs(git_lfs_path):
print(f" ****** Setting up git lfs under {git_lfs_path} ...")
if not os.path.exists(os.path.join(git_lfs_path, "install.sh")):
os.makedirs(git_lfs_path, exist_ok=True)
os.system(
f"cd {git_lfs_path} && "
f"wget https://github.com/git-lfs/git-lfs/releases/download/v3.0.2/git-lfs-linux-amd64-v3.0.2.tar.gz && "
f"tar -xvf git-lfs-linux-amd64-v3.0.2.tar.gz"
)
os.system(f"cd {git_lfs_path} && ./install.sh")
def prepare_c4_repo(data_path):
c4_path = os.path.join(data_path, "c4")
print(f" ****** Preparing (m)C4 dataset repo under {c4_path} ...")
os.system(f"cd {data_path} && " f"GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/datasets/allenai/c4")
os.system(f"cd {c4_path} && git lfs install")
def distribute_languages(data_path, languages, avail_nodes, worker_mapping_file, cleaned_en=False):
if languages == "all":
langs = ALL_LANGS
else:
langs = languages.split(",")
c4_path = os.path.join(data_path, "c4")
lang_info = []
for lang in langs:
assert lang in ALL_LANGS, f"Language `{lang}` cannot be recognized."
if lang == "en" and cleaned_en:
lang = "en_cleaned"
pattern = f"en/c4-train.00000-of-*.json.gz"
print(" ****** Using cleaned english data.")
else:
pattern = f"multilingual/c4-{lang}.tfrecord-00000-*.json.gz"
stdout = subprocess.check_output(f"cd {c4_path} && git lfs ls-files -s -I '{pattern}'", shell=True)
stdout = stdout.decode("utf-8").split()
file_name = stdout[2]
file_size = int(stdout[-2].strip("("))
num_files = int(file_name.split("-")[-1].split(".")[0])
if lang in LANG_SPLIT:
for split, pattern in LANG_SPLIT[lang]:
num_files = subprocess.check_output(
f"cd {c4_path} && git lfs ls-files -I '{pattern}' | wc -l", shell=True
)
num_files = int(num_files.decode("utf-8"))
total_size = file_size * num_files
lang_info.append((split, file_size, num_files, total_size))
else:
total_size = file_size * num_files
lang_info.append((lang, file_size, num_files, total_size))
print(f" ****** Prepare workers mapping to download following languages...")
for i, (lang, _, _, total_size) in enumerate(lang_info):
print("{:>4d} {:>8.1f}GB {:s}".format(i + 1, total_size / 1024, lang))
distributed_langs = [[] for _ in range(avail_nodes)]
distributed_size = [0] * avail_nodes
lang_info.sort(key=lambda x: -x[-1])
for lang, _, _, total_size in lang_info:
min_ind = distributed_size.index(min(distributed_size))
distributed_langs[min_ind].append(lang)
distributed_size[min_ind] += total_size
output = "\n".join([",".join(distributed_langs[i]) for i in range(avail_nodes)])
with open(worker_mapping_file, "w") as file:
file.write(output)
print(f" ****** Workers mapping saved to {worker_mapping_file} ...")
for i in range(avail_nodes):
print("{:>4d} {:>8.1f}GB {:s}".format(i + 1, distributed_size[i] / 1024, ",".join(distributed_langs[i])))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Setup (m)C4 download")
parser.add_argument("--data-path", help="Path to data storage folder", required=True)
parser.add_argument("--git-lfs-path", help="Path to git lfs", required=True)
parser.add_argument(
"--languages",
help="Specify the language list e.g. `en,es,zh,de,...` or " "use `all` to download all languages",
required=True,
)
parser.add_argument("--node-array-size", help="Size of node array in download step", required=True, type=int)
parser.add_argument("--worker-mapping-file", help="Where to save worker mapping file", required=True)
parser.add_argument(
"--cleaned-en",
action="store_true",
help="Whether to use cleaned C4 en dataset instead." "of uncleaned mC4 en",
)
args = parser.parse_args()
avail_nodes = args.node_array_size
setup_git_lfs(args.git_lfs_path)
prepare_c4_repo(args.data_path)
distribute_languages(args.data_path, args.languages, avail_nodes, args.worker_mapping_file, args.cleaned_en)
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/mc4_dataprep/prepare.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from time import sleep
import hydra
import nemo_launcher.utils.file_utils as utils # TODO: check if this is on the python path
import psutil
@hydra.main(config_path="conf", config_name="config")
def main(cfg):
launcher_scripts_path = cfg.get("launcher_scripts_path")
data_config = cfg.get("data_config")
data_dir = cfg.get("data_dir")
rm_extracted = cfg.get("rm_extracted")
tokenizer_type = cfg.get("tokenizer_type")
assert data_dir is not None, "data_dir must be a valid path"
# Vocab
vocab_dir = cfg.get("vocab_save_dir")
assert vocab_dir is not None, "vocab_save_dir must be a valid path."
if "gpt" in tokenizer_type.lower():
vocab_path = os.path.join(launcher_scripts_path, vocab_dir, "vocab.json")
else:
vocab_path = os.path.join(launcher_scripts_path, vocab_dir, "vocab.txt")
# Merges
merges_dir = cfg.get("merges_save_dir")
assert merges_dir is not None, "merges_save_dir must be a valid path."
merges_path = os.path.join(launcher_scripts_path, merges_dir, "merges.txt")
# Without intervention the make step is a no-op: the target
# "helpers.cpython-38-x86_64-linux-gnu.so" already exists, so nothing is rebuilt.
# Touching helpers.cpp forces make to recompile it.
megatron_dir = "/opt/NeMo/nemo/collections/nlp/data/language_modeling/megatron"
compiled_helpers_lib = os.path.join(megatron_dir, "compiled_helpers_lib")
compilecmd = f"cd /opt/NeMo; git rev-parse HEAD; " f"cd {megatron_dir}; " f"touch helpers.cpp; make;"
code_path = "/opt/NeMo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py"
runcmd = (
f"cd {megatron_dir}; "
f'export PYTHONPATH="/opt/NeMo/.:$PYTHONPATH"; '
f'export TRANSFORMERS_CACHE="/temp_root/.cache/"; '
f"python3 {code_path} "
)
if cfg.get("cluster_type") == "bcm":
file_number = int(os.environ.get("SLURM_ARRAY_TASK_ID"))
extracted_path = os.path.join(data_dir, f"{file_number:02d}.jsonl")
model_type = 't5'
if 'bert' in data_config:
model_type = 'bert'
elif 'gpt3' in data_config:
model_type = 'gpt3'
output_prefix = os.path.join(data_dir, f"my-{model_type}_{file_number:02d}")
flags = (
f"--input {extracted_path} "
f"--output-prefix {output_prefix} "
f"--vocab {vocab_path} "
f"--dataset-impl mmap "
f"--tokenizer-library megatron "
f"--tokenizer-type {tokenizer_type} "
f"--workers $SLURM_CPUS_ON_NODE "
)
if model_type == 'bert':
# Used for the BERT binary head (next sentence prediction)
flags += "--split-sentences "
else:
flags += f"--merge-file {merges_path} " f"--append-eod "
os.system(compilecmd)
runcmd += f"{flags} "
os.system(runcmd)
if rm_extracted:
os.remove(extracted_path)
elif cfg.get("cluster_type") == "bcp":
file_numbers = cfg.get("file_numbers")
files_list = utils.convert_file_numbers(file_numbers)
# Assumes launched via mpirun:
# mpirun -N <nnodes> -npernode 1 ...
wrank = int(os.environ.get("OMPI_COMM_WORLD_RANK", 0))
wsize = int(os.environ.get("OMPI_COMM_WORLD_SIZE", 0))
lrank = int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK", 0))
if lrank == 0:
# Compile once per node. Should be one container instance per node.
os.system(compilecmd)
os.system(f"touch {compiled_helpers_lib}")
else:
while not os.path.exists(compiled_helpers_lib):
sleep(1)
files_list_groups = utils.split_list(files_list, wsize)
files_to_preproc = files_list_groups[wrank]
ncpus = psutil.cpu_count(logical=False)
for file_number in files_to_preproc:
extracted_path = os.path.join(data_dir, f"{file_number:02d}.jsonl")
model_type = 't5'
if 'bert' in data_config:
model_type = 'bert'
elif 'gpt3' in data_config:
model_type = 'gpt3'
output_prefix = os.path.join(data_dir, f"my-{model_type}_{file_number:02d}")
flags = (
f"--input {extracted_path} "
f"--output-prefix {output_prefix} "
f"--vocab {vocab_path} "
f"--dataset-impl mmap "
f"--tokenizer-library megatron "
f"--tokenizer-type {tokenizer_type} "
f"--workers {ncpus} "
)
if model_type == 'bert':
# Used for the BERT binary head (next sentence prediction)
flags += "--split-sentences "
else:
flags += f"--merge-file {merges_path} " f"--append-eod "
proc = subprocess.Popen(runcmd + flags, shell=True)
proc.wait()
if rm_extracted:
os.remove(extracted_path)
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/pile_dataprep/preprocess.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import hydra
import nemo_launcher.utils.file_utils as utils
@hydra.main(config_path="conf", config_name="config")
def main(cfg):
"""Function to download the pile dataset files on BCM.
Arguments:
cfg: main config file.
"""
data_dir = cfg.get("data_dir")
pile_url_train = cfg.get("the_pile_url")
assert data_dir is not None, "data_dir must be a valid path."
if cfg.get("cluster_type") == "bcm":
file_number = int(os.environ.get("SLURM_ARRAY_TASK_ID"))
url = f"{pile_url_train}{file_number:02d}.jsonl.zst"
output_file = f"{file_number:02d}.jsonl.zst"
downloaded_path = utils.download_single_file(url, data_dir, output_file)
if cfg.get("cluster_type") == "bcp":
file_numbers = cfg["file_numbers"]
# Downloading the files
files_list = utils.convert_file_numbers(file_numbers)
# Assumes launched via mpirun:
# mpirun -N <nnodes> -npernode <preproc_npernode> ...
# where preproc_npernode is set in dataprep config -> bcp config
wrank = int(os.environ.get("OMPI_COMM_WORLD_RANK", 0))
wsize = int(os.environ.get("OMPI_COMM_WORLD_SIZE", 0))
files_list_groups = utils.split_list(files_list, wsize)
files_to_download = files_list_groups[wrank]
proc_list = []
for file_number in files_to_download:
url = f"{pile_url_train}{file_number:02d}.jsonl.zst"
output_file = f"{file_number:02d}.jsonl.zst"
# TODO: Consider multiprocessing.Pool instead.
proc = multiprocessing.Process(target=utils.download_single_file, args=(url, data_dir, output_file))
proc_list.append(proc)
proc.start()
for proc in proc_list:
proc.join()
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/pile_dataprep/download.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/pile_dataprep/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import hydra
import nemo_launcher.utils.file_utils as utils
@hydra.main(config_path="conf", config_name="config")
def main(cfg) -> None:
"""Function to extract the pile dataset files on BCM.
Arguments:
cfg: main config file.
"""
data_dir = cfg.get("data_dir")
rm_downloaded = cfg.get("rm_downloaded")
assert data_dir is not None, "data_dir must be a valid path."
if cfg.get("cluster_type") == "bcm":
file_number = int(os.environ.get("SLURM_ARRAY_TASK_ID"))
downloaded_path = os.path.join(data_dir, f"{file_number:02d}.jsonl.zst")
output_file = f"{file_number:02d}.jsonl"
utils.extract_single_zst_file(downloaded_path, data_dir, output_file, rm_downloaded)
elif cfg.get("cluster_type") == "bcp":
file_numbers = cfg.get("file_numbers")
# Downloading the files
files_list = utils.convert_file_numbers(file_numbers)
# Assumes launched via mpirun:
# mpirun -N <nnodes> -npernode 1 ...
wrank = int(os.environ.get("OMPI_COMM_WORLD_RANK", 0))
wsize = int(os.environ.get("OMPI_COMM_WORLD_SIZE", 0))
files_list_groups = utils.split_list(files_list, wsize)
files_to_extract = files_list_groups[wrank]
proc_list = []
for file_number in files_to_extract:
downloaded_path = os.path.join(data_dir, f"{file_number:02d}.jsonl.zst")
output_file = f"{file_number:02d}.jsonl"
# TODO: Consider multiprocessing.Pool instead.
proc = multiprocessing.Process(
target=utils.extract_single_zst_file, args=(downloaded_path, data_dir, output_file, rm_downloaded),
)
proc_list.append(proc)
proc.start()
for proc in proc_list:
proc.join()
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/dataprep_scripts/pile_dataprep/extract.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import string
from collections import Counter
try:
from nemo.utils.get_rank import is_global_rank_zero
except ModuleNotFoundError:
print("Importing NeMo module failed, checkout the NeMo submodule")
"""
This script can be used to calculate exact match and F1 scores for many different tasks, not just SQuAD.
Example command for T5 Preds
```
python squad_metric_calc.py \
--ground-truth squad_test_gt.jsonl \
--preds squad_preds_t5.txt
```
Example command for GPT Preds
```
python squad_metric_calc.py \
--ground-truth squad_test_gt.jsonl \
--preds squad_preds_gpt.txt \
--split-string "answer:"
```
In this case, the prediction file will be split on "answer: " when looking for the LM's predicted answer.
"""
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def main():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument(
'--ground-truth',
type=str,
help="ground truth .jsonl file made from /NeMo/scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py",
)
parser.add_argument(
'--preds',
type=str,
help="Text file with test set prompts + model predictions. Prediction file can be made by running NeMo/examples/nlp/language_modeling/megatron_gpt_prompt_learning_eval.py",
)
parser.add_argument(
'--split-string',
type=str,
help="The text at the end of the prompt, write before the predicted answer. This will be used to find the model's predictions in pred files when the pred file containers both the prompt and prediction.",
default=None,
) # If the pred file only has predictions, just pass None
args = parser.parse_args()
ground_truth_file = args.ground_truth
pred_file = args.preds
preds = open(pred_file, encoding="utf-8").readlines()
ground_truth = open(ground_truth_file).readlines()
f1 = exact_match = total = 0
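# predictions and ground-truth records are aligned line by line; each example's score takes
# the max over all reference answers, and scores are averaged over the dataset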
for i in range(len(preds)):
truth = json.loads(ground_truth[i])
pred_answer = preds[i]
# Need to separate predictions from the prompt, splitting on the provided "split string"
if args.split_string is not None:
pred_answer = pred_answer.split(args.split_string)[-1].strip()
true_answers = truth["answer"]
if not isinstance(true_answers, list):
true_answers = [true_answers]
exact_match += metric_max_over_ground_truths(exact_match_score, pred_answer, true_answers)
f1 += metric_max_over_ground_truths(f1_score, pred_answer, true_answers)
total += 1
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
metric = {'exact_match': exact_match, 'f1': f1, 'total': total}
if is_global_rank_zero():
print(metric)
with open(os.path.join(os.path.dirname(pred_file), 'squad_metric.json'), 'w',) as outfile:
json.dump(metric, outfile)
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/metric_calculation/squad_metric_calc.py |
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/metric_calculation/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import tempfile
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--pred_file", type=str, required=True, help="jsonl file with preds, inputs and targets.")
parser.add_argument(
"--target_file",
type=str,
required=True,
help="jsonl file that contains the squad dev set with multiple correct answers.",
)
parser.add_argument(
"--squad_eval_script_path", type=str, required=True, help="path to the squad evaluation script."
)
args = parser.parse_args()
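# write the raw 'pred' strings to a temporary text file and delegate the exact-match / F1
# computation to the SQuAD metric script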
with tempfile.TemporaryDirectory() as tmp:
with open(args.pred_file, 'r') as preds_file:
lines = preds_file.readlines()
for line in lines:
line = json.loads(line)
pred = line['pred']
pred = pred.strip().replace('\n', ' ')
with open(f"{tmp}/preds.text", "a") as f:
f.write(pred + "\n")
os.system(f"python {args.squad_eval_script_path} --ground-truth {args.target_file} --preds {tmp}/preds.text")
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/metric_calculation/fine_tuning_metric_calc.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
import re
from pathlib import Path
def version(root_path):
"""Returns the version taken from __init__.py
Parameters
----------
root_path : pathlib.Path
path to the root of the package
Reference
---------
https://packaging.python.org/guides/single-sourcing-package-version/
"""
version_path = root_path.joinpath('torch_harmonics', '__init__.py')
with version_path.open() as f:
version_file = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def readme(root_path):
"""Returns the text content of the README.md of the package
Parameters
----------
root_path : pathlib.Path
path to the root of the package
"""
with root_path.joinpath('README.md').open(encoding='UTF-8') as f:
return f.read()
root_path = Path(__file__).parent
README = readme(root_path)
VERSION = version(root_path)
config = {
'name': 'torch_harmonics',
'packages': find_packages(),
'description': 'A differentiable spherical harmonic transform for PyTorch.',
'long_description': README,
'long_description_content_type' : 'text/markdown',
'url' : 'https://github.com/NVIDIA/torch-harmonics',
'authors': [
{'name': "Boris Bonev", 'email': "[email protected]"},
],
'version': VERSION,
'install_requires': ['torch', 'numpy'],
'extras_require': {
'sfno': ['tensorly', 'tensorly-torch'],
},
'license': 'Modified BSD',
'scripts': [],
'include_package_data': True,
'classifiers': [
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3'
],
}
setup(**config)
| torch-harmonics-main | setup.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ignore this (just for development without installation)
import sys
import os
sys.path.append("..")
sys.path.append(".")
import torch
import torch.nn.functional as F
import torch.distributed as dist
import torch_harmonics as harmonics
import torch_harmonics.distributed as thd
try:
from tqdm import tqdm
except:
tqdm = lambda x : x
# set up distributed
world_rank = int(os.getenv('WORLD_RANK', 0))
grid_size_h = int(os.getenv('GRID_H', 1))
grid_size_w = int(os.getenv('GRID_W', 1))
port = int(os.getenv('MASTER_PORT', 0))
master_address = os.getenv('MASTER_ADDR', 'localhost')
world_size = grid_size_h * grid_size_w
dist.init_process_group(backend = 'nccl',
init_method = f"tcp://{master_address}:{port}",
rank = world_rank,
world_size = world_size)
local_rank = world_rank % torch.cuda.device_count()
device = torch.device(f"cuda:{local_rank}")
# compute local ranks in h and w:
# rank = wrank + grid_size_w * hrank
wrank = world_rank % grid_size_w
hrank = world_rank // grid_size_w
w_group = None
h_group = None
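# build one process group per grid row (constant hrank -> w_group) and one per grid column
# (constant wrank -> h_group); dist.new_group is collective, so every rank creates every group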
# now set up the comm grid:
wgroups = []
for h in range(grid_size_h):
start = h * grid_size_w
end = start + grid_size_w
wgroups.append(list(range(start, end)))
print(wgroups)
for grp in wgroups:
if len(grp) == 1:
continue
tmp_group = dist.new_group(ranks=grp)
if wrank in grp:
w_group = tmp_group
# transpose:
hgroups = [sorted(list(i)) for i in zip(*wgroups)]
print(hgroups)
for grp in hgroups:
if len(grp) == 1:
continue
tmp_group = dist.new_group(ranks=grp)
if hrank in grp:
h_group = tmp_group
# set device
torch.cuda.set_device(device.index)
# set seed
torch.manual_seed(333)
torch.cuda.manual_seed(333)
if world_rank == 0:
print(f"Running distributed test on grid H x W = {grid_size_h} x {grid_size_w}")
# initializing sht
thd.init(h_group, w_group)
# common parameters
B, C, H, W = 1, 8, 721, 1440
Hloc = (H + grid_size_h - 1) // grid_size_h
Wloc = (W + grid_size_w - 1) // grid_size_w
Hpad = grid_size_h * Hloc - H
Wpad = grid_size_w * Wloc - W
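# pad H and W up to multiples of the grid dimensions so the global tensor splits evenly
# across ranks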
# do serial tests first:
forward_transform_local = harmonics.RealSHT(nlat=H, nlon=W).to(device)
forward_transform_dist = thd.DistributedRealSHT(nlat=H, nlon=W).to(device)
Lloc = (forward_transform_dist.lpad + forward_transform_dist.lmax) // grid_size_h
Mloc = (forward_transform_dist.mpad + forward_transform_dist.mmax) // grid_size_w
# create tensors
inp_full = torch.randn((B, C, H, W), dtype=torch.float32, device=device)
# pad
with torch.no_grad():
inp_pad = F.pad(inp_full, (0, Wpad, 0, Hpad))
# split in W
inp_local = torch.split(inp_pad, split_size_or_sections=Wloc, dim=-1)[wrank]
# split in H
inp_local = torch.split(inp_local, split_size_or_sections=Hloc, dim=-2)[hrank]
# do FWD transform
out_full = forward_transform_local(inp_full)
out_local = forward_transform_dist(inp_local)
# gather the local data
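# (all_gather along W then H, then drop the padded coefficients so the shape matches the
# serial transform's (lmax, mmax) output)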
# gather in W
if grid_size_w > 1:
olist = [torch.empty_like(out_local) for _ in range(grid_size_w)]
olist[wrank] = out_local
dist.all_gather(olist, out_local, group=w_group)
out_full_gather = torch.cat(olist, dim=-1)
out_full_gather = out_full_gather[..., :forward_transform_dist.mmax]
else:
out_full_gather = out_local
# gather in h
if grid_size_h > 1:
olist = [torch.empty_like(out_full_gather) for _ in range(grid_size_h)]
olist[hrank] = out_full_gather
dist.all_gather(olist, out_full_gather, group=h_group)
out_full_gather = torch.cat(olist, dim=-2)
out_full_gather = out_full_gather[..., :forward_transform_dist.lmax, :]
if world_rank == 0:
print(f"Local Out: sum={out_full.abs().sum().item()}, max={out_full.abs().max().item()}, min={out_full.abs().min().item()}")
print(f"Dist Out: sum={out_full_gather.abs().sum().item()}, max={out_full_gather.abs().max().item()}, min={out_full_gather.abs().min().item()}")
diff = (out_full-out_full_gather).abs()
print(f"Out Difference: abs={diff.sum().item()}, rel={diff.sum().item() / (0.5*(out_full.abs().sum() + out_full_gather.abs().sum()))}, max={diff.abs().max().item()}")
print("")
# create split input grad
with torch.no_grad():
# create full grad
ograd_full = torch.randn_like(out_full)
# pad
ograd_pad = F.pad(ograd_full, [0, forward_transform_dist.mpad, 0, forward_transform_dist.lpad])
# split in M
ograd_local = torch.split(ograd_pad, split_size_or_sections=Mloc, dim=-1)[wrank]
# split in H
ograd_local = torch.split(ograd_local, split_size_or_sections=Lloc, dim=-2)[hrank]
# backward pass:
# local
inp_full.requires_grad = True
out_full = forward_transform_local(inp_full)
out_full.backward(ograd_full)
igrad_full = inp_full.grad.clone()
# distributed
inp_local.requires_grad = True
out_local = forward_transform_dist(inp_local)
out_local.backward(ograd_local)
igrad_local = inp_local.grad.clone()
# gather
# gather in W
if grid_size_w > 1:
olist = [torch.empty_like(igrad_local) for _ in range(grid_size_w)]
olist[wrank] = igrad_local
dist.all_gather(olist, igrad_local, group=w_group)
igrad_full_gather = torch.cat(olist, dim=-1)
igrad_full_gather = igrad_full_gather[..., :W]
else:
igrad_full_gather = igrad_local
# gather in h
if grid_size_h > 1:
olist = [torch.empty_like(igrad_full_gather) for _ in range(grid_size_h)]
olist[hrank] = igrad_full_gather
dist.all_gather(olist, igrad_full_gather, group=h_group)
igrad_full_gather = torch.cat(olist, dim=-2)
igrad_full_gather = igrad_full_gather[..., :H, :]
if world_rank == 0:
print(f"Local Grad: sum={igrad_full.abs().sum().item()}, max={igrad_full.abs().max().item()}, min={igrad_full.abs().min().item()}")
print(f"Dist Grad: sum={igrad_full_gather.abs().sum().item()}, max={igrad_full_gather.abs().max().item()}, min={igrad_full_gather.abs().min().item()}")
diff = (igrad_full-igrad_full_gather).abs()
print(f"Grad Difference: abs={diff.sum().item()}, rel={diff.sum().item() / (0.5*(igrad_full.abs().sum() + igrad_full_gather.abs().sum()))}, max={diff.abs().max().item()}")
| torch-harmonics-main | tests/test_distributed_forward_transform.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ignore this (just for development without installation)
import sys
import os
sys.path.append("..")
sys.path.append(".")
import torch
import torch.nn.functional as F
import torch.distributed as dist
import torch_harmonics as harmonics
import torch_harmonics.distributed as thd
try:
from tqdm import tqdm
except:
tqdm = lambda x : x
# set up distributed
world_rank = int(os.getenv('WORLD_RANK', 0))
grid_size_h = int(os.getenv('GRID_H', 1))
grid_size_w = int(os.getenv('GRID_W', 1))
port = int(os.getenv('MASTER_PORT', 0))
master_address = os.getenv('MASTER_ADDR', 'localhost')
world_size = grid_size_h * grid_size_w
dist.init_process_group(backend = 'nccl',
init_method = f"tcp://{master_address}:{port}",
rank = world_rank,
world_size = world_size)
local_rank = world_rank % torch.cuda.device_count()
device = torch.device(f"cuda:{local_rank}")
# compute local ranks in h and w:
# rank = wrank + grid_size_w * hrank
wrank = world_rank % grid_size_w
hrank = world_rank // grid_size_w
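# (added note) worked example: with GRID_H=2 and GRID_W=4, world_rank=6 gives
# wrank = 6 % 4 = 2 and hrank = 6 // 4 = 1, i.e. row 1, column 2 of the process grid.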
w_group = None
h_group = None
# now set up the comm grid:
wgroups = []
for h in range(grid_size_h):
    start = h * grid_size_w
    end = start + grid_size_w
wgroups.append(list(range(start, end)))
print(wgroups)
for grp in wgroups:
if len(grp) == 1:
continue
tmp_group = dist.new_group(ranks=grp)
    if world_rank in grp:
w_group = tmp_group
# transpose:
hgroups = [sorted(list(i)) for i in zip(*wgroups)]
print(hgroups)
for grp in hgroups:
if len(grp) == 1:
continue
tmp_group = dist.new_group(ranks=grp)
    if world_rank in grp:
h_group = tmp_group
# set device
torch.cuda.set_device(device.index)
# set seed
torch.manual_seed(333)
torch.cuda.manual_seed(333)
if world_rank == 0:
print(f"Running distributed test on grid H x W = {grid_size_h} x {grid_size_w}")
# initializing sht
thd.init(h_group, w_group)
# common parameters
B, C, H, W = 1, 8, 721, 1440
Hloc = (H + grid_size_h - 1) // grid_size_h
Wloc = (W + grid_size_w - 1) // grid_size_w
Hpad = grid_size_h * Hloc - H
Wpad = grid_size_w * Wloc - W
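# (added note) worked example of the spatial padding above: for H=721, W=1440 on a
# 2 x 4 process grid, Hloc = ceil(721/2) = 361 with Hpad = 2*361 - 721 = 1, while
# Wloc = 1440/4 = 360 with Wpad = 0.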
# do serial tests first:
forward_transform_local = harmonics.RealSHT(nlat=H, nlon=W).to(device)
backward_transform_local = harmonics.InverseRealSHT(nlat=H, nlon=W).to(device)
backward_transform_dist = thd.DistributedInverseRealSHT(nlat=H, nlon=W).to(device)
Lpad = backward_transform_dist.lpad
Mpad = backward_transform_dist.mpad
Lloc = (Lpad + backward_transform_dist.lmax) // grid_size_h
Mloc = (Mpad + backward_transform_dist.mmax) // grid_size_w
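# (added note) Lloc and Mloc are the padded per-rank chunk sizes of the spectral l and m
# dimensions; they equal the ldist and mdist values computed inside the distributed transform.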
# create tensors
dummy_full = torch.randn((B, C, H, W), dtype=torch.float32, device=device)
inp_full = forward_transform_local(dummy_full)
# pad
with torch.no_grad():
inp_pad = F.pad(inp_full, (0, Mpad, 0, Lpad))
    # split in M
inp_local = torch.split(inp_pad, split_size_or_sections=Mloc, dim=-1)[wrank]
    # split in L
inp_local = torch.split(inp_local, split_size_or_sections=Lloc, dim=-2)[hrank]
    # do BWD (inverse) transform
out_full = backward_transform_local(inp_full)
out_local = backward_transform_dist(inp_local)
# gather the local data
# gather in W
if grid_size_w > 1:
olist = [torch.empty_like(out_local) for _ in range(grid_size_w)]
olist[wrank] = out_local
dist.all_gather(olist, out_local, group=w_group)
out_full_gather = torch.cat(olist, dim=-1)
out_full_gather = out_full_gather[..., :W]
else:
out_full_gather = out_local
# gather in h
if grid_size_h > 1:
olist = [torch.empty_like(out_full_gather) for _ in range(grid_size_h)]
olist[hrank] = out_full_gather
dist.all_gather(olist, out_full_gather, group=h_group)
out_full_gather = torch.cat(olist, dim=-2)
out_full_gather = out_full_gather[..., :H, :]
if world_rank == 0:
print(f"Local Out: sum={out_full.abs().sum().item()}, max={out_full.abs().max().item()}, min={out_full.abs().min().item()}")
print(f"Dist Out: sum={out_full_gather.abs().sum().item()}, max={out_full_gather.abs().max().item()}, min={out_full_gather.abs().min().item()}")
diff = (out_full-out_full_gather).abs()
print(f"Out Difference: abs={diff.sum().item()}, rel={diff.sum().item() / (0.5*(out_full.abs().sum() + out_full_gather.abs().sum()))}, max={diff.abs().max().item()}")
print("")
# create split input grad
with torch.no_grad():
# create full grad
ograd_full = torch.randn_like(out_full)
# pad
ograd_pad = F.pad(ograd_full, [0, Wpad, 0, Hpad])
# split in W
ograd_local = torch.split(ograd_pad, split_size_or_sections=Wloc, dim=-1)[wrank]
# split in H
ograd_local = torch.split(ograd_local, split_size_or_sections=Hloc, dim=-2)[hrank]
# backward pass:
# local
inp_full.requires_grad = True
out_full = backward_transform_local(inp_full)
out_full.backward(ograd_full)
igrad_full = inp_full.grad.clone()
# distributed
inp_local.requires_grad = True
out_local = backward_transform_dist(inp_local)
out_local.backward(ograd_local)
igrad_local = inp_local.grad.clone()
# gather
# gather in W
if grid_size_w > 1:
olist = [torch.empty_like(igrad_local) for _ in range(grid_size_w)]
olist[wrank] = igrad_local
dist.all_gather(olist, igrad_local, group=w_group)
igrad_full_gather = torch.cat(olist, dim=-1)
igrad_full_gather = igrad_full_gather[..., :backward_transform_dist.mmax]
else:
igrad_full_gather = igrad_local
# gather in h
if grid_size_h > 1:
olist = [torch.empty_like(igrad_full_gather) for _ in range(grid_size_h)]
olist[hrank] = igrad_full_gather
dist.all_gather(olist, igrad_full_gather, group=h_group)
igrad_full_gather = torch.cat(olist, dim=-2)
igrad_full_gather = igrad_full_gather[..., :backward_transform_dist.lmax, :]
if world_rank == 0:
print(f"Local Grad: sum={igrad_full.abs().sum().item()}, max={igrad_full.abs().max().item()}, min={igrad_full.abs().min().item()}")
print(f"Dist Grad: sum={igrad_full_gather.abs().sum().item()}, max={igrad_full_gather.abs().max().item()}, min={igrad_full_gather.abs().min().item()}")
diff = (igrad_full-igrad_full_gather).abs()
print(f"Grad Difference: abs={diff.sum().item()}, rel={diff.sum().item() / (0.5*(igrad_full.abs().sum() + igrad_full_gather.abs().sum()))}, max={diff.abs().max().item()}")
| torch-harmonics-main | tests/test_distributed_backward_transform.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
from .sht import InverseRealSHT
class GaussianRandomFieldS2(torch.nn.Module):
def __init__(self, nlat, alpha=2.0, tau=3.0, sigma=None, radius=1.0, grid="equiangular", dtype=torch.float32):
super().__init__()
r"""
A mean-zero Gaussian Random Field on the sphere with Matern covariance:
C = sigma^2 (-Lap + tau^2 I)^(-alpha).
Lap is the Laplacian on the sphere, I the identity operator,
and sigma, tau, alpha are scalar parameters.
Note: C is trace-class on L^2 if and only if alpha > 1.
Parameters
----------
nlat : int
Number of latitudinal modes;
longitudinal modes are 2*nlat.
alpha : float, default is 2
Regularity parameter. Larger means smoother.
tau : float, default is 3
            Length-scale parameter. Larger means more scales.
sigma : float, default is None
            Scale parameter. Larger means a larger overall amplitude.
If None, sigma = tau**(0.5*(2*alpha - 2.0)).
radius : float, default is 1
Radius of the sphere.
grid : string, default is "equiangular"
Grid type. Currently supports "equiangular" and
"legendre-gauss".
dtype : torch.dtype, default is torch.float32
Numerical type for the calculations.
"""
#Number of latitudinal modes.
self.nlat = nlat
#Default value of sigma if None is given.
if sigma is None:
assert alpha > 1.0, f"Alpha must be greater than one, got {alpha}."
sigma = tau**(0.5*(2*alpha - 2.0))
# Inverse SHT
self.isht = InverseRealSHT(self.nlat, 2*self.nlat, grid=grid, norm='backward').to(dtype=dtype)
#Square root of the eigenvalues of C.
sqrt_eig = torch.tensor([j*(j+1) for j in range(self.nlat)]).view(self.nlat,1).repeat(1, self.nlat+1)
sqrt_eig = torch.tril(sigma*(((sqrt_eig/radius**2) + tau**2)**(-alpha/2.0)))
sqrt_eig[0,0] = 0.0
sqrt_eig = sqrt_eig.unsqueeze(0)
self.register_buffer('sqrt_eig', sqrt_eig)
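        # (added note) the Laplace-Beltrami eigenvalues on the sphere are -l*(l+1)/radius^2,
        # so sqrt_eig[l, m] = sigma * (l*(l+1)/radius^2 + tau^2)^(-alpha/2) for m <= l; the
        # (0, 0) entry is zeroed so that sampled fields have zero mean.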
#Save mean and var of the standard Gaussian.
#Need these to re-initialize distribution on a new device.
mean = torch.tensor([0.0]).to(dtype=dtype)
var = torch.tensor([1.0]).to(dtype=dtype)
self.register_buffer('mean', mean)
self.register_buffer('var', var)
#Standard normal noise sampler.
self.gaussian_noise = torch.distributions.normal.Normal(self.mean, self.var)
def forward(self, N, xi=None):
r"""
Sample random functions from a spherical GRF.
Parameters
----------
N : int
Number of functions to sample.
xi : torch.Tensor, default is None
Noise is a complex tensor of size (N, nlat, nlat+1).
If None, new Gaussian noise is sampled.
If xi is provided, N is ignored.
Output
-------
u : torch.Tensor
N random samples from the GRF returned as a
            tensor of size (N, nlat, 2*nlat) on an equiangular grid.
"""
#Sample Gaussian noise.
if xi is None:
xi = self.gaussian_noise.sample(torch.Size((N, self.nlat, self.nlat + 1, 2))).squeeze()
xi = torch.view_as_complex(xi)
#Karhunen-Loeve expansion.
u = self.isht(xi*self.sqrt_eig)
return u
#Override cuda and to methods so sampler gets initialized with mean
#and variance on the correct device.
def cuda(self, *args, **kwargs):
super().cuda(*args, **kwargs)
self.gaussian_noise = torch.distributions.normal.Normal(self.mean, self.var)
return self
def to(self, *args, **kwargs):
super().to(*args, **kwargs)
self.gaussian_noise = torch.distributions.normal.Normal(self.mean, self.var)
return self
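# Hedged usage sketch (added; not part of the original module): draw a few samples from
# the spherical Gaussian random field. The resolution and parameters below are
# illustrative choices only (run with `python -m torch_harmonics.random_fields`).
if __name__ == "__main__":
    grf = GaussianRandomFieldS2(nlat=64, alpha=2.0, tau=3.0)
    u = grf(4)  # tensor of shape (4, 64, 128) on the equiangular grid
    print(u.shape, u.mean().item(), u.std().item())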
| torch-harmonics-main | torch_harmonics/random_fields.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import torch
import torch.nn as nn
import torch.fft
from .quadrature import *
from .legendre import *
class RealSHT(nn.Module):
r"""
Defines a module for computing the forward (real-valued) SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
The SHT is applied to the last two dimensions of the input
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
r"""
Initializes the SHT Layer, precomputing the necessary quadrature weights
Parameters:
nlat: input grid resolution in the latitudinal direction
nlon: input grid resolution in the longitudinal direction
grid: grid in the latitude direction (for now only tensor product grids are supported)
"""
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# TODO: include assertions regarding the dimensions
# compute quadrature points
if self.grid == "legendre-gauss":
cost, w = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, w = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, w = clenshaw_curtiss_weights(nlat, -1, 1)
# cost, w = fejer2_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise(ValueError("Unknown quadrature mode"))
# apply cosine transform and flip them
tq = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
# combine quadrature weights with the legendre weights
weights = torch.from_numpy(w)
pct = precompute_legpoly(self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase)
weights = torch.einsum('mlk,k->mlk', pct, weights)
# remember quadrature weights
self.register_buffer('weights', weights, persistent=False)
def extra_repr(self):
r"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
assert(x.shape[-2] == self.nlat)
assert(x.shape[-1] == self.nlon)
# apply real fft in the longitudinal direction
x = 2.0 * torch.pi * torch.fft.rfft(x, dim=-1, norm="forward")
# do the Legendre-Gauss quadrature
x = torch.view_as_real(x)
# distributed contraction: fork
out_shape = list(x.size())
out_shape[-3] = self.lmax
out_shape[-2] = self.mmax
xout = torch.zeros(out_shape, dtype=x.dtype, device=x.device)
# contraction
xout[..., 0] = torch.einsum('...km,mlk->...lm', x[..., :self.mmax, 0], self.weights.to(x.dtype) )
xout[..., 1] = torch.einsum('...km,mlk->...lm', x[..., :self.mmax, 1], self.weights.to(x.dtype) )
x = torch.view_as_complex(xout)
return x
class InverseRealSHT(nn.Module):
r"""
Defines a module for computing the inverse (real-valued) SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
nlat, nlon: Output dimensions
lmax, mmax: Input dimensions (spherical coefficients). For convenience, these are inferred from the output dimensions
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# compute quadrature points
if self.grid == "legendre-gauss":
cost, _ = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, _ = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, _ = clenshaw_curtiss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise(ValueError("Unknown quadrature mode"))
# apply cosine transform and flip them
t = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
pct = precompute_legpoly(self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase)
# register buffer
self.register_buffer('pct', pct, persistent=False)
def extra_repr(self):
r"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
assert(x.shape[-2] == self.lmax)
assert(x.shape[-1] == self.mmax)
# Evaluate associated Legendre functions on the output nodes
x = torch.view_as_real(x)
rl = torch.einsum('...lm, mlk->...km', x[..., 0], self.pct.to(x.dtype) )
im = torch.einsum('...lm, mlk->...km', x[..., 1], self.pct.to(x.dtype) )
xs = torch.stack((rl, im), -1)
# apply the inverse (real) FFT
x = torch.view_as_complex(xs)
x = torch.fft.irfft(x, n=self.nlon, dim=-1, norm="forward")
return x
class RealVectorSHT(nn.Module):
r"""
Defines a module for computing the forward (real) vector SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
The SHT is applied to the last three dimensions of the input.
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
r"""
Initializes the vector SHT Layer, precomputing the necessary quadrature weights
Parameters:
nlat: input grid resolution in the latitudinal direction
nlon: input grid resolution in the longitudinal direction
grid: type of grid the data lives on
"""
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# compute quadrature points
if self.grid == "legendre-gauss":
cost, w = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, w = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, w = clenshaw_curtiss_weights(nlat, -1, 1)
# cost, w = fejer2_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise(ValueError("Unknown quadrature mode"))
# apply cosine transform and flip them
tq = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
weights = torch.from_numpy(w)
dpct = precompute_dlegpoly(self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase)
# combine integration weights, normalization factor in to one:
l = torch.arange(0, self.lmax)
norm_factor = 1. / l / (l+1)
norm_factor[0] = 1.
weights = torch.einsum('dmlk,k,l->dmlk', dpct, weights, norm_factor)
# since the second component is imaginary, we need to take complex conjugation into account
weights[1] = -1 * weights[1]
# remember quadrature weights
self.register_buffer('weights', weights, persistent=False)
def extra_repr(self):
r"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
assert(len(x.shape) >= 3)
# apply real fft in the longitudinal direction
x = 2.0 * torch.pi * torch.fft.rfft(x, dim=-1, norm="forward")
# do the Legendre-Gauss quadrature
x = torch.view_as_real(x)
# distributed contraction: fork
out_shape = list(x.size())
out_shape[-3] = self.lmax
out_shape[-2] = self.mmax
xout = torch.zeros(out_shape, dtype=x.dtype, device=x.device)
# contraction - spheroidal component
# real component
xout[..., 0, :, :, 0] = torch.einsum('...km,mlk->...lm', x[..., 0, :, :self.mmax, 0], self.weights[0].to(x.dtype)) \
- torch.einsum('...km,mlk->...lm', x[..., 1, :, :self.mmax, 1], self.weights[1].to(x.dtype))
        # imag component
xout[..., 0, :, :, 1] = torch.einsum('...km,mlk->...lm', x[..., 0, :, :self.mmax, 1], self.weights[0].to(x.dtype)) \
+ torch.einsum('...km,mlk->...lm', x[..., 1, :, :self.mmax, 0], self.weights[1].to(x.dtype))
# contraction - toroidal component
# real component
xout[..., 1, :, :, 0] = - torch.einsum('...km,mlk->...lm', x[..., 0, :, :self.mmax, 1], self.weights[1].to(x.dtype)) \
- torch.einsum('...km,mlk->...lm', x[..., 1, :, :self.mmax, 0], self.weights[0].to(x.dtype))
# imag component
xout[..., 1, :, :, 1] = torch.einsum('...km,mlk->...lm', x[..., 0, :, :self.mmax, 0], self.weights[1].to(x.dtype)) \
- torch.einsum('...km,mlk->...lm', x[..., 1, :, :self.mmax, 1], self.weights[0].to(x.dtype))
return torch.view_as_complex(xout)
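# (added note) RealVectorSHT decomposes a tangential vector field on the sphere into a
# spheroidal (curl-free) and a toroidal (divergence-free) part; index 0 and index 1 of the
# vector dimension of the output hold the corresponding coefficients.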
class InverseRealVectorSHT(nn.Module):
r"""
Defines a module for computing the inverse (real-valued) vector SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# compute quadrature points
if self.grid == "legendre-gauss":
cost, _ = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, _ = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, _ = clenshaw_curtiss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise(ValueError("Unknown quadrature mode"))
# apply cosine transform and flip them
t = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
dpct = precompute_dlegpoly(self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase)
# register weights
self.register_buffer('dpct', dpct, persistent=False)
def extra_repr(self):
r"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
assert(x.shape[-2] == self.lmax)
assert(x.shape[-1] == self.mmax)
# Evaluate associated Legendre functions on the output nodes
x = torch.view_as_real(x)
# contraction - spheroidal component
# real component
srl = torch.einsum('...lm,mlk->...km', x[..., 0, :, :, 0], self.dpct[0].to(x.dtype)) \
- torch.einsum('...lm,mlk->...km', x[..., 1, :, :, 1], self.dpct[1].to(x.dtype))
        # imag component
sim = torch.einsum('...lm,mlk->...km', x[..., 0, :, :, 1], self.dpct[0].to(x.dtype)) \
+ torch.einsum('...lm,mlk->...km', x[..., 1, :, :, 0], self.dpct[1].to(x.dtype))
# contraction - toroidal component
# real component
trl = - torch.einsum('...lm,mlk->...km', x[..., 0, :, :, 1], self.dpct[1].to(x.dtype)) \
- torch.einsum('...lm,mlk->...km', x[..., 1, :, :, 0], self.dpct[0].to(x.dtype))
# imag component
tim = torch.einsum('...lm,mlk->...km', x[..., 0, :, :, 0], self.dpct[1].to(x.dtype)) \
- torch.einsum('...lm,mlk->...km', x[..., 1, :, :, 1], self.dpct[0].to(x.dtype))
# reassemble
s = torch.stack((srl, sim), -1)
t = torch.stack((trl, tim), -1)
xs = torch.stack((s, t), -4)
# apply the inverse (real) FFT
x = torch.view_as_complex(xs)
x = torch.fft.irfft(x, n=self.nlon, dim=-1, norm="forward")
return x
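# Hedged usage sketch (added; not part of the original module): a minimal forward/inverse
# round trip on a Legendre-Gauss grid, where the quadrature is exact for band-limited
# signals. Grid size and dtype are illustrative; run with `python -m torch_harmonics.sht`.
if __name__ == "__main__":
    nlat, nlon = 64, 128
    sht = RealSHT(nlat, nlon, grid="legendre-gauss")
    isht = InverseRealSHT(nlat, nlon, grid="legendre-gauss")
    # start from random spectral coefficients so that the synthesized signal is band-limited
    coeffs = torch.randn(sht.lmax, sht.mmax, dtype=torch.complex128)
    signal = isht(coeffs)
    rel_err = torch.linalg.norm(isht(sht(signal)) - signal) / torch.linalg.norm(signal)
    print(f"SHT round-trip relative error: {rel_err.item():.3e}")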
| torch-harmonics-main | torch_harmonics/sht.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
def legendre_gauss_weights(n, a=-1.0, b=1.0):
r"""
Helper routine which returns the Legendre-Gauss nodes and weights
on the interval [a, b]
"""
xlg, wlg = np.polynomial.legendre.leggauss(n)
xlg = (b - a) * 0.5 * xlg + (b + a) * 0.5
wlg = wlg * (b - a) * 0.5
return xlg, wlg
def lobatto_weights(n, a=-1.0, b=1.0, tol=1e-16, maxiter=100):
r"""
Helper routine which returns the Legendre-Gauss-Lobatto nodes and weights
on the interval [a, b]
"""
wlg = np.zeros((n,))
tlg = np.zeros((n,))
tmp = np.zeros((n,))
# Vandermonde Matrix
vdm = np.zeros((n, n))
# initialize Chebyshev nodes as first guess
for i in range(n):
tlg[i] = -np.cos(np.pi*i / (n-1))
tmp = 2.0
for i in range(maxiter):
tmp = tlg
vdm[:,0] = 1.0
vdm[:,1] = tlg
for k in range(2, n):
vdm[:, k] = ( (2*k-1) * tlg * vdm[:, k-1] - (k-1) * vdm[:, k-2] ) / k
tlg = tmp - ( tlg*vdm[:, n-1] - vdm[:, n-2] ) / ( n * vdm[:, n-1])
if (max(abs(tlg - tmp).flatten()) < tol ):
break
wlg = 2.0 / ( (n*(n-1))*(vdm[:, n-1]**2))
# rescale
tlg = (b - a) * 0.5 * tlg + (b + a) * 0.5
wlg = wlg * (b - a) * 0.5
return tlg, wlg
def clenshaw_curtiss_weights(n, a=-1.0, b=1.0):
r"""
Computation of the Clenshaw-Curtis quadrature nodes and weights.
This implementation follows
[1] Joerg Waldvogel, Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules; BIT Numerical Mathematics, Vol. 43, No. 1, pp. 001–018.
"""
assert(n > 1)
tcc = np.cos(np.linspace(np.pi, 0, n))
if n == 2:
wcc = np.array([1., 1.])
else:
n1 = n - 1
N = np.arange(1, n1, 2)
l = len(N)
m = n1 - l
v = np.concatenate([2 / N / (N-2), 1 / N[-1:], np.zeros(m)])
v = 0 - v[:-1] - v[-1:0:-1]
g0 = -np.ones(n1)
g0[l] = g0[l] + n1
g0[m] = g0[m] + n1
g = g0 / (n1**2 - 1 + (n1%2))
wcc = np.fft.ifft(v + g).real
wcc = np.concatenate((wcc, wcc[:1]))
# rescale
tcc = (b - a) * 0.5 * tcc + (b + a) * 0.5
wcc = wcc * (b - a) * 0.5
return tcc, wcc
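# Hedged sanity check (added; not part of the original module): Legendre-Gauss with n nodes
# integrates polynomials up to degree 2n-1 exactly, Lobatto up to 2n-3, and Clenshaw-Curtis
# up to n-1, so all three rules should reproduce the integral of x^2 over [-1, 1], i.e. 2/3.
if __name__ == "__main__":
    for rule in (legendre_gauss_weights, lobatto_weights, clenshaw_curtiss_weights):
        x, w = rule(8)
        print(f"{rule.__name__}: integral of x^2 ~= {np.sum(w * x**2):.15f}")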
def fejer2_weights(n, a=-1.0, b=1.0):
r"""
Computation of the Fejer quadrature nodes and weights.
This implementation follows
[1] Joerg Waldvogel, Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules; BIT Numerical Mathematics, Vol. 43, No. 1, pp. 001–018.
"""
assert(n > 2)
tcc = np.cos(np.linspace(np.pi, 0, n))
n1 = n - 1
N = np.arange(1, n1, 2)
l = len(N)
m = n1 - l
v = np.concatenate([2 / N / (N-2), 1 / N[-1:], np.zeros(m)])
v = 0 - v[:-1] - v[-1:0:-1]
wcc = np.fft.ifft(v).real
wcc = np.concatenate((wcc, wcc[:1]))
# rescale
tcc = (b - a) * 0.5 * tcc + (b + a) * 0.5
wcc = wcc * (b - a) * 0.5
return tcc, wcc | torch-harmonics-main | torch_harmonics/quadrature.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__version__ = '0.6.3'
from .sht import RealSHT, InverseRealSHT, RealVectorSHT, InverseRealVectorSHT
from . import quadrature
from . import random_fields
from . import examples
| torch-harmonics-main | torch_harmonics/__init__.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from parameterized import parameterized
import math
import numpy as np
import torch
from torch.autograd import gradcheck
from torch_harmonics import *
# try:
# from tqdm import tqdm
# except:
# tqdm = lambda x : x
tqdm = lambda x : x
class TestLegendrePolynomials(unittest.TestCase):
def setUp(self):
self.cml = lambda m, l : np.sqrt((2*l + 1) / 4 / np.pi) * np.sqrt(math.factorial(l-m) / math.factorial(l+m))
self.pml = dict()
# preparing associated Legendre Polynomials (These include the Condon-Shortley phase)
# for reference see e.g. https://en.wikipedia.org/wiki/Associated_Legendre_polynomials
self.pml[(0, 0)] = lambda x : np.ones_like(x)
self.pml[(0, 1)] = lambda x : x
self.pml[(1, 1)] = lambda x : - np.sqrt(1. - x**2)
self.pml[(0, 2)] = lambda x : 0.5 * (3*x**2 - 1)
self.pml[(1, 2)] = lambda x : - 3 * x * np.sqrt(1. - x**2)
self.pml[(2, 2)] = lambda x : 3 * (1 - x**2)
self.pml[(0, 3)] = lambda x : 0.5 * (5*x**3 - 3*x)
self.pml[(1, 3)] = lambda x : 1.5 * (1 - 5*x**2) * np.sqrt(1. - x**2)
self.pml[(2, 3)] = lambda x : 15 * x * (1 - x**2)
self.pml[(3, 3)] = lambda x : -15 * np.sqrt(1. - x**2)**3
self.lmax = self.mmax = 4
self.tol = 1e-9
def test_legendre(self):
print("Testing computation of associated Legendre polynomials")
from torch_harmonics.legendre import precompute_legpoly
t = np.linspace(0, np.pi, 100)
pct = precompute_legpoly(self.mmax, self.lmax, t)
for l in range(self.lmax):
for m in range(l+1):
diff = pct[m, l].numpy() / self.cml(m,l) - self.pml[(m,l)](np.cos(t))
self.assertTrue(diff.max() <= self.tol)
class TestSphericalHarmonicTransform(unittest.TestCase):
def setUp(self):
if torch.cuda.is_available():
print("Running test on GPU")
self.device = torch.device('cuda')
else:
print("Running test on CPU")
self.device = torch.device('cpu')
@parameterized.expand([
[256, 512, 32, "ortho", "equiangular", 1e-9],
[256, 512, 32, "ortho", "legendre-gauss", 1e-9],
[256, 512, 32, "four-pi", "equiangular", 1e-9],
[256, 512, 32, "four-pi", "legendre-gauss", 1e-9],
[256, 512, 32, "schmidt", "equiangular", 1e-9],
[256, 512, 32, "schmidt", "legendre-gauss", 1e-9],
])
def test_sht(self, nlat, nlon, batch_size, norm, grid, tol):
print(f"Testing real-valued SHT on {nlat}x{nlon} {grid} grid with {norm} normalization")
testiters = [1, 2, 4, 8, 16]
if grid == "equiangular":
mmax = nlat // 2
else:
mmax = nlat
lmax = mmax
sht = RealSHT(nlat, nlon, mmax=mmax, lmax=lmax, grid=grid, norm=norm).to(self.device)
isht = InverseRealSHT(nlat, nlon, mmax=mmax, lmax=lmax, grid=grid, norm=norm).to(self.device)
with torch.no_grad():
coeffs = torch.zeros(batch_size, lmax, mmax, device=self.device, dtype=torch.complex128)
coeffs[:, :lmax, :mmax] = torch.randn(batch_size, lmax, mmax, device=self.device, dtype=torch.complex128)
signal = isht(coeffs)
# testing error accumulation
for iter in testiters:
with self.subTest(i = iter):
print(f"{iter} iterations of batchsize {batch_size}:")
base = signal
for _ in tqdm(range(iter)):
base = isht(sht(base))
err = torch.mean(torch.norm(base-signal, p='fro', dim=(-1,-2)) / torch.norm(signal, p='fro', dim=(-1,-2)) )
print(f"final relative error: {err.item()}")
self.assertTrue(err.item() <= tol)
@parameterized.expand([
[12, 24, 2, "ortho", "equiangular", 1e-5],
[12, 24, 2, "ortho", "legendre-gauss", 1e-5],
[12, 24, 2, "four-pi", "equiangular", 1e-5],
[12, 24, 2, "four-pi", "legendre-gauss", 1e-5],
[12, 24, 2, "schmidt", "equiangular", 1e-5],
[12, 24, 2, "schmidt", "legendre-gauss", 1e-5],
])
def test_sht_grad(self, nlat, nlon, batch_size, norm, grid, tol):
print(f"Testing gradients of real-valued SHT on {nlat}x{nlon} {grid} grid with {norm} normalization")
if grid == "equiangular":
mmax = nlat // 2
else:
mmax = nlat
lmax = mmax
sht = RealSHT(nlat, nlon, mmax=mmax, lmax=lmax, grid=grid, norm=norm).to(self.device)
isht = InverseRealSHT(nlat, nlon, mmax=mmax, lmax=lmax, grid=grid, norm=norm).to(self.device)
with torch.no_grad():
coeffs = torch.zeros(batch_size, lmax, mmax, device=self.device, dtype=torch.complex128)
coeffs[:, :lmax, :mmax] = torch.randn(batch_size, lmax, mmax, device=self.device, dtype=torch.complex128)
signal = isht(coeffs)
input = torch.randn_like(signal, requires_grad=True)
err_handle = lambda x : torch.mean(torch.norm( isht(sht(x)) - signal , p='fro', dim=(-1,-2)) / torch.norm(signal, p='fro', dim=(-1,-2)) )
test_result = gradcheck(err_handle, input, eps=1e-6, atol=tol)
self.assertTrue(test_result)
if __name__ == '__main__':
    unittest.main()
 | torch-harmonics-main | torch_harmonics/tests.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import math
import numpy as np
import torch
def clm(l, m):
"""
defines the normalization factor to orthonormalize the Spherical Harmonics
"""
    return np.sqrt((2*l + 1) / 4 / np.pi) * np.sqrt(math.factorial(l-m) / math.factorial(l+m))
def precompute_legpoly(mmax, lmax, t, norm="ortho", inverse=False, csphase=True):
r"""
Computes the values of (-1)^m c^l_m P^l_m(\cos \theta) at the positions specified by x (theta)
The resulting tensor has shape (mmax, lmax, len(x)).
The Condon-Shortley Phase (-1)^m can be turned off optionally
method of computation follows
[1] Schaeffer, N.; Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Rapp, R.H.; A Fortran Program for the Computation of Gravimetric Quantities from High Degree Spherical Harmonic Expansions, Ohio State University Columbus; report; 1982;
https://apps.dtic.mil/sti/citations/ADA123406
[3] Schrama, E.; Orbit integration based upon interpolated gravitational gradients
"""
# compute the tensor P^m_n:
nmax = max(mmax,lmax)
pct = np.zeros((nmax, nmax, len(t)), dtype=np.float64)
sint = np.sin(t)
cost = np.cos(t)
norm_factor = 1. if norm == "ortho" else np.sqrt(4 * np.pi)
norm_factor = 1. / norm_factor if inverse else norm_factor
# initial values to start the recursion
pct[0,0,:] = norm_factor / np.sqrt(4 * np.pi)
# fill the diagonal and the lower diagonal
for l in range(1, nmax):
pct[l-1, l, :] = np.sqrt(2*l + 1) * cost * pct[l-1, l-1, :]
pct[l, l, :] = np.sqrt( (2*l + 1) * (1 + cost) * (1 - cost) / 2 / l ) * pct[l-1, l-1, :]
    # fill the remaining values on the upper triangle via the three-term recursion in l
for l in range(2, nmax):
for m in range(0, l-1):
pct[m, l, :] = cost * np.sqrt((2*l - 1) / (l - m) * (2*l + 1) / (l + m)) * pct[m, l-1, :] \
- np.sqrt((l + m - 1) / (l - m) * (2*l + 1) / (2*l - 3) * (l - m - 1) / (l + m)) * pct[m, l-2, :]
if norm == "schmidt":
for l in range(0, nmax):
if inverse:
pct[:, l, : ] = pct[:, l, : ] * np.sqrt(2*l + 1)
else:
pct[:, l, : ] = pct[:, l, : ] / np.sqrt(2*l + 1)
pct = pct[:mmax, :lmax]
if csphase:
for m in range(1, mmax, 2):
pct[m] *= -1
return torch.from_numpy(pct)
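# Hedged sanity check (added; not part of the original module): the first orthonormalized
# associated Legendre functions have simple closed forms, e.g. P^0_0 = 1/sqrt(4*pi) and
# P^0_1 = sqrt(3/(4*pi)) * cos(theta); spot-check them at a few colatitudes.
if __name__ == "__main__":
    theta = np.linspace(0.0, np.pi, 7)
    pct_check = precompute_legpoly(3, 3, theta)  # shape (mmax, lmax, len(theta))
    assert np.allclose(pct_check[0, 0].numpy(), 1.0 / np.sqrt(4.0 * np.pi))
    assert np.allclose(pct_check[0, 1].numpy(), np.sqrt(3.0 / (4.0 * np.pi)) * np.cos(theta))
    print("precompute_legpoly spot checks passed")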
def precompute_dlegpoly(mmax, lmax, x, norm="ortho", inverse=False, csphase=True):
r"""
Computes the values of the derivatives $\frac{d}{d \theta} P^m_l(\cos \theta)$
at the positions specified by x (theta), as well as $\frac{1}{\sin \theta} P^m_l(\cos \theta)$,
needed for the computation of the vector spherical harmonics. The resulting tensor has shape
(2, mmax, lmax, len(x)).
computation follows
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
pct = precompute_legpoly(mmax+1, lmax+1, x, norm=norm, inverse=inverse, csphase=False)
dpct = torch.zeros((2, mmax, lmax, len(x)), dtype=torch.float64)
# fill the derivative terms wrt theta
for l in range(0, lmax):
# m = 0
dpct[0, 0, l] = - np.sqrt(l*(l+1)) * pct[1, l]
# 0 < m < l
for m in range(1, min(l, mmax)):
dpct[0, m, l] = 0.5 * ( np.sqrt((l+m)*(l-m+1)) * pct[m-1, l] - np.sqrt((l-m)*(l+m+1)) * pct[m+1, l] )
# m == l
if mmax > l:
dpct[0, l, l] = np.sqrt(l/2) * pct[l-1, l]
        # fill in -1j * m * P^m_l / sin(theta); as this component is purely imaginary,
# we won't store it explicitly in a complex array
for m in range(1, min(l+1, mmax)):
# this component is implicitly complex
# we do not divide by m here as this cancels with the derivative of the exponential
dpct[1, m, l] = 0.5 * np.sqrt((2*l+1)/(2*l+3)) * \
( np.sqrt((l-m+1)*(l-m+2)) * pct[m-1, l+1] + np.sqrt((l+m+1)*(l+m+2)) * pct[m+1, l+1] )
if csphase:
for m in range(1, mmax, 2):
dpct[:, m] *= -1
    return dpct
 | torch-harmonics-main | torch_harmonics/legendre.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.fft
import torch.nn.functional as F
from torch_harmonics.quadrature import *
from torch_harmonics.legendre import *
from torch_harmonics.distributed import polar_group_size, azimuth_group_size, distributed_transpose_azimuth, distributed_transpose_polar
from torch_harmonics.distributed import polar_group_rank, azimuth_group_rank
class DistributedRealSHT(nn.Module):
"""
Defines a module for computing the forward (real-valued) SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
The SHT is applied to the last two dimensions of the input
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
"""
Initializes the SHT Layer, precomputing the necessary quadrature weights
Parameters:
nlat: input grid resolution in the latitudinal direction
nlon: input grid resolution in the longitudinal direction
grid: grid in the latitude direction (for now only tensor product grids are supported)
"""
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# TODO: include assertions regarding the dimensions
# compute quadrature points
if self.grid == "legendre-gauss":
cost, w = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, w = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, w = clenshaw_curtiss_weights(nlat, -1, 1)
# cost, w = fejer2_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise(ValueError("Unknown quadrature mode"))
# get the comms grid:
self.comm_size_polar = polar_group_size()
self.comm_rank_polar = polar_group_rank()
self.comm_size_azimuth = azimuth_group_size()
self.comm_rank_azimuth = azimuth_group_rank()
# apply cosine transform and flip them
tq = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
# spatial paddings
latdist = (self.nlat + self.comm_size_polar - 1) // self.comm_size_polar
self.nlatpad = latdist * self.comm_size_polar - self.nlat
londist = (self.nlon + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.nlonpad = londist * self.comm_size_azimuth - self.nlon
# frequency paddings
ldist = (self.lmax + self.comm_size_polar - 1) // self.comm_size_polar
self.lpad = ldist * self.comm_size_polar - self.lmax
mdist = (self.mmax + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.mpad = mdist * self.comm_size_azimuth - self.mmax
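        # (added note) worked example of the padding arithmetic above: for lmax=721 on
        # comm_size_polar=3 ranks, ldist = ceil(721/3) = 241 and lpad = 3*241 - 721 = 2,
        # i.e. the spectral l dimension is padded by 2 rows so it splits evenly across ranks.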
# combine quadrature weights with the legendre weights
weights = torch.from_numpy(w)
pct = precompute_legpoly(self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase)
weights = torch.einsum('mlk,k->mlk', pct, weights)
# we need to split in m, pad before:
weights = F.pad(weights, [0, 0, 0, 0, 0, self.mpad], mode="constant")
weights = torch.split(weights, (self.mmax+self.mpad) // self.comm_size_azimuth, dim=0)[self.comm_rank_azimuth]
# compute the local pad and size
# spatial
self.nlat_local = min(latdist, self.nlat - self.comm_rank_polar * latdist)
self.nlatpad_local = latdist - self.nlat_local
self.nlon_local = min(londist, self.nlon - self.comm_rank_azimuth * londist)
self.nlonpad_local = londist - self.nlon_local
# frequency
self.lmax_local = min(ldist, self.lmax - self.comm_rank_polar * ldist)
self.lpad_local = ldist - self.lmax_local
self.mmax_local = min(mdist, self.mmax - self.comm_rank_azimuth * mdist)
self.mpad_local = mdist - self.mmax_local
# remember quadrature weights
self.register_buffer('weights', weights, persistent=False)
def extra_repr(self):
"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
# we need to ensure that we can split the channels evenly
assert(x.shape[1] % self.comm_size_polar == 0)
assert(x.shape[1] % self.comm_size_azimuth == 0)
# h and w is split. First we make w local by transposing into channel dim
if self.comm_size_azimuth > 1:
xt = distributed_transpose_azimuth.apply(x, (1, -1))
else:
xt = x
# apply real fft in the longitudinal direction: make sure to truncate to nlon
xtf = 2.0 * torch.pi * torch.fft.rfft(xt, n=self.nlon, dim=-1, norm="forward")
# truncate
xtft = xtf[..., :self.mmax]
# pad the dim to allow for splitting
xtfp = F.pad(xtft, [0, self.mpad], mode="constant")
# transpose: after this, m is split and c is local
if self.comm_size_azimuth > 1:
y = distributed_transpose_azimuth.apply(xtfp, (-1, 1))
else:
y = xtfp
# transpose: after this, c is split and h is local
if self.comm_size_polar > 1:
yt = distributed_transpose_polar.apply(y, (1, -2))
else:
yt = y
# the input data might be padded, make sure to truncate to nlat:
ytt = yt[..., :self.nlat, :]
# do the Legendre-Gauss quadrature
yttr = torch.view_as_real(ytt)
# contraction
yor = torch.einsum('...kmr,mlk->...lmr', yttr, self.weights.to(yttr.dtype)).contiguous()
# pad if required, truncation is implicit
yopr = F.pad(yor, [0, 0, 0, 0, 0, self.lpad], mode="constant")
yop = torch.view_as_complex(yopr)
# transpose: after this, l is split and c is local
if self.comm_size_polar > 1:
y = distributed_transpose_polar.apply(yop, (-2, 1))
else:
y = yop
return y
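# (added note) data-layout summary for DistributedRealSHT.forward: the input arrives with
# lat and lon split across the polar and azimuth groups; the transform first transposes
# channels against lon so lon becomes local, applies the real FFT and truncates to mmax,
# transposes back so m is split, then transposes channels against lat so lat becomes local,
# applies the Legendre contraction to produce l, and returns with l split across the polar
# group (m stays split across the azimuth group) and channels local again.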
class DistributedInverseRealSHT(nn.Module):
"""
Defines a module for computing the inverse (real-valued) SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
nlat, nlon: Output dimensions
lmax, mmax: Input dimensions (spherical coefficients). For convenience, these are inferred from the output dimensions
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# compute quadrature points
if self.grid == "legendre-gauss":
cost, _ = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, _ = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, _ = clenshaw_curtiss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise(ValueError("Unknown quadrature mode"))
# get the comms grid:
self.comm_size_polar = polar_group_size()
self.comm_rank_polar = polar_group_rank()
self.comm_size_azimuth = azimuth_group_size()
self.comm_rank_azimuth = azimuth_group_rank()
# apply cosine transform and flip them
t = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
# spatial paddings
latdist = (self.nlat + self.comm_size_polar - 1) // self.comm_size_polar
self.nlatpad = latdist * self.comm_size_polar - self.nlat
londist = (self.nlon + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.nlonpad = londist * self.comm_size_azimuth - self.nlon
# frequency paddings
ldist = (self.lmax + self.comm_size_polar - 1) // self.comm_size_polar
self.lpad = ldist * self.comm_size_polar - self.lmax
mdist = (self.mmax + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.mpad = mdist * self.comm_size_azimuth - self.mmax
        # compute legendre polynomials
pct = precompute_legpoly(self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase)
# split in m
pct = F.pad(pct, [0, 0, 0, 0, 0, self.mpad], mode="constant")
pct = torch.split(pct, (self.mmax+self.mpad) // self.comm_size_azimuth, dim=0)[self.comm_rank_azimuth]
# compute the local pads and sizes
# spatial
self.nlat_local = min(latdist, self.nlat - self.comm_rank_polar * latdist)
self.nlatpad_local = latdist - self.nlat_local
self.nlon_local = min(londist, self.nlon - self.comm_rank_azimuth * londist)
self.nlonpad_local = londist - self.nlon_local
# frequency
self.lmax_local = min(ldist, self.lmax - self.comm_rank_polar * ldist)
self.lpad_local = ldist - self.lmax_local
self.mmax_local = min(mdist, self.mmax - self.comm_rank_azimuth * mdist)
self.mpad_local = mdist - self.mmax_local
# register
self.register_buffer('pct', pct, persistent=False)
def extra_repr(self):
"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
# we need to ensure that we can split the channels evenly
assert(x.shape[1] % self.comm_size_polar == 0)
assert(x.shape[1] % self.comm_size_azimuth == 0)
# transpose: after that, channels are split, l is local:
if self.comm_size_polar > 1:
xt = distributed_transpose_polar.apply(x, (1, -2))
else:
xt = x
# remove padding in l:
xtt = xt[..., :self.lmax, :]
# Evaluate associated Legendre functions on the output nodes
xttr = torch.view_as_real(xtt)
# einsum
xs = torch.einsum('...lmr, mlk->...kmr', xttr, self.pct.to(xttr.dtype)).contiguous()
x = torch.view_as_complex(xs)
# transpose: after this, l is split and channels are local
xp = F.pad(x, [0, 0, 0, self.nlatpad])
if self.comm_size_polar > 1:
y = distributed_transpose_polar.apply(xp, (-2, 1))
else:
y = xp
# transpose: after this, channels are split and m is local
if self.comm_size_azimuth > 1:
yt = distributed_transpose_azimuth.apply(y, (1, -1))
else:
yt = y
# truncate
ytt = yt[..., :self.mmax]
# apply the inverse (real) FFT
x = torch.fft.irfft(ytt, n=self.nlon, dim=-1, norm="forward")
# pad before we transpose back
xp = F.pad(x, [0, self.nlonpad])
# transpose: after this, m is split and channels are local
if self.comm_size_azimuth > 1:
out = distributed_transpose_azimuth.apply(xp, (-1, 1))
else:
out = xp
return out
class DistributedRealVectorSHT(nn.Module):
"""
Defines a module for computing the forward (real) vector SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
The SHT is applied to the last three dimensions of the input.
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
"""
Initializes the vector SHT Layer, precomputing the necessary quadrature weights
Parameters:
nlat: input grid resolution in the latitudinal direction
nlon: input grid resolution in the longitudinal direction
grid: type of grid the data lives on
"""
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# compute quadrature points
if self.grid == "legendre-gauss":
cost, w = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, w = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, w = clenshaw_curtiss_weights(nlat, -1, 1)
# cost, w = fejer2_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise(ValueError("Unknown quadrature mode"))
# get the comms grid:
self.comm_size_polar = polar_group_size()
self.comm_rank_polar = polar_group_rank()
self.comm_size_azimuth = azimuth_group_size()
self.comm_rank_azimuth = azimuth_group_rank()
# apply cosine transform and flip them
tq = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
# spatial paddings
latdist = (self.nlat + self.comm_size_polar - 1) // self.comm_size_polar
self.nlatpad = latdist * self.comm_size_polar - self.nlat
londist = (self.nlon + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.nlonpad = londist * self.comm_size_azimuth - self.nlon
# frequency paddings
ldist = (self.lmax + self.comm_size_polar - 1) // self.comm_size_polar
self.lpad = ldist * self.comm_size_polar - self.lmax
mdist = (self.mmax + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.mpad = mdist * self.comm_size_azimuth - self.mmax
weights = torch.from_numpy(w)
dpct = precompute_dlegpoly(self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase)
# combine integration weights and normalization factor into one tensor:
l = torch.arange(0, self.lmax)
norm_factor = 1. / l / (l+1)
norm_factor[0] = 1.
weights = torch.einsum('dmlk,k,l->dmlk', dpct, weights, norm_factor)
# since the second component is imaginary, we need to take complex conjugation into account
weights[1] = -1 * weights[1]
# we need to split in m, pad before:
weights = F.pad(weights, [0, 0, 0, 0, 0, self.mpad], mode="constant")
weights = torch.split(weights, (self.mmax+self.mpad) // self.comm_size_azimuth, dim=1)[self.comm_rank_azimuth]
# remember quadrature weights
self.register_buffer('weights', weights, persistent=False)
# compute the local pad and size
# spatial
self.nlat_local = min(latdist, self.nlat - self.comm_rank_polar * latdist)
self.nlatpad_local = latdist - self.nlat_local
self.nlon_local = min(londist, self.nlon - self.comm_rank_azimuth * londist)
self.nlonpad_local = londist - self.nlon_local
# frequency
self.lmax_local = min(ldist, self.lmax - self.comm_rank_polar * ldist)
self.lpad_local = ldist - self.lmax_local
self.mmax_local = min(mdist, self.mmax - self.comm_rank_azimuth * mdist)
self.mpad_local = mdist - self.mmax_local
def extra_repr(self):
"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
assert(len(x.shape) >= 3)
assert(x.shape[1] % self.comm_size_polar == 0)
assert(x.shape[1] % self.comm_size_azimuth == 0)
# h and w are split. First we make w local by transposing it into the channel dim
if self.comm_size_azimuth > 1:
xt = distributed_transpose_azimuth.apply(x, (1, -1))
else:
xt = x
# apply real fft in the longitudinal direction: make sure to truncate to nlon
xtf = 2.0 * torch.pi * torch.fft.rfft(xt, n=self.nlon, dim=-1, norm="forward")
# truncate
xtft = xtf[..., :self.mmax]
# pad the dim to allow for splitting
xtfp = F.pad(xtft, [0, self.mpad], mode="constant")
# transpose: after this, m is split and c is local
if self.comm_size_azimuth > 1:
y = distributed_transpose_azimuth.apply(xtfp, (-1, 1))
else:
y = xtfp
# transpose: after this, c is split and h is local
if self.comm_size_polar > 1:
yt = distributed_transpose_polar.apply(y, (1, -2))
else:
yt = y
# the input data might be padded, make sure to truncate to nlat:
ytt = yt[..., :self.nlat, :]
# do the Legendre-Gauss quadrature
yttr = torch.view_as_real(ytt)
# create output array
yor = torch.zeros_like(yttr, dtype=yttr.dtype, device=yttr.device)
# contraction - spheroidal component
# real component
yor[..., 0, :, :, 0] = torch.einsum('...km,mlk->...lm', yttr[..., 0, :, :, 0], self.weights[0].to(yttr.dtype)) \
- torch.einsum('...km,mlk->...lm', yttr[..., 1, :, :, 1], self.weights[1].to(yttr.dtype))
# imag component
yor[..., 0, :, :, 1] = torch.einsum('...km,mlk->...lm', yttr[..., 0, :, :, 1], self.weights[0].to(yttr.dtype)) \
+ torch.einsum('...km,mlk->...lm', yttr[..., 1, :, :, 0], self.weights[1].to(yttr.dtype))
# contraction - toroidal component
# real component
yor[..., 1, :, :, 0] = - torch.einsum('...km,mlk->...lm', yttr[..., 0, :, :, 1], self.weights[1].to(yttr.dtype)) \
- torch.einsum('...km,mlk->...lm', yttr[..., 1, :, :, 0], self.weights[0].to(yttr.dtype))
# imag component
yor[..., 1, :, :, 1] = torch.einsum('...km,mlk->...lm', yttr[..., 0, :, :, 0], self.weights[1].to(yttr.dtype)) \
- torch.einsum('...km,mlk->...lm', yttr[..., 1, :, :, 1], self.weights[0].to(yttr.dtype))
# pad if required
yopr = F.pad(yor, [0, 0, 0, 0, 0, self.lpad], mode="constant")
yop = torch.view_as_complex(yopr)
# transpose: after this, l is split and c is local
if self.comm_size_polar > 1:
y = distributed_transpose_polar.apply(yop, (-2, 1))
else:
y = yop
return y
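# Hedged usage sketch (not part of the original module). If no process groups have been
# initialized, the polar/azimuth communicator sizes default to 1 and the module behaves
# like a serial vector SHT; the sizes below are illustrative assumptions:
#   vsht = DistributedRealVectorSHT(nlat=64, nlon=128, grid="equiangular")
#   x = torch.randn(1, 8, 2, 64, 128)   # (batch, channels, [u, v], nlat, nlon)
#   coeffs = vsht(x)                     # complex output of shape (1, 8, 2, lmax, mmax)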
class DistributedInverseRealVectorSHT(nn.Module):
"""
Defines a module for computing the inverse (real-valued) vector SHT.
Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes.
[1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems.
[2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math.
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None, grid="lobatto", norm="ortho", csphase=True):
super().__init__()
self.nlat = nlat
self.nlon = nlon
self.grid = grid
self.norm = norm
self.csphase = csphase
# compute quadrature points
if self.grid == "legendre-gauss":
cost, _ = legendre_gauss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
elif self.grid == "lobatto":
cost, _ = lobatto_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat-1
elif self.grid == "equiangular":
cost, _ = clenshaw_curtiss_weights(nlat, -1, 1)
self.lmax = lmax or self.nlat
else:
raise ValueError("Unknown quadrature mode")
self.comm_size_polar = polar_group_size()
self.comm_rank_polar = polar_group_rank()
self.comm_size_azimuth = azimuth_group_size()
self.comm_rank_azimuth = azimuth_group_rank()
# apply cosine transform and flip them
t = np.flip(np.arccos(cost))
# determine the dimensions
self.mmax = mmax or self.nlon // 2 + 1
# spatial paddings
latdist = (self.nlat + self.comm_size_polar - 1) // self.comm_size_polar
self.nlatpad = latdist * self.comm_size_polar - self.nlat
londist = (self.nlon + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.nlonpad = londist * self.comm_size_azimuth - self.nlon
# frequency paddings
ldist = (self.lmax + self.comm_size_polar - 1) // self.comm_size_polar
self.lpad = ldist * self.comm_size_polar - self.lmax
mdist = (self.mmax + self.comm_size_azimuth - 1) // self.comm_size_azimuth
self.mpad = mdist * self.comm_size_azimuth - self.mmax
# compute the Legendre polynomials for the vector transform
dpct = precompute_dlegpoly(self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase)
# pad and split in m (the m index is dim 1 of the 'dmlk' tensor)
dpct = F.pad(dpct, [0, 0, 0, 0, 0, self.mpad], mode="constant")
dpct = torch.split(dpct, (self.mmax+self.mpad) // self.comm_size_azimuth, dim=1)[self.comm_rank_azimuth]
# register buffer
self.register_buffer('dpct', dpct, persistent=False)
# compute the local pad and size
# spatial
self.nlat_local = min(latdist, self.nlat - self.comm_rank_polar * latdist)
self.nlatpad_local = latdist - self.nlat_local
self.nlon_local = min(londist, self.nlon - self.comm_rank_azimuth * londist)
self.nlonpad_local = londist - self.nlon_local
# frequency
self.lmax_local = min(ldist, self.lmax - self.comm_rank_polar * ldist)
self.lpad_local = ldist - self.lmax_local
self.mmax_local = min(mdist, self.mmax - self.comm_rank_azimuth * mdist)
self.mpad_local = mdist - self.mmax_local
def extra_repr(self):
"""
Pretty print module
"""
return f'nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}'
def forward(self, x: torch.Tensor):
assert(x.shape[1] % self.comm_size_polar == 0)
assert(x.shape[1] % self.comm_size_azimuth == 0)
# transpose: after that, channels are split, l is local:
if self.comm_size_polar > 1:
xt = distributed_transpose_polar.apply(x, (1, -2))
else:
xt = x
# remove padding in l:
xtt = xt[..., :self.lmax, :]
# Evaluate associated Legendre functions on the output nodes
xttr = torch.view_as_real(xtt)
# contraction - spheroidal component
# real component
srl = torch.einsum('...lm,mlk->...km', xttr[..., 0, :, :, 0], self.dpct[0].to(xttr.dtype)) \
- torch.einsum('...lm,mlk->...km', xttr[..., 1, :, :, 1], self.dpct[1].to(xttr.dtype))
# imag component
sim = torch.einsum('...lm,mlk->...km', xttr[..., 0, :, :, 1], self.dpct[0].to(xttr.dtype)) \
+ torch.einsum('...lm,mlk->...km', xttr[..., 1, :, :, 0], self.dpct[1].to(xttr.dtype))
# contraction - toroidal component
# real component
trl = - torch.einsum('...lm,mlk->...km', xttr[..., 0, :, :, 1], self.dpct[1].to(xttr.dtype)) \
- torch.einsum('...lm,mlk->...km', xttr[..., 1, :, :, 0], self.dpct[0].to(xttr.dtype))
# imag component
tim = torch.einsum('...lm,mlk->...km', xttr[..., 0, :, :, 0], self.dpct[1].to(xttr.dtype)) \
- torch.einsum('...lm,mlk->...km', xttr[..., 1, :, :, 1], self.dpct[0].to(xttr.dtype))
# reassemble
s = torch.stack((srl, sim), -1)
t = torch.stack((trl, tim), -1)
xs = torch.stack((s, t), -4)
# convert to complex
x = torch.view_as_complex(xs)
# transpose: after this, l is split and channels are local
xp = F.pad(x, [0, 0, 0, self.nlatpad])
if self.comm_size_polar > 1:
y = distributed_transpose_polar.apply(xp, (-2, 1))
else:
y = xp
# transpose: after this, channels are split and m is local
if self.comm_size_azimuth > 1:
yt = distributed_transpose_azimuth.apply(y, (1, -1))
else:
yt = y
# truncate
ytt = yt[..., :self.mmax]
# apply the inverse (real) FFT
x = torch.fft.irfft(ytt, n=self.nlon, dim=-1, norm="forward")
# pad before we transpose back
xp = F.pad(x, [0, self.nlonpad])
# transpose: after this, m is split and channels are local
if self.comm_size_azimuth > 1:
out = distributed_transpose_azimuth.apply(xp, (-1, 1))
else:
out = xp
return out | torch-harmonics-main | torch_harmonics/distributed/distributed_sht.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.distributed as dist
from .utils import polar_group, azimuth_group, is_initialized
# general helpers
def get_memory_format(tensor):
if tensor.is_contiguous(memory_format=torch.channels_last):
return torch.channels_last
else:
return torch.contiguous_format
def split_tensor_along_dim(tensor, dim, num_chunks):
assert dim < tensor.dim(), f"Error, tensor dimension is {tensor.dim()} which cannot be split along {dim}"
assert (tensor.shape[dim] % num_chunks == 0), f"Error, cannot split dim {dim} evenly. Dim size is \
{tensor.shape[dim]} and requested number of splits is {num_chunks}"
chunk_size = tensor.shape[dim] // num_chunks
tensor_list = torch.split(tensor, chunk_size, dim=dim)
return tensor_list
def _transpose(tensor, dim0, dim1, group=None, async_op=False):
# get input format
input_format = get_memory_format(tensor)
# get comm params
comm_size = dist.get_world_size(group=group)
# split and local transposition
split_size = tensor.shape[dim0] // comm_size
x_send = [y.contiguous(memory_format=input_format) for y in torch.split(tensor, split_size, dim=dim0)]
x_recv = [torch.empty_like(x_send[0]).contiguous(memory_format=input_format) for _ in range(comm_size)]
# global transposition
req = dist.all_to_all(x_recv, x_send, group=group, async_op=async_op)
return x_recv, req
class distributed_transpose_azimuth(torch.autograd.Function):
@staticmethod
def forward(ctx, x, dim):
xlist, _ = _transpose(x, dim[0], dim[1], group=azimuth_group())
x = torch.cat(xlist, dim=dim[1])
ctx.dim = dim
return x
@staticmethod
def backward(ctx, go):
dim = ctx.dim
gilist, _ = _transpose(go, dim[1], dim[0], group=azimuth_group())
gi = torch.cat(gilist, dim=dim[0])
return gi, None
class distributed_transpose_polar(torch.autograd.Function):
@staticmethod
def forward(ctx, x, dim):
xlist, _ = _transpose(x, dim[0], dim[1], group=polar_group())
x = torch.cat(xlist, dim=dim[1])
ctx.dim = dim
return x
@staticmethod
def backward(ctx, go):
dim = ctx.dim
gilist, _ = _transpose(go, dim[1], dim[0], group=polar_group())
gi = torch.cat(gilist, dim=dim[0])
return gi, None
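# Hedged usage sketch (not part of the original module). Under an initialized azimuth
# group (e.g. launched via torchrun with a backend that supports all_to_all, such as
# NCCL), the two transpose calls below are inverses of each other, provided the split
# dimensions divide evenly across the ranks:
#   y = distributed_transpose_azimuth.apply(x, (1, -1))   # split dim 1, gather dim -1
#   x2 = distributed_transpose_azimuth.apply(y, (-1, 1))  # recovers x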
| torch-harmonics-main | torch_harmonics/distributed/primitives.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# we need this in order to enable distributed
from .utils import init, is_initialized, polar_group, azimuth_group
from .utils import polar_group_size, azimuth_group_size, polar_group_rank, azimuth_group_rank
from .primitives import distributed_transpose_azimuth, distributed_transpose_polar
# import the sht stuff
from .distributed_sht import DistributedRealSHT, DistributedInverseRealSHT
from .distributed_sht import DistributedRealVectorSHT, DistributedInverseRealVectorSHT
| torch-harmonics-main | torch_harmonics/distributed/__init__.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# we need this in order to enable distributed
import torch
import torch.distributed as dist
# those need to be global
_POLAR_PARALLEL_GROUP = None
_AZIMUTH_PARALLEL_GROUP = None
_IS_INITIALIZED = False
def polar_group():
return _POLAR_PARALLEL_GROUP
def azimuth_group():
return _AZIMUTH_PARALLEL_GROUP
def init(polar_process_group, azimuth_process_group):
global _POLAR_PARALLEL_GROUP
global _AZIMUTH_PARALLEL_GROUP
global _IS_INITIALIZED
_POLAR_PARALLEL_GROUP = polar_process_group
_AZIMUTH_PARALLEL_GROUP = azimuth_process_group
_IS_INITIALIZED = True
def is_initialized() -> bool:
return _IS_INITIALIZED
def is_distributed_polar() -> bool:
return (_POLAR_PARALLEL_GROUP is not None)
def is_distributed_azimuth() -> bool:
return (_AZIMUTH_PARALLEL_GROUP is not None)
def polar_group_size() -> int:
if not is_distributed_polar():
return 1
else:
return dist.get_world_size(group = _POLAR_PARALLEL_GROUP)
def azimuth_group_size() -> int:
if not is_distributed_azimuth():
return 1
else:
return dist.get_world_size(group = _AZIMUTH_PARALLEL_GROUP)
def polar_group_rank() -> int:
if not is_distributed_polar():
return 0
else:
return dist.get_rank(group = _POLAR_PARALLEL_GROUP)
def azimuth_group_rank() -> int:
if not is_distributed_azimuth():
return 0
else:
return dist.get_rank(group = _AZIMUTH_PARALLEL_GROUP)
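# Hedged usage sketch (not part of the original module): wiring up the process groups
# for a 2 x 2 grid of ranks. The group layout below is an illustrative assumption; the
# default process group must already be initialized (e.g. via torchrun).
#   rank = dist.get_rank()
#   polar_pgs = [dist.new_group(ranks=[0, 2]), dist.new_group(ranks=[1, 3])]
#   azimuth_pgs = [dist.new_group(ranks=[0, 1]), dist.new_group(ranks=[2, 3])]
#   init(polar_pgs[rank % 2], azimuth_pgs[rank // 2])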
| torch-harmonics-main | torch_harmonics/distributed/utils.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.nn as nn
import torch_harmonics as harmonics
from torch_harmonics.quadrature import *
import numpy as np
class ShallowWaterSolver(nn.Module):
"""
SWE solver class. Interface inspired by pyspharm and SHTns
"""
def __init__(self, nlat, nlon, dt, lmax=None, mmax=None, grid='legendre-gauss', radius=6.37122E6, \
omega=7.292E-5, gravity=9.80616, havg=10.e3, hamp=120.):
super().__init__()
# time stepping param
self.dt = dt
# grid parameters
self.nlat = nlat
self.nlon = nlon
self.grid = grid
# physical constants
self.register_buffer('radius', torch.as_tensor(radius, dtype=torch.float64))
self.register_buffer('omega', torch.as_tensor(omega, dtype=torch.float64))
self.register_buffer('gravity', torch.as_tensor(gravity, dtype=torch.float64))
self.register_buffer('havg', torch.as_tensor(havg, dtype=torch.float64))
self.register_buffer('hamp', torch.as_tensor(hamp, dtype=torch.float64))
# SHT
self.sht = harmonics.RealSHT(nlat, nlon, lmax=lmax, mmax=mmax, grid=grid, csphase=False)
self.isht = harmonics.InverseRealSHT(nlat, nlon, lmax=lmax, mmax=mmax, grid=grid, csphase=False)
self.vsht = harmonics.RealVectorSHT(nlat, nlon, lmax=lmax, mmax=mmax, grid=grid, csphase=False)
self.ivsht = harmonics.InverseRealVectorSHT(nlat, nlon, lmax=lmax, mmax=mmax, grid=grid, csphase=False)
self.lmax = lmax or self.sht.lmax
self.mmax = mmax or self.sht.mmax
# compute gridpoints
if self.grid == "legendre-gauss":
cost, quad_weights = harmonics.quadrature.legendre_gauss_weights(self.nlat, -1, 1)
elif self.grid == "lobatto":
cost, quad_weights = harmonics.quadrature.lobatto_weights(self.nlat, -1, 1)
elif self.grid == "equiangular":
cost, quad_weights = harmonics.quadrature.clenshaw_curtiss_weights(self.nlat, -1, 1)
quad_weights = torch.as_tensor(quad_weights).reshape(-1, 1)
# apply cosine transform and flip them
lats = -torch.as_tensor(np.arcsin(cost))
lons = torch.linspace(0, 2*np.pi, self.nlon+1, dtype=torch.float64)[:nlon]
self.lmax = self.sht.lmax
self.mmax = self.sht.mmax
# compute the laplace and inverse laplace operators
l = torch.arange(0, self.lmax).reshape(self.lmax, 1).double()
l = l.expand(self.lmax, self.mmax)
# the laplace operator acting on the coefficients is given by - l (l + 1)
lap = - l * (l + 1) / self.radius**2
invlap = - self.radius**2 / l / (l + 1)
invlap[0] = 0.
# compute coriolis force
coriolis = 2 * self.omega * torch.sin(lats).reshape(self.nlat, 1)
# hyperdiffusion
hyperdiff = torch.exp(torch.asarray((-self.dt / 2 / 3600.)*(lap / lap[-1, 0])**4))
# register all
self.register_buffer('lats', lats)
self.register_buffer('lons', lons)
self.register_buffer('l', l)
self.register_buffer('lap', lap)
self.register_buffer('invlap', invlap)
self.register_buffer('coriolis', coriolis)
self.register_buffer('hyperdiff', hyperdiff)
self.register_buffer('quad_weights', quad_weights)
def grid2spec(self, ugrid):
"""
spectral coefficients from spatial data
"""
return self.sht(ugrid)
def spec2grid(self, uspec):
"""
spatial data from spectral coefficients
"""
return self.isht(uspec)
def vrtdivspec(self, ugrid):
"""spatial data from spectral coefficients"""
vrtdivspec = self.lap * self.radius * self.vsht(ugrid)
return vrtdivspec
def getuv(self, vrtdivspec):
"""
compute wind vector from spectral coeffs of vorticity and divergence
"""
return self.ivsht( self.invlap * vrtdivspec / self.radius)
def gethuv(self, uspec):
"""
compute the height and wind vector fields from the spectral coefficients
"""
hgrid = self.spec2grid(uspec[:1])
uvgrid = self.getuv(uspec[1:])
return torch.cat((hgrid, uvgrid), dim=-3)
def potential_vorticity(self, uspec):
"""
Compute potential vorticity
"""
ugrid = self.spec2grid(uspec)
pvrt = (0.5 * self.havg * self.gravity / self.omega) * (ugrid[1] + self.coriolis) / ugrid[0]
return pvrt
def dimensionless(self, uspec):
"""
Remove dimensions from variables
"""
uspec[0] = (uspec[0] - self.havg * self.gravity) / self.hamp / self.gravity
# vorticity is measured in 1/s so we normalize using sqrt(g h) / r
uspec[1:] = uspec[1:] * self.radius / torch.sqrt(self.gravity * self.havg)
return uspec
def dudtspec(self, uspec):
"""
Compute time derivatives from solution represented in spectral coefficients
"""
dudtspec = torch.zeros_like(uspec)
# compute the derivatives - this should be incorporated into the solver:
ugrid = self.spec2grid(uspec)
uvgrid = self.getuv(uspec[1:])
# phi = ugrid[0]
# vrtdiv = ugrid[1:]
tmp = uvgrid * (ugrid[1] + self.coriolis)
tmpspec = self.vrtdivspec(tmp)
dudtspec[2] = tmpspec[0]
dudtspec[1] = -1 * tmpspec[1]
tmp = uvgrid * ugrid[0]
tmp = self.vrtdivspec(tmp)
dudtspec[0] = -1 * tmp[1]
tmpspec = self.grid2spec(ugrid[0] + 0.5 * (uvgrid[0]**2 + uvgrid[1]**2))
dudtspec[2] = dudtspec[2] - self.lap * tmpspec
return dudtspec
def galewsky_initial_condition(self):
"""
Initializes non-linear barotropically unstable shallow water test case of Galewsky et al. (2004, Tellus, 56A, 429-440).
[1] Galewsky; An initial-value problem for testing numerical models of the global shallow-water equations;
DOI: 10.1111/j.1600-0870.2004.00071.x; http://www-vortex.mcs.st-and.ac.uk/~rks/reprints/galewsky_etal_tellus_2004.pdf
"""
device = self.lap.device
umax = 80.
phi0 = torch.asarray(torch.pi / 7., device=device)
phi1 = torch.asarray(0.5 * torch.pi - phi0, device=device)
phi2 = 0.25 * torch.pi
en = torch.exp(torch.asarray(-4.0 / (phi1 - phi0)**2, device=device))
alpha = 1. / 3.
beta = 1. / 15.
lats, lons = torch.meshgrid(self.lats, self.lons)
u1 = (umax/en)*torch.exp(1./((lats-phi0)*(lats-phi1)))
ugrid = torch.where(torch.logical_and(lats < phi1, lats > phi0), u1, torch.zeros(self.nlat, self.nlon, device=device))
vgrid = torch.zeros((self.nlat, self.nlon), device=device)
hbump = self.hamp * torch.cos(lats) * torch.exp(-((lons-torch.pi)/alpha)**2) * torch.exp(-(phi2-lats)**2/beta)
# initial velocity field
ugrid = torch.stack((ugrid, vgrid))
# initial vorticity/divergence field
vrtdivspec = self.vrtdivspec(ugrid)
vrtdivgrid = self.spec2grid(vrtdivspec)
# solve balance eqn to get initial zonal geopotential with a localized bump (not balanced).
tmp = ugrid * (vrtdivgrid + self.coriolis)
tmpspec = self.vrtdivspec(tmp)
tmpspec[1] = self.grid2spec(0.5 * torch.sum(ugrid**2, dim=0))
phispec = self.invlap*tmpspec[0] - tmpspec[1] + self.grid2spec(self.gravity*(self.havg + hbump))
# assemble solution
uspec = torch.zeros(3, self.lmax, self.mmax, dtype=vrtdivspec.dtype, device=device)
uspec[0] = phispec
uspec[1:] = vrtdivspec
return torch.tril(uspec)
def random_initial_condition(self, mach=0.1) -> torch.Tensor:
"""
random initial condition on the sphere
"""
device = self.lap.device
ctype = torch.complex128 if self.lap.dtype == torch.float64 else torch.complex64
# mach number relative to wave speed
llimit = mlimit = 20
# hgrid = self.havg + hamp * torch.randn(self.nlat, self.nlon, device=device, dtype=dtype)
# ugrid = uamp * torch.randn(self.nlat, self.nlon, device=device, dtype=dtype)
# vgrid = vamp * torch.randn(self.nlat, self.nlon, device=device, dtype=dtype)
# ugrid = torch.stack((ugrid, vgrid))
# initial geopotential
uspec = torch.zeros(3, self.lmax, self.mmax, dtype=ctype, device=self.lap.device)
uspec[:, :llimit, :mlimit] = torch.sqrt(torch.tensor(4 * torch.pi / llimit / (llimit+1), device=device, dtype=ctype)) * torch.randn_like(uspec[:, :llimit, :mlimit])
uspec[0] = self.gravity * self.hamp * uspec[0]
uspec[0, 0, 0] += torch.sqrt(torch.tensor(4 * torch.pi, device=device, dtype=ctype)) * self.havg * self.gravity
uspec[1:] = mach * uspec[1:] * torch.sqrt(self.gravity * self.havg) / self.radius
# uspec[1:] = self.vrtdivspec(self.spec2grid(uspec[1:]) * torch.cos(self.lats.reshape(-1, 1)))
# # intial velocity field
# ugrid = uamp * self.spec2grid(uspec[1])
# vgrid = vamp * self.spec2grid(uspec[2])
# ugrid = torch.stack((ugrid, vgrid))
# # intial vorticity/divergence field
# vrtdivspec = self.vrtdivspec(ugrid)
# vrtdivgrid = self.spec2grid(vrtdivspec)
# # solve balance eqn to get initial zonal geopotential with a localized bump (not balanced).
# tmp = ugrid * (vrtdivgrid + self.coriolis)
# tmpspec = self.vrtdivspec(tmp)
# tmpspec[1] = self.grid2spec(0.5 * torch.sum(ugrid**2, dim=0))
# phispec = self.invlap*tmpspec[0] - tmpspec[1] + self.grid2spec(self.gravity * hgrid)
# # assemble solution
# uspec = torch.zeros(3, self.lmax, self.mmax, dtype=phispec.dtype, device=device)
# uspec[0] = phispec
# uspec[1:] = vrtdivspec
return torch.tril(uspec)
def timestep(self, uspec: torch.Tensor, nsteps: int) -> torch.Tensor:
"""
Integrate the solution using Adams-Bashforth / forward Euler for nsteps steps.
"""
dudtspec = torch.zeros(3, 3, self.lmax, self.mmax, dtype=uspec.dtype, device=uspec.device)
# pointers to indicate the most current result
inew = 0
inow = 1
iold = 2
for iter in range(nsteps):
dudtspec[inew] = self.dudtspec(uspec)
# update vort,div,phiv with third-order adams-bashforth.
# forward euler, then 2nd-order adams-bashforth time steps to start.
if iter == 0:
dudtspec[inow] = dudtspec[inew]
dudtspec[iold] = dudtspec[inew]
elif iter == 1:
dudtspec[iold] = dudtspec[inew]
uspec = uspec + self.dt*( (23./12.) * dudtspec[inew] - (16./12.) * dudtspec[inow] + (5./12.) * dudtspec[iold] )
# implicit hyperdiffusion for vort and div.
uspec[1:] = self.hyperdiff * uspec[1:]
# cycle through the indices
inew = (inew - 1) % 3
inow = (inow - 1) % 3
iold = (iold - 1) % 3
return uspec
def integrate_grid(self, ugrid, dimensionless=False, polar_opt=0):
dlon = 2 * torch.pi / self.nlon
radius = 1 if dimensionless else self.radius
if polar_opt > 0:
out = torch.sum(ugrid[..., polar_opt:-polar_opt, :] * self.quad_weights[polar_opt:-polar_opt] * dlon * radius**2, dim=(-2, -1))
else:
out = torch.sum(ugrid * self.quad_weights * dlon * radius**2, dim=(-2, -1))
return out
def plot_griddata(self, data, fig, cmap='twilight_shifted', vmax=None, vmin=None, projection='3d', title=None, antialiased=False):
"""
plotting routine for data on the grid. Requires cartopy for 3d plots.
"""
import matplotlib.pyplot as plt
lons = self.lons.squeeze() - torch.pi
lats = self.lats.squeeze()
if data.is_cuda:
data = data.cpu()
lons = lons.cpu()
lats = lats.cpu()
Lons, Lats = np.meshgrid(lons, lats)
if projection == 'mollweide':
#ax = plt.gca(projection=projection)
ax = fig.add_subplot(projection=projection)
im = ax.pcolormesh(Lons, Lats, data, cmap=cmap, vmax=vmax, vmin=vmin)
# ax.set_title("Elevation map of mars")
ax.grid(True)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.colorbar(im, orientation='horizontal')
plt.title(title)
elif projection == '3d':
import cartopy.crs as ccrs
proj = ccrs.Orthographic(central_longitude=0.0, central_latitude=25.0)
#ax = plt.gca(projection=proj, frameon=True)
ax = fig.add_subplot(projection=proj)
Lons = Lons*180/np.pi
Lats = Lats*180/np.pi
# contour data over the map.
im = ax.pcolormesh(Lons, Lats, data, cmap=cmap, transform=ccrs.PlateCarree(), antialiased=antialiased, vmax=vmax, vmin=vmin)
plt.title(title, y=1.05)
else:
raise NotImplementedError
return im
def plot_specdata(self, data, fig, **kwargs):
return self.plot_griddata(self.isht(data), fig, **kwargs)
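if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): integrate the Galewsky
    # test case for a few steps. Resolution and time step are illustrative assumptions.
    solver = ShallowWaterSolver(nlat=128, nlon=256, dt=150.0, grid="equiangular").float()
    uspec = solver.galewsky_initial_condition()
    uspec = solver.timestep(uspec, nsteps=10)
    pvrt = solver.potential_vorticity(uspec)
    print(pvrt.shape)  # expected: torch.Size([128, 256])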
| torch-harmonics-main | torch_harmonics/examples/shallow_water_equations.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .pde_sphere import SphereSolver
from .shallow_water_equations import ShallowWaterSolver | torch-harmonics-main | torch_harmonics/examples/__init__.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.nn as nn
import torch_harmonics as harmonics
import numpy as np
class SphereSolver(nn.Module):
"""
Solver class on the sphere. Can solve the following PDEs:
- Allen-Cahn equation
- complex Ginzburg-Landau equation
"""
def __init__(self, nlat, nlon, dt, lmax=None, mmax=None, grid='legendre-gauss', radius=1.0, coeff=0.001):
super().__init__()
# time stepping param
self.dt = dt
# grid parameters
self.nlat = nlat
self.nlon = nlon
self.grid = grid
# physical constants
self.register_buffer('radius', torch.as_tensor(radius, dtype=torch.float64))
self.register_buffer('coeff', torch.as_tensor(coeff, dtype=torch.float64))
# SHT
self.sht = harmonics.RealSHT(nlat, nlon, lmax=lmax, mmax=mmax, grid=grid, csphase=False)
self.isht = harmonics.InverseRealSHT(nlat, nlon, lmax=lmax, mmax=mmax, grid=grid, csphase=False)
self.lmax = lmax or self.sht.lmax
self.mmax = mmax or self.sht.mmax
# compute gridpoints
if self.grid == "legendre-gauss":
cost, _ = harmonics.quadrature.legendre_gauss_weights(self.nlat, -1, 1)
elif self.grid == "lobatto":
cost, _ = harmonics.quadrature.lobatto_weights(self.nlat, -1, 1)
elif self.grid == "equiangular":
cost, _ = harmonics.quadrature.clenshaw_curtiss_weights(self.nlat, -1, 1)
# apply cosine transform and flip them
lats = -torch.as_tensor(np.arcsin(cost))
lons = torch.linspace(0, 2*np.pi, self.nlon+1, dtype=torch.float64)[:nlon]
self.lmax = self.sht.lmax
self.mmax = self.sht.mmax
l = torch.arange(0, self.lmax).reshape(self.lmax, 1).cdouble()
l = l.expand(self.lmax, self.mmax)
# the laplace operator acting on the coefficients is given by - l (l + 1)
lap = - l * (l + 1) / self.radius**2
invlap = - self.radius**2 / l / (l + 1)
invlap[0] = 0.
# register all
self.register_buffer('lats', lats)
self.register_buffer('lons', lons)
self.register_buffer('l', l)
self.register_buffer('lap', lap)
self.register_buffer('invlap', invlap)
def grid2spec(self, u):
"""spectral coefficients from spatial data"""
return self.sht(u)
def spec2grid(self, uspec):
"""spatial data from spectral coefficients"""
return self.isht(uspec)
def dudtspec(self, uspec, pde='allen-cahn'):
if pde == 'allen-cahn':
ugrid = self.spec2grid(uspec)
u3spec = self.grid2spec(ugrid**3)
dudtspec = self.coeff*self.lap*uspec + uspec - u3spec
elif pde == 'ginzburg-landau':
ugrid = self.spec2grid(uspec)
u3spec = self.grid2spec(ugrid**3)
dudtspec = uspec + (1. + 2.j)*self.coeff*self.lap*uspec - (1. + 2.j)*u3spec
else:
raise NotImplementedError(f"unknown PDE '{pde}'")
return dudtspec
def randspec(self):
"""random data on the sphere"""
rspec = torch.randn_like(self.lap) / 4 / torch.pi
return rspec
def plot_griddata(self, data, fig, cmap='twilight_shifted', vmax=None, vmin=None, projection='3d', title=None, antialiased=False):
"""
plotting routine for data on the grid. Requires cartopy for 3d plots.
"""
import matplotlib.pyplot as plt
lons = self.lons.squeeze() - torch.pi
lats = self.lats.squeeze()
if data.is_cuda:
data = data.cpu()
lons = lons.cpu()
lats = lats.cpu()
Lons, Lats = np.meshgrid(lons, lats)
if projection == 'mollweide':
#ax = plt.gca(projection=projection)
ax = fig.add_subplot(projection=projection)
im = ax.pcolormesh(Lons, Lats, data, cmap=cmap, vmax=vmax, vmin=vmin)
# ax.set_title("Elevation map of mars")
ax.grid(True)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.colorbar(im, orientation='horizontal')
plt.title(title)
elif projection == '3d':
import cartopy.crs as ccrs
proj = ccrs.Orthographic(central_longitude=0.0, central_latitude=25.0)
#ax = plt.gca(projection=proj, frameon=True)
ax = fig.add_subplot(projection=proj)
Lons = Lons*180/np.pi
Lats = Lats*180/np.pi
# contour data over the map.
im = ax.pcolormesh(Lons, Lats, data, cmap=cmap, transform=ccrs.PlateCarree(), antialiased=antialiased, vmax=vmax, vmin=vmin)
plt.title(title, y=1.05)
else:
raise NotImplementedError
return im
def plot_specdata(self, data, fig, **kwargs):
return self.plot_griddata(self.isht(data), fig, **kwargs) | torch-harmonics-main | torch_harmonics/examples/pde_sphere.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .utils.pde_dataset import PdeDataset
from .models.sfno import SphericalFourierNeuralOperatorNet
| torch-harmonics-main | torch_harmonics/examples/sfno/__init__.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
from math import ceil
from ...shallow_water_equations import ShallowWaterSolver
class PdeDataset(torch.utils.data.Dataset):
"""Custom Dataset class for PDE training data"""
def __init__(self, dt, nsteps, dims=(384, 768), pde='shallow water equations', initial_condition='random',
num_examples=32, device=torch.device('cpu'), normalize=True, stream=None):
self.num_examples = num_examples
self.device = device
self.stream = stream
self.nlat = dims[0]
self.nlon = dims[1]
# number of solver steps used to compute the target
self.nsteps = nsteps
self.normalize = normalize
if pde == 'shallow water equations':
lmax = ceil(self.nlat/3)
mmax = lmax
dt_solver = dt / float(self.nsteps)
self.solver = ShallowWaterSolver(self.nlat, self.nlon, dt_solver, lmax=lmax, mmax=mmax, grid='equiangular').to(self.device).float()
else:
raise NotImplementedError
self.set_initial_condition(ictype=initial_condition)
if self.normalize:
inp0, _ = self._get_sample()
self.inp_mean = torch.mean(inp0, dim=(-1, -2)).reshape(-1, 1, 1)
self.inp_var = torch.var(inp0, dim=(-1, -2)).reshape(-1, 1, 1)
def __len__(self):
length = self.num_examples if self.ictype == 'random' else 1
return length
def set_initial_condition(self, ictype='random'):
self.ictype = ictype
def set_num_examples(self, num_examples=32):
self.num_examples = num_examples
def _get_sample(self):
if self.ictype == 'random':
inp = self.solver.random_initial_condition(mach=0.2)
elif self.ictype == 'galewsky':
inp = self.solver.galewsky_initial_condition()
# solve pde for n steps to return the target
tar = self.solver.timestep(inp, self.nsteps)
inp = self.solver.spec2grid(inp)
tar = self.solver.spec2grid(tar)
return inp, tar
def __getitem__(self, index):
# if self.stream is None:
# self.stream = torch.cuda.Stream()
# with torch.cuda.stream(self.stream):
# with torch.inference_mode():
# with torch.no_grad():
# inp, tar = self._get_sample()
# if self.normalize:
# inp = (inp - self.inp_mean) / torch.sqrt(self.inp_var)
# tar = (tar - self.inp_mean) / torch.sqrt(self.inp_var)
# self.stream.synchronize()
with torch.inference_mode():
with torch.no_grad():
inp, tar = self._get_sample()
if self.normalize:
inp = (inp - self.inp_mean) / torch.sqrt(self.inp_var)
tar = (tar - self.inp_mean) / torch.sqrt(self.inp_var)
return inp.clone(), tar.clone()
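if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module); run as a module, e.g.
    # `python -m torch_harmonics.examples.sfno.utils.pde_dataset`, so the relative
    # import above resolves. Resolution, dt and nsteps are illustrative assumptions.
    dataset = PdeDataset(dt=6 * 3600, nsteps=4, dims=(64, 128), num_examples=4)
    inp, tar = dataset[0]
    print(inp.shape, tar.shape)  # both torch.Size([3, 64, 128])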
| torch-harmonics-main | torch_harmonics/examples/sfno/utils/pde_dataset.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.nn as nn
# complex activation functions
class ComplexCardioid(nn.Module):
"""
Complex Cardioid activation function
"""
def __init__(self):
super(ComplexCardioid, self).__init__()
def forward(self, z: torch.Tensor) -> torch.Tensor:
out = 0.5 * (1. + torch.cos(z.angle())) * z
return out
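# Hedged usage sketch (not part of the original module), assuming complex-valued input:
#   act = ComplexCardioid()
#   z = torch.randn(4, 8, dtype=torch.cfloat)
#   out = act(z)   # same shape/dtype as z; magnitude scaled by 0.5 * (1 + cos(angle(z)))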
class ComplexReLU(nn.Module):
"""
Complex-valued variants of the ReLU activation function
"""
def __init__(self, negative_slope=0., mode="real", bias_shape=None, scale=1.):
super(ComplexReLU, self).__init__()
# store parameters
self.mode = mode
if self.mode in ["modulus", "halfplane"]:
if bias_shape is not None:
self.bias = nn.Parameter(scale * torch.ones(bias_shape, dtype=torch.float32))
else:
self.bias = nn.Parameter(scale * torch.ones((1), dtype=torch.float32))
else:
self.bias = 0
self.negative_slope = negative_slope
self.act = nn.LeakyReLU(negative_slope = negative_slope)
def forward(self, z: torch.Tensor) -> torch.Tensor:
if self.mode == "cartesian":
zr = torch.view_as_real(z)
za = self.act(zr)
out = torch.view_as_complex(za)
elif self.mode == "modulus":
zabs = torch.sqrt(torch.square(z.real) + torch.square(z.imag))
out = torch.where(zabs + self.bias > 0, (zabs + self.bias) * z / zabs, 0.0)
elif self.mode == "cardioid":
out = 0.5 * (1. + torch.cos(z.angle())) * z
# elif self.mode == "halfplane":
# # bias is an angle parameter in this case
# modified_angle = torch.angle(z) - self.bias
# condition = torch.logical_and( (0. <= modified_angle), (modified_angle < torch.pi/2.) )
# out = torch.where(condition, z, self.negative_slope * z)
elif self.mode == "real":
zr = torch.view_as_real(z)
outr = zr.clone()
outr[..., 0] = self.act(zr[..., 0])
out = torch.view_as_complex(outr)
else:
raise NotImplementedError
return out | torch-harmonics-main | torch_harmonics/examples/sfno/models/activations.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import tensorly as tl
tl.set_backend('pytorch')
from tltorch.factorized_tensors.core import FactorizedTensor
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def _contract_dense(x, weight, separable=False, operator_type='diagonal'):
order = tl.ndim(x)
# batch-size, in_channels, x, y...
x_syms = list(einsum_symbols[:order])
# in_channels, out_channels, x, y...
weight_syms = list(x_syms[1:]) # no batch-size
# batch-size, out_channels, x, y...
if separable:
out_syms = [x_syms[0]] + list(weight_syms)
else:
weight_syms.insert(1, einsum_symbols[order]) # outputs
out_syms = list(weight_syms)
out_syms[0] = x_syms[0]
if operator_type == 'diagonal':
pass
elif operator_type == 'block-diagonal':
weight_syms.insert(-1, einsum_symbols[order+1])
out_syms[-1] = weight_syms[-2]
elif operator_type == 'vector':
weight_syms.pop()
else:
raise ValueError(f"Unkonw operator type {operator_type}")
eq= ''.join(x_syms) + ',' + ''.join(weight_syms) + '->' + ''.join(out_syms)
if not torch.is_tensor(weight):
weight = weight.to_tensor()
return tl.einsum(eq, x, weight)
def _contract_cp(x, cp_weight, separable=False, operator_type='diagonal'):
order = tl.ndim(x)
x_syms = str(einsum_symbols[:order])
rank_sym = einsum_symbols[order]
out_sym = einsum_symbols[order+1]
out_syms = list(x_syms)
if separable:
factor_syms = [einsum_symbols[1]+rank_sym] #in only
else:
out_syms[1] = out_sym
factor_syms = [einsum_symbols[1]+rank_sym, out_sym+rank_sym] #in, out
factor_syms += [xs+rank_sym for xs in x_syms[2:]] #x, y, ...
if operator_type == 'diagonal':
pass
elif operator_type == 'block-diagonal':
out_syms[-1] = einsum_symbols[order+2]
factor_syms += [out_syms[-1] + rank_sym]
elif operator_type == 'vector':
factor_syms.pop()
else:
raise ValueError(f"Unkonw operator type {operator_type}")
eq = x_syms + ',' + rank_sym + ',' + ','.join(factor_syms) + '->' + ''.join(out_syms)
return tl.einsum(eq, x, cp_weight.weights, *cp_weight.factors)
def _contract_tucker(x, tucker_weight, separable=False, operator_type='diagonal'):
order = tl.ndim(x)
x_syms = str(einsum_symbols[:order])
out_sym = einsum_symbols[order]
out_syms = list(x_syms)
if separable:
core_syms = einsum_symbols[order+1:2*order]
# factor_syms = [einsum_symbols[1]+core_syms[0]] #in only
factor_syms = [xs+rs for (xs, rs) in zip(x_syms[1:], core_syms)] #x, y, ...
else:
core_syms = einsum_symbols[order+1:2*order+1]
out_syms[1] = out_sym
factor_syms = [einsum_symbols[1]+core_syms[0], out_sym+core_syms[1]] #out, in
factor_syms += [xs+rs for (xs, rs) in zip(x_syms[2:], core_syms[2:])] #x, y, ...
if operator_type == 'diagonal':
pass
elif operator_type == 'block-diagonal':
raise NotImplementedError(f"Operator type {operator_type} not implemented for Tucker")
else:
raise ValueError(f"Unkonw operator type {operator_type}")
eq = x_syms + ',' + core_syms + ',' + ','.join(factor_syms) + '->' + ''.join(out_syms)
return tl.einsum(eq, x, tucker_weight.core, *tucker_weight.factors)
def _contract_tt(x, tt_weight, separable=False, operator_type='diagonal'):
order = tl.ndim(x)
x_syms = list(einsum_symbols[:order])
weight_syms = list(x_syms[1:]) # no batch-size
if not separable:
weight_syms.insert(1, einsum_symbols[order]) # outputs
out_syms = list(weight_syms)
out_syms[0] = x_syms[0]
else:
out_syms = list(x_syms)
if operator_type == 'diagonal':
pass
elif operator_type == 'block-diagonal':
weight_syms.insert(-1, einsum_symbols[order+1])
out_syms[-1] = weight_syms[-2]
elif operator_type == 'vector':
weight_syms.pop()
else:
raise ValueError(f"Unkonw operator type {operator_type}")
rank_syms = list(einsum_symbols[order+2:])
tt_syms = []
for i, s in enumerate(weight_syms):
tt_syms.append([rank_syms[i], s, rank_syms[i+1]])
eq = ''.join(x_syms) + ',' + ','.join(''.join(f) for f in tt_syms) + '->' + ''.join(out_syms)
return tl.einsum(eq, x, *tt_weight.factors)
def get_contract_fun(weight, implementation='reconstructed', separable=False):
"""Generic ND implementation of Fourier Spectral Conv contraction
Parameters
----------
weight : tensorly-torch's FactorizedTensor
implementation : {'reconstructed', 'factorized'}, default is 'reconstructed'
whether to reconstruct the weight and do a forward pass (reconstructed)
or contract directly the factors of the factorized weight with the input (factorized)
Returns
-------
function : (x, weight) -> x * weight in Fourier space
"""
if implementation == 'reconstructed':
return _contract_dense
elif implementation == 'factorized':
if torch.is_tensor(weight):
return _contract_dense
elif isinstance(weight, FactorizedTensor):
if weight.name.lower() == 'complexdense':
return _contract_dense
elif weight.name.lower() == 'complextucker':
return _contract_tucker
elif weight.name.lower() == 'complextt':
return _contract_tt
elif weight.name.lower() == 'complexcp':
return _contract_cp
else:
raise ValueError(f'Got unexpected factorized weight type {weight.name}')
else:
raise ValueError(f'Got unexpected weight type of class {weight.__class__.__name__}')
else:
raise ValueError(f'Got {implementation=}, expected "reconstructed" or "factorized"')
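if __name__ == "__main__":
    # Hedged sanity check (not part of the original module): contract a dense diagonal
    # spectral weight with a dummy coefficient tensor. All shapes are illustrative.
    x = torch.randn(2, 4, 8, 5, dtype=torch.cfloat)   # (batch, in_channels, l, m)
    w = torch.randn(4, 6, 8, 5, dtype=torch.cfloat)   # (in_channels, out_channels, l, m)
    y = _contract_dense(x, w, separable=False, operator_type="diagonal")
    print(y.shape)  # torch.Size([2, 6, 8, 5])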
| torch-harmonics-main | torch_harmonics/examples/sfno/models/factorizations.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# | torch-harmonics-main | torch_harmonics/examples/sfno/models/__init__.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.nn as nn
from torch_harmonics import *
from .layers import *
from functools import partial
class SpectralFilterLayer(nn.Module):
"""
Fourier layer. Contains the convolution part of the FNO/SFNO
"""
def __init__(
self,
forward_transform,
inverse_transform,
embed_dim,
filter_type = 'non-linear',
operator_type = 'diagonal',
sparsity_threshold = 0.0,
use_complex_kernels = True,
hidden_size_factor = 2,
factorization = None,
separable = False,
rank = 1e-2,
complex_activation = 'real',
spectral_layers = 1,
drop_rate = 0):
super(SpectralFilterLayer, self).__init__()
if filter_type == 'non-linear' and isinstance(forward_transform, RealSHT):
self.filter = SpectralAttentionS2(forward_transform,
inverse_transform,
embed_dim,
operator_type = operator_type,
sparsity_threshold = sparsity_threshold,
hidden_size_factor = hidden_size_factor,
complex_activation = complex_activation,
spectral_layers = spectral_layers,
drop_rate = drop_rate,
bias = False)
elif filter_type == 'non-linear' and isinstance(forward_transform, RealFFT2):
self.filter = SpectralAttention2d(forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold = sparsity_threshold,
use_complex_kernels = use_complex_kernels,
hidden_size_factor = hidden_size_factor,
complex_activation = complex_activation,
spectral_layers = spectral_layers,
drop_rate = drop_rate,
bias = False)
elif filter_type == 'linear':
self.filter = SpectralConvS2(forward_transform,
inverse_transform,
embed_dim,
embed_dim,
operator_type = operator_type,
rank = rank,
factorization = factorization,
separable = separable,
bias = True)
else:
            raise NotImplementedError(f"Unknown filter type {filter_type}")
def forward(self, x):
return self.filter(x)
class SphericalFourierNeuralOperatorBlock(nn.Module):
"""
Helper module for a single SFNO/FNO block. Can use both FFTs and SHTs to represent either FNO or SFNO blocks.
"""
def __init__(
self,
forward_transform,
inverse_transform,
embed_dim,
filter_type = 'non-linear',
operator_type = 'diagonal',
mlp_ratio = 2.,
drop_rate = 0.,
drop_path = 0.,
act_layer = nn.GELU,
norm_layer = (nn.LayerNorm, nn.LayerNorm),
sparsity_threshold = 0.0,
use_complex_kernels = True,
factorization = None,
separable = False,
rank = 128,
inner_skip = 'linear',
outer_skip = None, # None, nn.linear or nn.Identity
concat_skip = False,
use_mlp = True,
complex_activation = 'real',
spectral_layers = 3):
super(SphericalFourierNeuralOperatorBlock, self).__init__()
# norm layer
self.norm0 = norm_layer[0]() #((h,w))
# convolution layer
self.filter = SpectralFilterLayer(forward_transform,
inverse_transform,
embed_dim,
filter_type,
operator_type = operator_type,
sparsity_threshold = sparsity_threshold,
use_complex_kernels = use_complex_kernels,
hidden_size_factor = mlp_ratio,
factorization = factorization,
separable = separable,
rank = rank,
complex_activation = complex_activation,
spectral_layers = spectral_layers,
drop_rate = drop_rate)
if inner_skip == 'linear':
self.inner_skip = nn.Conv2d(embed_dim, embed_dim, 1, 1)
elif inner_skip == 'identity':
self.inner_skip = nn.Identity()
self.concat_skip = concat_skip
if concat_skip and inner_skip is not None:
self.inner_skip_conv = nn.Conv2d(2*embed_dim, embed_dim, 1, bias=False)
if filter_type == 'linear' or filter_type == 'local':
self.act_layer = act_layer()
# dropout
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# norm layer
self.norm1 = norm_layer[1]() #((h,w))
if use_mlp == True:
mlp_hidden_dim = int(embed_dim * mlp_ratio)
self.mlp = MLP(in_features = embed_dim,
hidden_features = mlp_hidden_dim,
act_layer = act_layer,
drop_rate = drop_rate,
checkpointing = False)
if outer_skip == 'linear':
self.outer_skip = nn.Conv2d(embed_dim, embed_dim, 1, 1)
elif outer_skip == 'identity':
self.outer_skip = nn.Identity()
if concat_skip and outer_skip is not None:
self.outer_skip_conv = nn.Conv2d(2*embed_dim, embed_dim, 1, bias=False)
def forward(self, x):
x = self.norm0(x)
x, residual = self.filter(x)
if hasattr(self, 'inner_skip'):
if self.concat_skip:
x = torch.cat((x, self.inner_skip(residual)), dim=1)
x = self.inner_skip_conv(x)
else:
x = x + self.inner_skip(residual)
if hasattr(self, 'act_layer'):
x = self.act_layer(x)
x = self.norm1(x)
if hasattr(self, 'mlp'):
x = self.mlp(x)
x = self.drop_path(x)
if hasattr(self, 'outer_skip'):
if self.concat_skip:
x = torch.cat((x, self.outer_skip(residual)), dim=1)
x = self.outer_skip_conv(x)
else:
x = x + self.outer_skip(residual)
return x
class SphericalFourierNeuralOperatorNet(nn.Module):
"""
SphericalFourierNeuralOperator module. Can use both FFTs and SHTs to represent either FNO or SFNO,
both linear and non-linear variants.
Parameters
----------
filter_type : str, optional
Type of filter to use ('linear', 'non-linear'), by default "linear"
spectral_transform : str, optional
Type of spectral transformation to use, by default "sht"
operator_type : str, optional
Type of operator to use ('vector', 'diagonal'), by default "vector"
    img_size : tuple, optional
        Spatial shape of the input (nlat, nlon), by default (128, 256)
scale_factor : int, optional
Scale factor to use, by default 3
in_chans : int, optional
Number of input channels, by default 3
out_chans : int, optional
Number of output channels, by default 3
embed_dim : int, optional
Dimension of the embeddings, by default 256
num_layers : int, optional
Number of layers in the network, by default 4
activation_function : str, optional
Activation function to use, by default "gelu"
encoder_layers : int, optional
Number of layers in the encoder, by default 1
    use_mlp : bool, optional
        Whether to use MLPs in the SFNO blocks, by default True
mlp_ratio : int, optional
Ratio of MLP to use, by default 2.0
drop_rate : float, optional
Dropout rate, by default 0.0
drop_path_rate : float, optional
Dropout path rate, by default 0.0
sparsity_threshold : float, optional
Threshold for sparsity, by default 0.0
normalization_layer : str, optional
Type of normalization layer to use ("layer_norm", "instance_norm", "none"), by default "instance_norm"
hard_thresholding_fraction : float, optional
Fraction of hard thresholding (frequency cutoff) to apply, by default 1.0
use_complex_kernels : bool, optional
Whether to use complex kernels, by default True
big_skip : bool, optional
Whether to add a single large skip connection, by default True
    factorization : Any, optional
        Type of factorization to use, by default None
    separable : bool, optional
        Whether to use separable convolutions, by default False
    rank : int, float or Tuple[int], optional
        If a factorization is used, which rank to use; the argument is passed to tensorly, by default 128
complex_activation : str, optional
Type of complex activation function to use, by default "real"
spectral_layers : int, optional
Number of spectral layers, by default 3
pos_embed : bool, optional
Whether to use positional embedding, by default True
Example:
--------
    >>> model = SphericalFourierNeuralOperatorNet(
    ...         img_size=(128, 256),
    ...         scale_factor=4,
    ...         in_chans=2,
    ...         out_chans=2,
    ...         embed_dim=16,
    ...         num_layers=2,
    ...         encoder_layers=1,
    ...         spectral_layers=2,
    ...         use_mlp=True,)
>>> model(torch.randn(1, 2, 128, 256)).shape
torch.Size([1, 2, 128, 256])
"""
def __init__(
self,
filter_type = 'linear',
spectral_transform = 'sht',
operator_type = 'vector',
img_size = (128, 256),
scale_factor = 3,
in_chans = 3,
out_chans = 3,
embed_dim = 256,
num_layers = 4,
activation_function = 'gelu',
encoder_layers = 1,
use_mlp = True,
mlp_ratio = 2.,
drop_rate = 0.,
drop_path_rate = 0.,
sparsity_threshold = 0.0,
normalization_layer = 'instance_norm',
hard_thresholding_fraction = 1.0,
use_complex_kernels = True,
big_skip = True,
factorization = None,
separable = False,
rank = 128,
complex_activation = 'real',
spectral_layers = 2,
pos_embed = True):
super(SphericalFourierNeuralOperatorNet, self).__init__()
self.filter_type = filter_type
self.spectral_transform = spectral_transform
self.operator_type = operator_type
self.img_size = img_size
self.scale_factor = scale_factor
self.in_chans = in_chans
self.out_chans = out_chans
self.embed_dim = self.num_features = embed_dim
self.pos_embed_dim = self.embed_dim
self.num_layers = num_layers
self.hard_thresholding_fraction = hard_thresholding_fraction
self.normalization_layer = normalization_layer
self.use_mlp = use_mlp
self.encoder_layers = encoder_layers
self.big_skip = big_skip
self.factorization = factorization
        self.separable = separable
self.rank = rank
self.complex_activation = complex_activation
self.spectral_layers = spectral_layers
# activation function
if activation_function == 'relu':
self.activation_function = nn.ReLU
elif activation_function == 'gelu':
self.activation_function = nn.GELU
else:
raise ValueError(f"Unknown activation function {activation_function}")
# compute downsampled image size
self.h = self.img_size[0] // scale_factor
self.w = self.img_size[1] // scale_factor
# dropout
self.pos_drop = nn.Dropout(p=drop_rate) if drop_rate > 0. else nn.Identity()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, self.num_layers)]
# pick norm layer
if self.normalization_layer == "layer_norm":
norm_layer0 = partial(nn.LayerNorm, normalized_shape=(self.img_size[0], self.img_size[1]), eps=1e-6)
norm_layer1 = partial(nn.LayerNorm, normalized_shape=(self.h, self.w), eps=1e-6)
elif self.normalization_layer == "instance_norm":
norm_layer0 = partial(nn.InstanceNorm2d, num_features=self.embed_dim, eps=1e-6, affine=True, track_running_stats=False)
norm_layer1 = norm_layer0
elif self.normalization_layer == "none":
norm_layer0 = nn.Identity
norm_layer1 = norm_layer0
else:
raise NotImplementedError(f"Error, normalization {self.normalization_layer} not implemented.")
if pos_embed:
self.pos_embed = nn.Parameter(torch.zeros(1, self.embed_dim, self.img_size[0], self.img_size[1]))
else:
self.pos_embed = None
# encoder
encoder_hidden_dim = self.embed_dim
current_dim = self.in_chans
encoder_modules = []
for i in range(self.encoder_layers):
encoder_modules.append(nn.Conv2d(current_dim, encoder_hidden_dim, 1, bias=True))
encoder_modules.append(self.activation_function())
current_dim = encoder_hidden_dim
encoder_modules.append(nn.Conv2d(current_dim, self.embed_dim, 1, bias=False))
self.encoder = nn.Sequential(*encoder_modules)
# prepare the spectral transform
if self.spectral_transform == 'sht':
modes_lat = int(self.h * self.hard_thresholding_fraction)
modes_lon = int((self.w // 2 + 1) * self.hard_thresholding_fraction)
self.trans_down = RealSHT(*self.img_size, lmax=modes_lat, mmax=modes_lon, grid='equiangular').float()
self.itrans_up = InverseRealSHT(*self.img_size, lmax=modes_lat, mmax=modes_lon, grid='equiangular').float()
self.trans = RealSHT(self.h, self.w, lmax=modes_lat, mmax=modes_lon, grid='legendre-gauss').float()
self.itrans = InverseRealSHT(self.h, self.w, lmax=modes_lat, mmax=modes_lon, grid='legendre-gauss').float()
elif self.spectral_transform == 'fft':
modes_lat = int(self.h * self.hard_thresholding_fraction)
modes_lon = int((self.w // 2 + 1) * self.hard_thresholding_fraction)
self.trans_down = RealFFT2(*self.img_size, lmax=modes_lat, mmax=modes_lon).float()
self.itrans_up = InverseRealFFT2(*self.img_size, lmax=modes_lat, mmax=modes_lon).float()
self.trans = RealFFT2(self.h, self.w, lmax=modes_lat, mmax=modes_lon).float()
self.itrans = InverseRealFFT2(self.h, self.w, lmax=modes_lat, mmax=modes_lon).float()
else:
            raise ValueError(f"Unknown spectral transform {self.spectral_transform}")
self.blocks = nn.ModuleList([])
for i in range(self.num_layers):
first_layer = i == 0
last_layer = i == self.num_layers-1
forward_transform = self.trans_down if first_layer else self.trans
inverse_transform = self.itrans_up if last_layer else self.itrans
inner_skip = 'linear'
outer_skip = 'identity'
if first_layer:
norm_layer = (norm_layer0, norm_layer1)
elif last_layer:
norm_layer = (norm_layer1, norm_layer0)
else:
norm_layer = (norm_layer1, norm_layer1)
block = SphericalFourierNeuralOperatorBlock(forward_transform,
inverse_transform,
self.embed_dim,
filter_type = filter_type,
operator_type = self.operator_type,
mlp_ratio = mlp_ratio,
drop_rate = drop_rate,
drop_path = dpr[i],
act_layer = self.activation_function,
norm_layer = norm_layer,
sparsity_threshold = sparsity_threshold,
use_complex_kernels = use_complex_kernels,
inner_skip = inner_skip,
outer_skip = outer_skip,
use_mlp = use_mlp,
factorization = self.factorization,
separable = self.separable,
rank = self.rank,
complex_activation = self.complex_activation,
spectral_layers = self.spectral_layers)
self.blocks.append(block)
# decoder
decoder_hidden_dim = self.embed_dim
current_dim = self.embed_dim + self.big_skip*self.in_chans
decoder_modules = []
for i in range(self.encoder_layers):
decoder_modules.append(nn.Conv2d(current_dim, decoder_hidden_dim, 1, bias=True))
decoder_modules.append(self.activation_function())
current_dim = decoder_hidden_dim
decoder_modules.append(nn.Conv2d(current_dim, self.out_chans, 1, bias=False))
self.decoder = nn.Sequential(*decoder_modules)
# trunc_normal_(self.pos_embed, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
#nn.init.normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward_features(self, x):
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
return x
def forward(self, x):
if self.big_skip:
residual = x
x = self.encoder(x)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.forward_features(x)
if self.big_skip:
x = torch.cat((x, residual), dim=1)
x = self.decoder(x)
return x
| torch-harmonics-main | torch_harmonics/examples/sfno/models/sfno.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.nn as nn
import torch.fft
from torch.utils.checkpoint import checkpoint
from torch.cuda import amp
import math
import warnings
from torch_harmonics import *
from .contractions import *
from .activations import *
from .factorizations import get_contract_fun
# # import FactorizedTensor from tensorly for tensorized operations
# import tensorly as tl
# from tensorly.plugins import use_opt_einsum
# tl.set_backend('pytorch')
# use_opt_einsum('optimal')
from tltorch.factorized_tensors.core import FactorizedTensor
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
@torch.jit.script
def drop_path(x: torch.Tensor, drop_prob: float = 0., training: bool = False) -> torch.Tensor:
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1. - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2d ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
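# Hedged sketch (not part of the original module): during training, drop_path zeroes whole samples
# with probability drop_prob and rescales the survivors by 1/keep_prob so the expectation is
# preserved; in eval mode it is the identity.
#
#   dp = DropPath(drop_prob=0.5)
#   dp.eval()
#   x = torch.randn(4, 3, 8, 8)
#   assert torch.equal(dp(x), x)   # identity when not training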
class MLP(nn.Module):
def __init__(self,
in_features,
hidden_features = None,
out_features = None,
act_layer = nn.GELU,
output_bias = True,
drop_rate = 0.,
checkpointing = False):
super(MLP, self).__init__()
self.checkpointing = checkpointing
out_features = out_features or in_features
hidden_features = hidden_features or in_features
fc1 = nn.Conv2d(in_features, hidden_features, 1, bias=True)
# ln1 = norm_layer(num_features=hidden_features)
act = act_layer()
fc2 = nn.Conv2d(hidden_features, out_features, 1, bias = output_bias)
if drop_rate > 0.:
drop = nn.Dropout(drop_rate)
self.fwd = nn.Sequential(fc1, act, drop, fc2, drop)
else:
self.fwd = nn.Sequential(fc1, act, fc2)
@torch.jit.ignore
def checkpoint_forward(self, x):
return checkpoint(self.fwd, x)
def forward(self, x):
if self.checkpointing:
return self.checkpoint_forward(x)
else:
return self.fwd(x)
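# Hedged usage sketch (not part of the original module): the MLP acts channel-wise through 1x1
# convolutions, mapping (B, C_in, H, W) -> (B, C_out, H, W). The sizes below are assumptions.
#
#   mlp = MLP(in_features=16, hidden_features=32, out_features=16)
#   y = mlp(torch.randn(2, 16, 8, 8))   # y.shape == (2, 16, 8, 8)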
class RealFFT2(nn.Module):
"""
Helper routine to wrap FFT similarly to the SHT
"""
def __init__(self,
nlat,
nlon,
lmax = None,
mmax = None):
super(RealFFT2, self).__init__()
self.nlat = nlat
self.nlon = nlon
self.lmax = lmax or self.nlat
self.mmax = mmax or self.nlon // 2 + 1
def forward(self, x):
y = torch.fft.rfft2(x, dim=(-2, -1), norm="ortho")
y = torch.cat((y[..., :math.ceil(self.lmax/2), :self.mmax], y[..., -math.floor(self.lmax/2):, :self.mmax]), dim=-2)
return y
class InverseRealFFT2(nn.Module):
"""
Helper routine to wrap FFT similarly to the SHT
"""
def __init__(self,
nlat,
nlon,
lmax = None,
mmax = None):
super(InverseRealFFT2, self).__init__()
self.nlat = nlat
self.nlon = nlon
self.lmax = lmax or self.nlat
self.mmax = mmax or self.nlon // 2 + 1
def forward(self, x):
return torch.fft.irfft2(x, dim=(-2, -1), s=(self.nlat, self.nlon), norm="ortho")
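# Hedged round-trip sketch (not part of the original module): with the default (full) mode cutoffs,
# RealFFT2 keeps all rfft2 coefficients, so InverseRealFFT2 recovers the input up to floating-point
# error. The sizes below are assumptions.
#
#   fft2 = RealFFT2(32, 64)
#   ifft2 = InverseRealFFT2(32, 64)
#   x = torch.randn(1, 3, 32, 64)
#   err = (ifft2(fft2(x)) - x).abs().max()   # expected to be on the order of 1e-6 in float32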
class SpectralConvS2(nn.Module):
"""
Spectral Convolution according to Driscoll & Healy. Designed for convolutions on the two-sphere S2
using the Spherical Harmonic Transforms in torch-harmonics, but supports convolutions on the periodic
domain via the RealFFT2 and InverseRealFFT2 wrappers.
"""
def __init__(self,
forward_transform,
inverse_transform,
in_channels,
out_channels,
scale = 'auto',
operator_type = 'diagonal',
rank = 0.2,
factorization = None,
separable = False,
implementation = 'factorized',
decomposition_kwargs=dict(),
bias = False):
super(SpectralConvS2, self).__init__()
if scale == 'auto':
scale = (1 / (in_channels * out_channels))
self.forward_transform = forward_transform
self.inverse_transform = inverse_transform
self.modes_lat = self.inverse_transform.lmax
self.modes_lon = self.inverse_transform.mmax
self.scale_residual = (self.forward_transform.nlat != self.inverse_transform.nlat) \
or (self.forward_transform.nlon != self.inverse_transform.nlon)
# Make sure we are using a Complex Factorized Tensor
if factorization is None:
factorization = 'Dense' # No factorization
if not factorization.lower().startswith('complex'):
factorization = f'Complex{factorization}'
# remember factorization details
self.operator_type = operator_type
self.rank = rank
self.factorization = factorization
self.separable = separable
assert self.inverse_transform.lmax == self.modes_lat
assert self.inverse_transform.mmax == self.modes_lon
weight_shape = [in_channels]
if not self.separable:
weight_shape += [out_channels]
if self.operator_type == 'diagonal':
weight_shape += [self.modes_lat, self.modes_lon]
elif self.operator_type == 'block-diagonal':
weight_shape += [self.modes_lat, self.modes_lon, self.modes_lon]
elif self.operator_type == 'vector':
weight_shape += [self.modes_lat]
else:
            raise NotImplementedError(f"Unknown operator type {self.operator_type}")
# form weight tensors
self.weight = FactorizedTensor.new(weight_shape, rank=self.rank, factorization=factorization,
fixed_rank_modes=False, **decomposition_kwargs)
# initialization of weights
self.weight.normal_(0, scale)
self._contract = get_contract_fun(self.weight, implementation=implementation, separable=separable)
if bias:
self.bias = nn.Parameter(scale * torch.randn(1, out_channels, 1, 1))
def forward(self, x):
dtype = x.dtype
x = x.float()
residual = x
B, C, H, W = x.shape
with amp.autocast(enabled=False):
x = self.forward_transform(x)
if self.scale_residual:
residual = self.inverse_transform(x)
x = self._contract(x, self.weight, separable=self.separable, operator_type=self.operator_type)
with amp.autocast(enabled=False):
x = self.inverse_transform(x)
if hasattr(self, 'bias'):
x = x + self.bias
x = x.type(dtype)
return x, residual
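# Hedged usage sketch (not part of the original module): a diagonal spectral convolution on the
# sphere, wired up with an SHT pair from torch_harmonics. Grid, sizes and channel counts are
# assumptions.
#
#   sht = RealSHT(32, 64, grid='equiangular').float()
#   isht = InverseRealSHT(32, 64, grid='equiangular').float()
#   conv = SpectralConvS2(sht, isht, in_channels=8, out_channels=8, operator_type='diagonal')
#   y, residual = conv(torch.randn(2, 8, 32, 64))   # y.shape == (2, 8, 32, 64)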
class SpectralAttention2d(nn.Module):
"""
    Geometrical spectral attention layer operating on 2d Fourier (FFT) coefficients
"""
def __init__(self,
forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold = 0.0,
hidden_size_factor = 2,
use_complex_kernels = False,
complex_activation = 'real',
bias = False,
spectral_layers = 1,
drop_rate = 0.):
super(SpectralAttention2d, self).__init__()
self.embed_dim = embed_dim
self.sparsity_threshold = sparsity_threshold
self.hidden_size = int(hidden_size_factor * self.embed_dim)
self.scale = 1 / embed_dim**2
self.mul_add_handle = compl_muladd2d_fwd_c if use_complex_kernels else compl_muladd2d_fwd
self.mul_handle = compl_mul2d_fwd_c if use_complex_kernels else compl_mul2d_fwd
self.spectral_layers = spectral_layers
self.modes_lat = forward_transform.lmax
self.modes_lon = forward_transform.mmax
# only storing the forward handle to be able to call it
self.forward_transform = forward_transform
self.inverse_transform = inverse_transform
self.scale_residual = (self.forward_transform.nlat != self.inverse_transform.nlat) \
or (self.forward_transform.nlon != self.inverse_transform.nlon)
assert inverse_transform.lmax == self.modes_lat
assert inverse_transform.mmax == self.modes_lon
# weights
w = [self.scale * torch.randn(self.embed_dim, self.hidden_size, 2)]
for l in range(1, self.spectral_layers):
w.append(self.scale * torch.randn(self.hidden_size, self.hidden_size, 2))
self.w = nn.ParameterList(w)
if bias:
self.b = nn.ParameterList([self.scale * torch.randn(self.hidden_size, 1, 2) for _ in range(self.spectral_layers)])
self.wout = nn.Parameter(self.scale * torch.randn(self.hidden_size, self.embed_dim, 2))
self.drop = nn.Dropout(drop_rate) if drop_rate > 0. else nn.Identity()
self.activations = nn.ModuleList([])
for l in range(0, self.spectral_layers):
self.activations.append(ComplexReLU(mode=complex_activation, bias_shape=(self.hidden_size, 1, 1), scale=self.scale))
def forward_mlp(self, x):
x = torch.view_as_real(x)
xr = x
for l in range(self.spectral_layers):
if hasattr(self, 'b'):
xr = self.mul_add_handle(xr, self.w[l], self.b[l])
else:
xr = self.mul_handle(xr, self.w[l])
xr = torch.view_as_complex(xr)
xr = self.activations[l](xr)
xr = self.drop(xr)
xr = torch.view_as_real(xr)
x = self.mul_handle(xr, self.wout)
x = torch.view_as_complex(x)
return x
def forward(self, x):
dtype = x.dtype
x = x.float()
residual = x
with amp.autocast(enabled=False):
x = self.forward_transform(x)
if self.scale_residual:
residual = self.inverse_transform(x)
x = self.forward_mlp(x)
with amp.autocast(enabled=False):
x = self.inverse_transform(x)
x = x.type(dtype)
return x, residual
class SpectralAttentionS2(nn.Module):
"""
Spherical non-linear FNO layer
"""
def __init__(self,
forward_transform,
inverse_transform,
embed_dim,
operator_type = 'diagonal',
sparsity_threshold = 0.0,
hidden_size_factor = 2,
complex_activation = 'real',
scale = 'auto',
bias = False,
spectral_layers = 1,
drop_rate = 0.):
super(SpectralAttentionS2, self).__init__()
self.embed_dim = embed_dim
self.sparsity_threshold = sparsity_threshold
self.operator_type = operator_type
self.spectral_layers = spectral_layers
        if scale == 'auto':
            scale = 1 / (embed_dim * embed_dim)
        self.scale = scale
self.modes_lat = forward_transform.lmax
self.modes_lon = forward_transform.mmax
# only storing the forward handle to be able to call it
self.forward_transform = forward_transform
self.inverse_transform = inverse_transform
self.scale_residual = (self.forward_transform.nlat != self.inverse_transform.nlat) \
or (self.forward_transform.nlon != self.inverse_transform.nlon)
assert inverse_transform.lmax == self.modes_lat
assert inverse_transform.mmax == self.modes_lon
hidden_size = int(hidden_size_factor * self.embed_dim)
if operator_type == 'diagonal':
self.mul_add_handle = compl_muladd2d_fwd
self.mul_handle = compl_mul2d_fwd
# weights
w = [self.scale * torch.randn(self.embed_dim, hidden_size, 2)]
for l in range(1, self.spectral_layers):
w.append(self.scale * torch.randn(hidden_size, hidden_size, 2))
self.w = nn.ParameterList(w)
self.wout = nn.Parameter(self.scale * torch.randn(hidden_size, self.embed_dim, 2))
if bias:
self.b = nn.ParameterList([self.scale * torch.randn(hidden_size, 1, 1, 2) for _ in range(self.spectral_layers)])
self.activations = nn.ModuleList([])
for l in range(0, self.spectral_layers):
self.activations.append(ComplexReLU(mode=complex_activation, bias_shape=(hidden_size, 1, 1), scale=self.scale))
elif operator_type == 'vector':
self.mul_add_handle = compl_exp_muladd2d_fwd
self.mul_handle = compl_exp_mul2d_fwd
# weights
w = [self.scale * torch.randn(self.modes_lat, self.embed_dim, hidden_size, 2)]
for l in range(1, self.spectral_layers):
w.append(self.scale * torch.randn(self.modes_lat, hidden_size, hidden_size, 2))
self.w = nn.ParameterList(w)
if bias:
self.b = nn.ParameterList([self.scale * torch.randn(hidden_size, 1, 1, 2) for _ in range(self.spectral_layers)])
self.wout = nn.Parameter(self.scale * torch.randn(self.modes_lat, hidden_size, self.embed_dim, 2))
self.activations = nn.ModuleList([])
for l in range(0, self.spectral_layers):
self.activations.append(ComplexReLU(mode=complex_activation, bias_shape=(hidden_size, 1, 1), scale=self.scale))
else:
raise ValueError('Unknown operator type')
self.drop = nn.Dropout(drop_rate) if drop_rate > 0. else nn.Identity()
def forward_mlp(self, x):
B, C, H, W = x.shape
xr = torch.view_as_real(x)
for l in range(self.spectral_layers):
if hasattr(self, 'b'):
xr = self.mul_add_handle(xr, self.w[l], self.b[l])
else:
xr = self.mul_handle(xr, self.w[l])
xr = torch.view_as_complex(xr)
xr = self.activations[l](xr)
xr = self.drop(xr)
xr = torch.view_as_real(xr)
# final MLP
x = self.mul_handle(xr, self.wout)
x = torch.view_as_complex(x)
return x
def forward(self, x):
dtype = x.dtype
x = x.to(torch.float32)
residual = x
# FWD transform
with amp.autocast(enabled=False):
x = self.forward_transform(x)
if self.scale_residual:
residual = self.inverse_transform(x)
# MLP
x = self.forward_mlp(x)
# BWD transform
with amp.autocast(enabled=False):
x = self.inverse_transform(x)
# cast back to initial precision
x = x.to(dtype)
        return x, residual
| torch-harmonics-main | torch_harmonics/examples/sfno/models/layers.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
"""
Contains complex contractions wrapped into jit for harmonic layers
"""
@torch.jit.script
def compl_contract2d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bixys,kixr->srbkx", a, b)
res = torch.stack([tmp[0,0,...] - tmp[1,1,...], tmp[1,0,...] + tmp[0,1,...]], dim=-1)
return res
@torch.jit.script
def compl_contract2d_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
res = torch.einsum("bixy,kix->bkx", ac, bc)
return torch.view_as_real(res)
@torch.jit.script
def compl_contract_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bins,kinr->srbkn", a, b)
res = torch.stack([tmp[0,0,...] - tmp[1,1,...], tmp[1,0,...] + tmp[0,1,...]], dim=-1)
return res
@torch.jit.script
def compl_contract_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
res = torch.einsum("bin,kin->bkn", ac, bc)
return torch.view_as_real(res)
# Helper routines for spherical MLPs
@torch.jit.script
def compl_mul1d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bixs,ior->srbox", a, b)
res = torch.stack([tmp[0,0,...] - tmp[1,1,...], tmp[1,0,...] + tmp[0,1,...]], dim=-1)
return res
@torch.jit.script
def compl_mul1d_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bix,io->box", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def compl_muladd1d_fwd(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
res = compl_mul1d_fwd(a, b) + c
return res
@torch.jit.script
def compl_muladd1d_fwd_c(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
tmpcc = torch.view_as_complex(compl_mul1d_fwd_c(a, b))
cc = torch.view_as_complex(c)
return torch.view_as_real(tmpcc + cc)
# Helper routines for FFT MLPs
@torch.jit.script
def compl_mul2d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bixys,ior->srboxy", a, b)
res = torch.stack([tmp[0,0,...] - tmp[1,1,...], tmp[1,0,...] + tmp[0,1,...]], dim=-1)
return res
@torch.jit.script
def compl_mul2d_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bixy,io->boxy", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def compl_muladd2d_fwd(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
res = compl_mul2d_fwd(a, b) + c
return res
@torch.jit.script
def compl_muladd2d_fwd_c(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
tmpcc = torch.view_as_complex(compl_mul2d_fwd_c(a, b))
cc = torch.view_as_complex(c)
return torch.view_as_real(tmpcc + cc)
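# Hedged shape sketch (not part of the original module): the *_fwd helpers take real tensors with a
# trailing dimension of 2 (real/imaginary parts), while the *_fwd_c variants view them as complex.
# For the 2d MLP helpers, a: (B, C, H, W, 2) contracted with b: (C, O, 2) gives (B, O, H, W, 2).
#
#   a = torch.randn(2, 4, 8, 8, 2)
#   b = torch.randn(4, 6, 2)
#   assert compl_mul2d_fwd(a, b).shape == compl_mul2d_fwd_c(a, b).shape == (2, 6, 8, 8, 2)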
@torch.jit.script
def real_mul2d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
out = torch.einsum("bixy,io->boxy", a, b)
return out
@torch.jit.script
def real_muladd2d_fwd(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
    return real_mul2d_fwd(a, b) + c
# contraction helpers for the experimental 'vector' (l-dependent) layers in layers.py
@torch.jit.script
def compl_exp_mul2d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    ac = torch.view_as_complex(a)
    bc = torch.view_as_complex(b)
    resc = torch.einsum("bixy,xio->boxy", ac, bc)
    res = torch.view_as_real(resc)
    return res
@torch.jit.script
def compl_exp_muladd2d_fwd(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
    tmpcc = torch.view_as_complex(compl_exp_mul2d_fwd(a, b))
    cc = torch.view_as_complex(c)
    return torch.view_as_real(tmpcc + cc)
| torch-harmonics-main | torch_harmonics/examples/sfno/models/contractions.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ignore this (just for development without installation)
import sys
sys.path.append("..")
sys.path.append(".")
import torch
import torch_harmonics as harmonics
try:
from tqdm import tqdm
except ImportError:
tqdm = lambda x : x
# everything is awesome on GPUs
device = torch.device("cuda")
# create a batch with one sample and 21 channels
b, c, n_theta, n_lambda = 1, 21, 360, 720
# your layers to play with
forward_transform = harmonics.RealSHT(n_theta, n_lambda).to(device)
inverse_transform = harmonics.InverseRealSHT(n_theta, n_lambda).to(device)
forward_transform_equi = harmonics.RealSHT(n_theta, n_lambda, grid="equiangular").to(device)
inverse_transform_equi = harmonics.InverseRealSHT(n_theta, n_lambda, grid="equiangular").to(device)
signal_leggauss = inverse_transform(torch.randn(b, c, n_theta, n_theta+1, device=device, dtype=torch.complex128))
signal_equi = inverse_transform(torch.randn(b, c, n_theta, n_theta+1, device=device, dtype=torch.complex128))
# let's check the layers
for num_iters in [1, 8, 64, 512]:
base = signal_leggauss
for iteration in tqdm(range(num_iters)):
base = inverse_transform(forward_transform(base))
print("relative l2 error accumulation on the legendre-gauss grid: ",
torch.mean(torch.norm(base-signal_leggauss, p='fro', dim=(-1,-2)) / torch.norm(signal_leggauss, p='fro', dim=(-1,-2)) ).item(),
"after", num_iters, "iterations")
# let's check the equiangular layers
for num_iters in [1, 8, 64, 512]:
base = signal_equi
for iteration in tqdm(range(num_iters)):
base = inverse_transform_equi(forward_transform_equi(base))
print("relative l2 error accumulation with interpolation onto equiangular grid: ",
torch.mean(torch.norm(base-signal_equi, p='fro', dim=(-1,-2)) / torch.norm(signal_equi, p='fro', dim=(-1,-2)) ).item(),
"after", num_iters, "iterations")
| torch-harmonics-main | examples/minimal_example.py |
# coding=utf-8
# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import time
from tqdm import tqdm
from functools import partial
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.cuda import amp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch_harmonics.examples.sfno import PdeDataset
from torch_harmonics.examples.sfno import SphericalFourierNeuralOperatorNet as SFNO
# wandb logging
import wandb
wandb.login()
def l2loss_sphere(solver, prd, tar, relative=False, squared=False):
loss = solver.integrate_grid((prd - tar)**2, dimensionless=True).sum(dim=-1)
if relative:
loss = loss / solver.integrate_grid(tar**2, dimensionless=True).sum(dim=-1)
if not squared:
loss = torch.sqrt(loss)
loss = loss.mean()
return loss
def spectral_l2loss_sphere(solver, prd, tar, relative=False, squared=False):
# compute coefficients
coeffs = torch.view_as_real(solver.sht(prd - tar))
coeffs = coeffs[..., 0]**2 + coeffs[..., 1]**2
norm2 = coeffs[..., :, 0] + 2 * torch.sum(coeffs[..., :, 1:], dim=-1)
loss = torch.sum(norm2, dim=(-1,-2))
if relative:
tar_coeffs = torch.view_as_real(solver.sht(tar))
tar_coeffs = tar_coeffs[..., 0]**2 + tar_coeffs[..., 1]**2
tar_norm2 = tar_coeffs[..., :, 0] + 2 * torch.sum(tar_coeffs[..., :, 1:], dim=-1)
tar_norm2 = torch.sum(tar_norm2, dim=(-1,-2))
loss = loss / tar_norm2
if not squared:
loss = torch.sqrt(loss)
loss = loss.mean()
return loss
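# Note (hedged interpretation): the pattern "coeffs[..., :, 0] + 2 * sum(coeffs[..., :, 1:])" above
# accounts for the negative-m coefficients that the real-valued SHT does not store explicitly, so the
# summed quantity approximates the full squared spectral norm sum_{l,m} |c_{l,m}|^2 (Parseval on the sphere).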
def spectral_loss_sphere(solver, prd, tar, relative=False, squared=False):
# gradient weighting factors
lmax = solver.sht.lmax
ls = torch.arange(lmax).float()
spectral_weights = (ls*(ls + 1)).reshape(1, 1, -1, 1).to(prd.device)
# compute coefficients
coeffs = torch.view_as_real(solver.sht(prd - tar))
coeffs = coeffs[..., 0]**2 + coeffs[..., 1]**2
coeffs = spectral_weights * coeffs
norm2 = coeffs[..., :, 0] + 2 * torch.sum(coeffs[..., :, 1:], dim=-1)
loss = torch.sum(norm2, dim=(-1,-2))
if relative:
tar_coeffs = torch.view_as_real(solver.sht(tar))
tar_coeffs = tar_coeffs[..., 0]**2 + tar_coeffs[..., 1]**2
tar_coeffs = spectral_weights * tar_coeffs
tar_norm2 = tar_coeffs[..., :, 0] + 2 * torch.sum(tar_coeffs[..., :, 1:], dim=-1)
tar_norm2 = torch.sum(tar_norm2, dim=(-1,-2))
loss = loss / tar_norm2
if not squared:
loss = torch.sqrt(loss)
loss = loss.mean()
return loss
def h1loss_sphere(solver, prd, tar, relative=False, squared=False):
# gradient weighting factors
lmax = solver.sht.lmax
ls = torch.arange(lmax).float()
spectral_weights = (ls*(ls + 1)).reshape(1, 1, -1, 1).to(prd.device)
# compute coefficients
coeffs = torch.view_as_real(solver.sht(prd - tar))
coeffs = coeffs[..., 0]**2 + coeffs[..., 1]**2
h1_coeffs = spectral_weights * coeffs
h1_norm2 = h1_coeffs[..., :, 0] + 2 * torch.sum(h1_coeffs[..., :, 1:], dim=-1)
l2_norm2 = coeffs[..., :, 0] + 2 * torch.sum(coeffs[..., :, 1:], dim=-1)
h1_loss = torch.sum(h1_norm2, dim=(-1,-2))
l2_loss = torch.sum(l2_norm2, dim=(-1,-2))
# strictly speaking this is not exactly h1 loss
if not squared:
loss = torch.sqrt(h1_loss) + torch.sqrt(l2_loss)
else:
loss = h1_loss + l2_loss
if relative:
raise NotImplementedError("Relative H1 loss not implemented")
loss = loss.mean()
return loss
def fluct_l2loss_sphere(solver, prd, tar, inp, relative=False, polar_opt=0):
# compute the weighting factor first
fluct = solver.integrate_grid((tar - inp)**2, dimensionless=True, polar_opt=polar_opt)
weight = fluct / torch.sum(fluct, dim=-1, keepdim=True)
# weight = weight.reshape(*weight.shape, 1, 1)
loss = weight * solver.integrate_grid((prd - tar)**2, dimensionless=True, polar_opt=polar_opt)
if relative:
loss = loss / (weight * solver.integrate_grid(tar**2, dimensionless=True, polar_opt=polar_opt))
loss = torch.mean(loss)
return loss
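# Note (hedged interpretation): fluct_l2loss_sphere reweights the per-channel squared error by how much
# each channel actually changes between input and target, so channels that are nearly static over one
# time step do not dominate the loss.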
def main(train=True, load_checkpoint=False, enable_amp=False):
# set seed
torch.manual_seed(333)
torch.cuda.manual_seed(333)
# set device
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
torch.cuda.set_device(device.index)
# 1 hour prediction steps
dt = 1*3600
dt_solver = 150
nsteps = dt//dt_solver
dataset = PdeDataset(dt=dt, nsteps=nsteps, dims=(256, 512), device=device, normalize=True)
# There is still an issue with parallel dataloading. Do NOT use it at the moment
# dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=4, persistent_workers=True)
dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0, persistent_workers=False)
solver = dataset.solver.to(device)
nlat = dataset.nlat
nlon = dataset.nlon
# training function
def train_model(model, dataloader, optimizer, gscaler, scheduler=None, nepochs=20, nfuture=0, num_examples=256, num_valid=8, loss_fn='l2'):
train_start = time.time()
for epoch in range(nepochs):
# time each epoch
epoch_start = time.time()
dataloader.dataset.set_initial_condition('random')
dataloader.dataset.set_num_examples(num_examples)
# do the training
acc_loss = 0
model.train()
for inp, tar in dataloader:
with amp.autocast(enabled=enable_amp):
prd = model(inp)
for _ in range(nfuture):
prd = model(prd)
if loss_fn == 'l2':
loss = l2loss_sphere(solver, prd, tar, relative=False)
elif loss_fn == 'h1':
loss = h1loss_sphere(solver, prd, tar, relative=False)
elif loss_fn == 'spectral':
loss = spectral_loss_sphere(solver, prd, tar, relative=False)
elif loss_fn == 'fluct':
loss = fluct_l2loss_sphere(solver, prd, tar, inp, relative=True)
else:
raise NotImplementedError(f'Unknown loss function {loss_fn}')
acc_loss += loss.item() * inp.size(0)
optimizer.zero_grad(set_to_none=True)
# gscaler.scale(loss).backward()
gscaler.scale(loss).backward()
gscaler.step(optimizer)
gscaler.update()
acc_loss = acc_loss / len(dataloader.dataset)
dataloader.dataset.set_initial_condition('random')
dataloader.dataset.set_num_examples(num_valid)
# perform validation
valid_loss = 0
model.eval()
with torch.no_grad():
for inp, tar in dataloader:
prd = model(inp)
for _ in range(nfuture):
prd = model(prd)
loss = l2loss_sphere(solver, prd, tar, relative=True)
valid_loss += loss.item() * inp.size(0)
valid_loss = valid_loss / len(dataloader.dataset)
if scheduler is not None:
scheduler.step(valid_loss)
epoch_time = time.time() - epoch_start
print(f'--------------------------------------------------------------------------------')
print(f'Epoch {epoch} summary:')
print(f'time taken: {epoch_time}')
print(f'accumulated training loss: {acc_loss}')
print(f'relative validation loss: {valid_loss}')
if wandb.run is not None:
current_lr = optimizer.param_groups[0]['lr']
wandb.log({"loss": acc_loss, "validation loss": valid_loss, "learning rate": current_lr})
train_time = time.time() - train_start
print(f'--------------------------------------------------------------------------------')
print(f'done. Training took {train_time}.')
return valid_loss
# rolls out the FNO and compares to the classical solver
def autoregressive_inference(model, dataset, path_root, nsteps, autoreg_steps=10, nskip=1, plot_channel=0, nics=20):
model.eval()
losses = np.zeros(nics)
fno_times = np.zeros(nics)
nwp_times = np.zeros(nics)
for iic in range(nics):
ic = dataset.solver.random_initial_condition(mach=0.2)
inp_mean = dataset.inp_mean
inp_var = dataset.inp_var
prd = (dataset.solver.spec2grid(ic) - inp_mean) / torch.sqrt(inp_var)
prd = prd.unsqueeze(0)
uspec = ic.clone()
# ML model
start_time = time.time()
for i in range(1, autoreg_steps+1):
# evaluate the ML model
prd = model(prd)
if iic == nics-1 and nskip > 0 and i % nskip == 0:
# do plotting
fig = plt.figure(figsize=(7.5, 6))
dataset.solver.plot_griddata(prd[0, plot_channel], fig, vmax=4, vmin=-4)
plt.savefig(path_root+'_pred_'+str(i//nskip)+'.png')
plt.clf()
fno_times[iic] = time.time() - start_time
# classical model
start_time = time.time()
for i in range(1, autoreg_steps+1):
# advance classical model
uspec = dataset.solver.timestep(uspec, nsteps)
if iic == nics-1 and i % nskip == 0 and nskip > 0:
ref = (dataset.solver.spec2grid(uspec) - inp_mean) / torch.sqrt(inp_var)
fig = plt.figure(figsize=(7.5, 6))
dataset.solver.plot_griddata(ref[plot_channel], fig, vmax=4, vmin=-4)
plt.savefig(path_root+'_truth_'+str(i//nskip)+'.png')
plt.clf()
nwp_times[iic] = time.time() - start_time
# ref = (dataset.solver.spec2grid(uspec) - inp_mean) / torch.sqrt(inp_var)
ref = dataset.solver.spec2grid(uspec)
prd = prd * torch.sqrt(inp_var) + inp_mean
losses[iic] = l2loss_sphere(solver, prd, ref, relative=True).item()
return losses, fno_times, nwp_times
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# prepare dicts containing models and corresponding metrics
models = {}
metrics = {}
# # U-Net if installed
# from models.unet import UNet
# models['unet_baseline'] = partial(UNet)
# SFNO models
models['sfno_sc3_layer4_edim256_linear'] = partial(SFNO, spectral_transform='sht', filter_type='linear', img_size=(nlat, nlon),
num_layers=4, scale_factor=3, embed_dim=256, operator_type='vector')
models['sfno_sc3_layer4_edim256_real'] = partial(SFNO, spectral_transform='sht', filter_type='non-linear', img_size=(nlat, nlon),
num_layers=4, scale_factor=3, embed_dim=256, complex_activation = 'real', operator_type='diagonal')
# FNO models
models['fno_sc3_layer4_edim256_linear'] = partial(SFNO, spectral_transform='fft', filter_type='linear', img_size=(nlat, nlon),
num_layers=4, scale_factor=3, embed_dim=256, operator_type='diagonal')
models['fno_sc3_layer4_edim256_real'] = partial(SFNO, spectral_transform='fft', filter_type='non-linear', img_size=(nlat, nlon),
num_layers=4, scale_factor=3, embed_dim=256, complex_activation='real')
# iterate over models and train each model
root_path = os.path.dirname(__file__)
for model_name, model_handle in models.items():
model = model_handle().to(device)
metrics[model_name] = {}
num_params = count_parameters(model)
print(f'number of trainable params: {num_params}')
metrics[model_name]['num_params'] = num_params
if load_checkpoint:
model.load_state_dict(torch.load(os.path.join(root_path, 'checkpoints/'+model_name)))
# run the training
if train:
run = wandb.init(project="sfno spherical swe", group=model_name, name=model_name + '_' + str(time.time()), config=model_handle.keywords)
# optimizer:
optimizer = torch.optim.Adam(model.parameters(), lr=1E-3)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
gscaler = amp.GradScaler(enabled=enable_amp)
start_time = time.time()
print(f'Training {model_name}, single step')
train_model(model, dataloader, optimizer, gscaler, scheduler, nepochs=200, loss_fn='l2')
# multistep training
print(f'Training {model_name}, two step')
optimizer = torch.optim.Adam(model.parameters(), lr=5E-5)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
gscaler = amp.GradScaler(enabled=enable_amp)
dataloader.dataset.nsteps = 2 * dt//dt_solver
train_model(model, dataloader, optimizer, gscaler, scheduler, nepochs=20, nfuture=1)
dataloader.dataset.nsteps = 1 * dt//dt_solver
training_time = time.time() - start_time
run.finish()
torch.save(model.state_dict(), os.path.join(root_path, 'checkpoints/'+model_name))
# set seed
torch.manual_seed(333)
torch.cuda.manual_seed(333)
with torch.inference_mode():
losses, fno_times, nwp_times = autoregressive_inference(model, dataset, os.path.join(root_path,'paper_figures/'+model_name), nsteps=nsteps, autoreg_steps=10)
metrics[model_name]['loss_mean'] = np.mean(losses)
metrics[model_name]['loss_std'] = np.std(losses)
metrics[model_name]['fno_time_mean'] = np.mean(fno_times)
metrics[model_name]['fno_time_std'] = np.std(fno_times)
metrics[model_name]['nwp_time_mean'] = np.mean(nwp_times)
metrics[model_name]['nwp_time_std'] = np.std(nwp_times)
if train:
metrics[model_name]['training_time'] = training_time
df = pd.DataFrame(metrics)
df.to_pickle(os.path.join(root_path, 'output_data/metrics.pkl'))
if __name__ == "__main__":
import torch.multiprocessing as mp
mp.set_start_method('forkserver', force=True)
main(train=True, load_checkpoint=False, enable_amp=False)
| torch-harmonics-main | examples/train_sfno.py |
from setuptools import setup, Extension
import numpy as np
# To compile and install locally run "python setup.py build_ext --inplace"
# To install library to Python site-packages run "python setup.py build_ext install"
ext_modules = [
Extension(
'pycocotools._mask',
sources=['../common/maskApi.c', 'pycocotools/_mask.pyx'],
include_dirs = [np.get_include(), '../common'],
extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
)
]
setup(
name='pycocotools',
packages=['pycocotools'],
package_dir = {'pycocotools': 'pycocotools'},
install_requires=[
'setuptools>=18.0',
'cython>=0.27.3',
'matplotlib>=2.1.0'
],
version='2.0',
ext_modules= ext_modules
)
| cocoapi-master | PythonAPI/setup.py |
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
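# Hedged usage sketch (not part of the original module): a typical read-only workflow with this API.
# The annotation file path below is an assumption.
#
#   coco = COCO('annotations/instances_val2017.json')
#   cat_ids = coco.getCatIds(catNms=['person'])
#   img_ids = coco.getImgIds(catIds=cat_ids)
#   ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
#   anns = coco.loadAnns(ann_ids)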
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
def _isArrayLike(obj):
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
        :param ids (int array) : integer ids specifying imgs
:return: imgs (object array) : loaded img objects
"""
if _isArrayLike(ids):
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
        :param tarDir (str): target directory for the downloaded images
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
        Convert annotation which can be polygons or uncompressed RLE to compressed RLE.
        :return: rle (dict) : run-length encoded mask
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m | cocoapi-master | PythonAPI/pycocotools/coco.py |
__author__ = 'tsungyi'
import pycocotools._mask as _mask
# Interface for manipulating masks stored in RLE format.
#
# RLE is a simple yet efficient format for storing binary masks. RLE
# first divides a vector (or vectorized image) into a series of piecewise
# constant regions and then for each piece simply stores the length of
# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would
# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1]
# (note that the odd counts are always the numbers of zeros). Instead of
# storing the counts directly, additional compression is achieved with a
# variable bitrate representation based on a common scheme called LEB128.
#
# Compression is greatest given large piecewise constant regions.
# Specifically, the size of the RLE is proportional to the number of
# *boundaries* in M (or for an image the number of boundaries in the y
# direction). Assuming fairly simple shapes, the RLE representation is
# O(sqrt(n)) where n is number of pixels in the object. Hence space usage
# is substantially lower, especially for large simple objects (large n).
#
# Many common operations on masks can be computed directly using the RLE
# (without need for decoding). This includes computations such as area,
# union, intersection, etc. All of these operations are linear in the
# size of the RLE, in other words they are O(sqrt(n)) where n is the area
# of the object. Computing these operations on the original mask is O(n).
# Thus, using the RLE can result in substantial computational savings.
#
# The following API functions are defined:
# encode - Encode binary masks using RLE.
# decode - Decode binary masks encoded via RLE.
# merge - Compute union or intersection of encoded masks.
# iou - Compute intersection over union between masks.
# area - Compute area of encoded masks.
# toBbox - Get bounding boxes surrounding encoded masks.
# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask.
#
# Usage:
# Rs = encode( masks )
# masks = decode( Rs )
# R = merge( Rs, intersect=false )
# o = iou( dt, gt, iscrowd )
# a = area( Rs )
# bbs = toBbox( Rs )
# Rs = frPyObjects( [pyObjects], h, w )
#
# In the API the following formats are used:
# Rs - [dict] Run-length encoding of binary masks
# R - dict Run-length encoding of binary mask
# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order)
# iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore
# bbs - [nx4] Bounding box(es) stored as [x y w h]
# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list)
# dt,gt - May be either bounding boxes or encoded masks
# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel).
#
# Finally, a note about the intersection over union (iou) computation.
# The standard iou of a ground truth (gt) and detected (dt) object is
# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt))
# For "crowd" regions, we use a modified criteria. If a gt object is
# marked as "iscrowd", we allow a dt to match any subregion of the gt.
# Choosing gt' in the crowd gt that best matches the dt can be done using
# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing
# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt)
# For crowd gt regions we use this modified criteria above for the iou.
#
# To compile run "python setup.py build_ext --inplace"
# Please do not contact us for help with compiling.
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
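# A small illustrative sketch of the functions above (not part of the original
# interface notes; the mask shape and values are arbitrary examples):
#   import numpy as np
#   from pycocotools import mask as maskUtils
#   m = np.zeros((4, 4, 1), dtype=np.uint8, order='F')  # HxWxN, column-major uint8
#   m[1:3, 1:3, 0] = 1                                   # a 2x2 foreground square
#   rles = maskUtils.encode(m)                           # list with one RLE dict
#   a = maskUtils.area(rles)                             # expected: array([4])
#   bb = maskUtils.toBbox(rles)                          # expected: array([[1., 1., 2., 2.]])
#   back = maskUtils.decode(rles)                        # recovers the binary mask
#   o = maskUtils.iou(rles, rles, [0])                   # iscrowd=0 -> standard IoU of 1.0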
iou = _mask.iou
merge = _mask.merge
frPyObjects = _mask.frPyObjects
def encode(bimask):
if len(bimask.shape) == 3:
return _mask.encode(bimask)
elif len(bimask.shape) == 2:
h, w = bimask.shape
return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0]
def decode(rleObjs):
if type(rleObjs) == list:
return _mask.decode(rleObjs)
else:
return _mask.decode([rleObjs])[:,:,0]
def area(rleObjs):
if type(rleObjs) == list:
return _mask.area(rleObjs)
else:
return _mask.area([rleObjs])[0]
def toBbox(rleObjs):
if type(rleObjs) == list:
return _mask.toBbox(rleObjs)
else:
return _mask.toBbox([rleObjs])[0] | cocoapi-master | PythonAPI/pycocotools/mask.py |
__author__ = 'tylin'
| cocoapi-master | PythonAPI/pycocotools/__init__.py |
__author__ = 'tsungyi'
import numpy as np
import datetime
import time
from collections import defaultdict
from . import mask as maskUtils
import copy
class COCOeval:
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
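    # A Python sketch of the Matlab-style usage above (illustrative only; the
    # annotation and result file paths are assumptions):
    #   from pycocotools.coco import COCO
    #   from pycocotools.cocoeval import COCOeval
    #   cocoGt = COCO('annotations/instances_val2017.json')
    #   cocoDt = cocoGt.loadRes('detections_val2017_results.json')
    #   E = COCOeval(cocoGt, cocoDt, iouType='bbox')
    #   E.params.imgIds = sorted(cocoGt.getImgIds())  # optional: restrict image ids
    #   E.evaluate()
    #   E.accumulate()
    #   E.summarize()                                 # prints the 12 AP/AR summary metrics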
def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.params = {} # evaluation parameters
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if not cocoGt is None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# convert ground truth to mask if iouType == 'segm'
if p.iouType == 'segm':
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
if p.iouType == 'keypoints':
gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if not p.useSegm is None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {(imgId, catId): computeIoU(imgId, catId) \
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc-tic))
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt=dt[0:p.maxDets[-1]]
if p.iouType == 'segm':
g = [g['segmentation'] for g in gt]
d = [d['segmentation'] for d in dt]
elif p.iouType == 'bbox':
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
else:
raise Exception('unknown iouType for iou computation')
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
ious = maskUtils.iou(d,g,iscrowd)
return ious
def computeOks(self, imgId, catId):
p = self.params
        # dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
vars = (sigmas * 2)**2
k = len(sigmas)
# compute oks between each detection and ground truth object
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt['keypoints'])
xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
k1 = np.count_nonzero(vg > 0)
bb = gt['bbox']
x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
for i, dt in enumerate(dts):
d = np.array(dt['keypoints'])
xd = d[0::3]; yd = d[1::3]
if k1>0:
# measure the per-keypoint distance if keypoints visible
dx = xd - xg
dy = yd - yg
else:
# measure minimum distance to keypoints in (x0,y0) & (x1,y1)
z = np.zeros((k))
dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
if k1 > 0:
e=e[vg > 0]
ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
return ious
def evaluateImg(self, imgId, catId, aRng, maxDet):
'''
perform evaluation for single category and image
:return: dict (single image results)
'''
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return None
for g in gt:
if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T,G))
dtm = np.zeros((T,D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T,D))
if not len(ious)==0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t,1-1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind,gind]>0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
break
# continue to next gt unless better match made
if ious[dind,gind] < iou:
continue
# if match successful and best so far, store appropriately
iou=ious[dind,gind]
m=gind
# if match made store id of match for both dt and gt
if m ==-1:
continue
dtIg[tind,dind] = gtIg[m]
dtm[tind,dind] = gt[m]['id']
gtm[tind,m] = d['id']
# set unmatched detections outside of area range to ignore
a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
# store results for given image and category
return {
'image_id': imgId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
tic = time.time()
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
recall = -np.ones((T,K,A,M))
scores = -np.ones((T,R,K,A,M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0*A0*I0
for a, a0 in enumerate(a_list):
Na = a0*I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if not e is None]
if len(E) == 0:
continue
dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
                    # different sorting methods generate slightly different results;
                    # mergesort is used to be consistent with the Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0 )
if npig == 0:
continue
tps = np.logical_and( dtm, np.logical_not(dtIg) )
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp+tp+np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t,k,a,m] = rc[-1]
else:
recall[t,k,a,m] = 0
                        # numpy is slow without cython optimization when accessing elements;
                        # using plain python lists gives a significant speed improvement
pr = pr.tolist(); q = q.tolist()
for i in range(nd-1, 0, -1):
if pr[i] > pr[i-1]:
pr[i-1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except:
pass
precision[t,:,k,a,m] = np.array(q)
scores[t,:,k,a,m] = np.array(ss)
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
'''
def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap==1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,:,aind,mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,aind,mind]
if len(s[s>-1])==0:
mean_s = -1
else:
mean_s = np.mean(s[s>-1])
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType == 'segm' or iouType == 'bbox':
summarize = _summarizeDets
elif iouType == 'keypoints':
summarize = _summarizeKps
self.stats = summarize()
def __str__(self):
self.summarize()
class Params:
'''
Params for coco evaluation api
'''
def setDetParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
def setKpParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
        self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'medium', 'large']
self.useCats = 1
def __init__(self, iouType='segm'):
if iouType == 'segm' or iouType == 'bbox':
self.setDetParams()
elif iouType == 'keypoints':
self.setKpParams()
else:
raise Exception('iouType not supported')
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None | cocoapi-master | PythonAPI/pycocotools/cocoeval.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import platform
import re
import subprocess
import sys
import lit.formats
import lit.util
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
from lit.llvm.subst import FindTool
# Configuration file for the 'lit' test runner.
# The name of this test suite.
config.name = 'CUDAQ'
# The test format to use to interpret tests.
#
# For now we require '&&' between commands, until they get globally killed and
# the test runner updated.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# A list of file extensions to treat as test files.
config.suffixes = ['.cpp', '.ll', '.mlir', '.qke']
config.substitutions.append(('%PATH%', config.environment['PATH']))
config.substitutions.append(('%llvmshlibdir', config.llvm_shlib_dir))
config.substitutions.append(('%pluginext', config.llvm_plugin_ext))
config.substitutions.append(('%llvmInclude', config.llvm_install + "/include"))
config.substitutions.append(('%cudaq_lib_dir', config.cudaq_lib_dir))
config.substitutions.append(('%cudaq_plugin_ext', config.cudaq_plugin_ext))
llvm_config.use_default_substitutions()
# Ask `llvm-config` about asserts
llvm_config.feature_config([('--assertion-mode', {'ON': 'asserts'})])
config.targets = frozenset(config.targets_to_build.split())
for arch in config.targets_to_build.split():
config.available_features.add(arch.lower() + '-registered-target')
# Exclude a list of directories from the test suite:
# - 'Inputs' contain auxiliary inputs for various tests.
local_excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt']
config.excludes = [exclude for exclude in config.excludes] + local_excludes
# The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# The root path where tests should be run.
config.test_exec_root = os.path.join(config.cudaq_obj_root, 'test')
# Tweak the PATH to include the tools directory.
llvm_config.with_environment('PATH', config.cudaq_tools_dir, append_path=True)
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
| cuda-quantum-main | test/lit.cfg.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import cudaq, os, pytest, random, timeit
from cudaq import spin
import numpy as np
skipIfNoMQPU = pytest.mark.skipif(
not (cudaq.num_available_gpus() > 0 and cudaq.has_target('nvidia-mqpu')),
reason="nvidia-mqpu backend not available"
)
# Helper function for asserting two values are within a
# certain tolerance. If we make numpy a dependency,
# this may be replaced in the future with `np.allclose`.
def assert_close(want, got, tolerance=1.e-5) -> bool:
return abs(want - got) < tolerance
@skipIfNoMQPU
def testLargeProblem():
cudaq.set_target('nvidia-mqpu')
    # This problem is intentionally small so that CI testing does not
    # take too much time. To see a larger speedup, increase the number
    # of qubits and terms (e.g. 12 qubits and 100000 terms); here we are
    # just testing the mechanics.
nQubits = 4
nTerms = 1000
nLayers = 2
cnotPairs = random.sample(range(nQubits), nQubits)
H = cudaq.SpinOperator.random(nQubits, nTerms)
kernel, params = cudaq.make_kernel(list)
q = kernel.qalloc(nQubits)
paramCounter = 0
for i in range(nQubits):
kernel.rx(params[paramCounter], q[i])
kernel.rz(params[paramCounter + 1], q[i])
paramCounter = paramCounter + 2
for i in range(0, len(cnotPairs), 2):
kernel.cx(q[cnotPairs[i]], q[cnotPairs[i + 1]])
for i in range(nLayers):
for j in range(nQubits):
kernel.rz(params[paramCounter], q[j])
kernel.rz(params[paramCounter + 1], q[j])
kernel.rz(params[paramCounter + 2], q[j])
paramCounter = paramCounter + 3
for i in range(0, len(cnotPairs), 2):
kernel.cx(q[cnotPairs[i]], q[cnotPairs[i + 1]])
execParams = np.random.uniform(low=-np.pi,
high=np.pi,
size=(nQubits *
(3 * nLayers + 2),)).tolist()
# JIT and warm up
kernel(execParams)
# Serial Execution
start = timeit.default_timer()
    e_serial = cudaq.observe(kernel, H, execParams)
    stop = timeit.default_timer()
    print("serial time = ", (stop - start))
    # Parallel Execution
    start = timeit.default_timer()
    e_parallel = cudaq.observe(kernel, H, execParams, execution=cudaq.parallel.thread)
    stop = timeit.default_timer()
    print("mqpu time = ", (stop - start))
    # The serial and parallel executions should agree on the expectation value.
    assert assert_close(e_serial.expectation_z(), e_parallel.expectation_z())
# Reset for the next tests.
cudaq.reset_target()
@skipIfNoMQPU
def testAccuracy():
cudaq.set_target('nvidia-mqpu')
target = cudaq.get_target()
numQpus = target.num_qpus()
assert numQpus > 0
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])
# Define its spin Hamiltonian.
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
# Confirmed expectation value for this system when `theta=0.59`.
want_expectation_value = -1.7487948611472093
# Get the `cudaq.ObserveResult` back from `cudaq.observe()`.
# No shots provided.
result_no_shots = cudaq.observe(kernel, hamiltonian, 0.59, execution=cudaq.parallel.thread)
expectation_value_no_shots = result_no_shots.expectation_z()
assert assert_close(want_expectation_value, expectation_value_no_shots)
cudaq.reset_target()
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/parallel/test_mqpu.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import cudaq, os, pytest, random, timeit
from cudaq import spin
import numpy as np
skipIfUnsupported = pytest.mark.skipif(
not (cudaq.num_available_gpus() > 0 and cudaq.mpi.is_initialized() and cudaq.has_target('nvidia-mqpu')),
reason="nvidia-mqpu backend not available or mpi not found"
)
@skipIfUnsupported
def testMPI():
cudaq.set_target('nvidia-mqpu')
cudaq.mpi.initialize()
target = cudaq.get_target()
numQpus = target.num_qpus()
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])
# Define its spin Hamiltonian.
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
# Confirmed expectation value for this system when `theta=0.59`.
want_expectation_value = -1.7487948611472093
# Get the `cudaq.ObserveResult` back from `cudaq.observe()`.
# No shots provided.
result_no_shots = cudaq.observe(kernel,
hamiltonian,
0.59,
execution=cudaq.parallel.mpi)
expectation_value_no_shots = result_no_shots.expectation_z()
assert np.isclose(want_expectation_value, expectation_value_no_shots)
# Test all gather
numRanks = cudaq.mpi.num_ranks()
local = [1.0]
globalList = cudaq.mpi.all_gather(numRanks, local)
assert len(globalList) == numRanks
cudaq.reset_target()
cudaq.mpi.finalize()
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/parallel/test_mpi_mqpu.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import numpy as np
import cudaq
openfermion_pyscf = pytest.importorskip('openfermionpyscf')
def test_HamiltonianGenH2Sto3g():
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))]
molecule, data = cudaq.chemistry.create_molecular_hamiltonian(
geometry, 'sto-3g', 1, 0)
energy = molecule.to_matrix().minimal_eigenvalue()
assert np.isclose(energy, -1.137, rtol=1e-3)
def test_HamiltonianGenH2631g():
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))]
molecule, data = cudaq.chemistry.create_molecular_hamiltonian(
geometry, '6-31g', 1, 0)
energy = molecule.to_matrix().minimal_eigenvalue()
assert np.isclose(energy, -1.1516, rtol=1e-3)
def testUCCSD():
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))]
molecule, data = cudaq.chemistry.create_molecular_hamiltonian(
geometry, 'sto-3g', 1, 0)
# Get the number of fermions and orbitals / qubits
numElectrons = data.n_electrons
numQubits = 2 * data.n_orbitals
# create the ansatz
kernel, thetas = cudaq.make_kernel(list)
qubits = kernel.qalloc(4)
# hartree fock
kernel.x(qubits[0])
kernel.x(qubits[1])
cudaq.kernels.uccsd(kernel, qubits, thetas, numElectrons, numQubits)
num_parameters = cudaq.kernels.uccsd_num_parameters(numElectrons, numQubits)
# Run VQE
optimizer = cudaq.optimizers.COBYLA()
energy, params = cudaq.vqe(kernel,
molecule,
optimizer,
parameter_count=num_parameters)
print(energy, params)
assert np.isclose(-1.137, energy, rtol=1e-3)
def testHWE():
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))]
molecule, data = cudaq.chemistry.create_molecular_hamiltonian(
geometry, 'sto-3g', 1, 0)
# Get the number of qubits
numQubits = 2 * data.n_orbitals
# select number of repeating layers in ansatz
numLayers = 4
# create the ansatz
kernel, thetas = cudaq.make_kernel(list)
qubits = kernel.qalloc(numQubits)
# hartree fock
kernel.x(qubits[0])
kernel.x(qubits[1])
cudaq.kernels.hwe(kernel, qubits, numQubits, numLayers, thetas)
num_parameters = cudaq.kernels.num_hwe_parameters(numQubits, numLayers)
assert np.equal(40, num_parameters)
# Run VQE
optimizer = cudaq.optimizers.COBYLA()
energy, params = cudaq.vqe(kernel,
molecule,
optimizer,
parameter_count=num_parameters)
print(energy, params)
assert np.isclose(-1.137, energy, rtol=1e-3)
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/domains/test_chemistry.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import numpy as np
import cudaq
skipIfNoMQPU = pytest.mark.skipif(
not (cudaq.num_available_gpus() > 0 and cudaq.has_target('nvidia-mqpu')),
reason="nvidia-mqpu backend not available"
)
def test_simpleObserveN_QNN():
qubit_count = 2
samples_count = 5000
h = cudaq.spin.z(0)
parameters_count = qubit_count * 3
parameters = np.random.default_rng(13).uniform(low=0,
high=1,
size=(samples_count,
parameters_count))
np.random.seed(1)
# Build up a parameterized kernel composed of a layer of
# Rx, Ry, Rz, then CZs.
kernel, params = cudaq.make_kernel(list)
qubits = kernel.qalloc(qubit_count)
qubits_list = list(range(qubit_count))
for i in range(qubit_count):
kernel.rx(params[i], qubits[i])
for i in range(qubit_count):
kernel.ry(params[i + qubit_count], qubits[i])
for i in range(qubit_count):
kernel.rz(params[i + qubit_count * 2], qubits[i])
for q1, q2 in zip(qubits_list[0::2], qubits_list[1::2]):
kernel.cz(qubits[q1], qubits[q2])
exp_vals = cudaq.observe(kernel, h, parameters)
assert len(exp_vals) == samples_count
data = np.asarray([e.expectation_z() for e in exp_vals])
# Test a collection of computed exp vals.
assert np.isclose(data[0], 0.44686141)
assert np.isclose(data[1], 0.5014559)
assert np.isclose(data[2], 0.6815774)
assert np.isclose(data[-3], 0.50511996)
assert np.isclose(data[-2], 0.54314517)
assert np.isclose(data[-1], 0.33752631)
@skipIfNoMQPU
def test_observeAsync_QNN():
target = cudaq.get_target('nvidia-mqpu')
cudaq.set_target(target)
num_qpus = target.num_qpus()
n_qubits = 2
n_samples = 2
h = cudaq.spin.z(0)
n_parameters = n_qubits * 3
parameters = np.random.default_rng(13).uniform(low=0,
high=1,
size=(n_samples,
n_parameters))
kernel, params = cudaq.make_kernel(list)
qubits = kernel.qalloc(n_qubits)
qubits_list = list(range(n_qubits))
for i in range(n_qubits):
kernel.rx(params[i], qubits[i])
for i in range(n_qubits):
kernel.ry(params[i + n_qubits], qubits[i])
for i in range(n_qubits):
kernel.rz(params[i + n_qubits * 2], qubits[i])
xi = np.split(parameters, num_qpus)
asyncresults = []
for i in range(len(xi)):
for j in range(xi[i].shape[0]):
asyncresults.append(
cudaq.observe_async(kernel, h, xi[i][j, :], qpu_id=i))
expvals = []
for res in asyncresults:
expvals.append(res.get().expectation_z())
assert np.allclose(np.asarray([0.44686155, 0.50145603]),
np.asarray(expvals))
cudaq.reset_target()
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"]) | cuda-quantum-main | python/tests/domains/test_qnn.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import sys
import time
import pytest
from multiprocessing import Process
import cudaq
from cudaq import spin
try:
from utils.mock_qpu.oqc import startServer
except:
print("Mock qpu not available, skipping OQC tests.")
pytest.skip("Mock qpu not available.", allow_module_level=True)
# Define the port for the mock server
port = 62444
def assert_close(got) -> bool:
return got < -1.1 and got > -2.2
@pytest.fixture(scope="session", autouse=True)
def startUpMockServer():
# Set the targeted QPU
cudaq.set_target('oqc',
url=f'http://localhost:{port}',
email="[email protected]",
password="password")
# Launch the Mock Server
p = Process(target=startServer, args=(port,))
p.start()
time.sleep(1)
yield "Running the tests."
# Kill the server, remove the file
p.terminate()
def test_OQC_sample():
# Create the kernel we'd like to execute on OQC
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
# FIXME CANT HAVE LOOP IN IT YET...
kernel.mz(qubits[0])
kernel.mz(qubits[1])
print(kernel)
# Run sample synchronously, this is fine
# here in testing since we are targeting a mock
# server. In reality you'd probably not want to
# do this with the remote job queue.
counts = cudaq.sample(kernel)
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
# Run sample, but do so asynchronously. This enters
# the execution job into the remote OQC job queue.
future = cudaq.sample_async(kernel)
# We could go do other work, but since this
# is a mock server, get the result
counts = future.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
# Ok now this is the most likely scenario, launch the
# job asynchronously, this puts it in the queue, now
# you can take the future and persist it to file for later.
future = cudaq.sample_async(kernel)
print(future)
# Persist the future to a file (or here a string,
# could write this string to file for later)
futureAsString = str(future)
# Later you can come back and read it in and get
# the results, which are now present because the job
# made it through the queue
futureReadIn = cudaq.AsyncSampleResult(futureAsString)
counts = futureReadIn.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
def test_OQC_observe():
# Create the parameterized ansatz
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])
# Define its spin Hamiltonian.
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
# Run the observe task on OQC synchronously
res = cudaq.observe(kernel, hamiltonian, .59)
want_expectation_value = -1.71
assert assert_close(res.expectation_z())
# Launch it asynchronously, enters the job into the queue
future = cudaq.observe_async(kernel, hamiltonian, .59)
# Retrieve the results (since we're on a mock server)
res = future.get()
assert assert_close(res.expectation_z())
# Launch the job async, job goes in the queue, and
# we're free to dump the future to file
future = cudaq.observe_async(kernel, hamiltonian, .59)
print(future)
futureAsString = str(future)
# Later you can come back and read it in
# You must provide the spin_op so we can reconstruct
# the results from the term job ids.
futureReadIn = cudaq.AsyncObserveResult(futureAsString, hamiltonian)
res = futureReadIn.get()
assert assert_close(res.expectation_z())
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/backends/test_oqc.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import tempfile
import time
from multiprocessing import Process
import cudaq
from cudaq import spin
import pytest
iqm_client = pytest.importorskip("iqm_client")
try:
from utils.mock_qpu.iqm.mock_iqm_cortex_cli import write_a_mock_tokens_file
from utils.mock_qpu.iqm.mock_iqm_server import startServer
except:
pytest.skip("Mock qpu not available, skipping IQM tests.",
allow_module_level=True)
# Define the port for the mock server
port = 9100
def assert_close(want, got, tolerance=1.0e-5) -> bool:
return abs(want - got) < tolerance
@pytest.fixture(scope="session", autouse=True)
def startUpMockServer():
# Write a fake access tokens file
tmp_tokens_file = tempfile.NamedTemporaryFile(delete=False)
write_a_mock_tokens_file(tmp_tokens_file.name)
# Launch the Mock Server
p = Process(target=startServer, args=(port,))
p.start()
time.sleep(1)
# Set the targeted QPU
os.environ["IQM_TOKENS_FILE"] = tmp_tokens_file.name
cudaq.set_target("iqm",
url="http://localhost:{}".format(port),
**{"qpu-architecture": "Apollo"})
yield "Running the tests."
# Kill the server, remove the tokens file
p.terminate()
os.remove(tmp_tokens_file.name)
def test_iqm_ghz():
shots = 100000
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.mz(qubits[0])
kernel.mz(qubits[1])
counts = cudaq.sample(kernel, shots_count=shots)
assert assert_close(counts["00"], shots / 2, 2)
assert assert_close(counts["01"], 0., 2)
assert assert_close(counts["10"], 0., 2)
assert assert_close(counts["11"], shots / 2, 2)
future = cudaq.sample_async(kernel, shots_count=shots)
counts = future.get()
assert assert_close(counts["00"], shots / 2, 2)
assert assert_close(counts["01"], 0., 2)
assert assert_close(counts["10"], 0., 2)
assert assert_close(counts["11"], shots / 2, 2)
future = cudaq.sample_async(kernel, shots_count=shots)
futureAsString = str(future)
futureReadIn = cudaq.AsyncSampleResult(futureAsString)
counts = futureReadIn.get()
assert assert_close(counts["00"], shots / 2, 2)
assert assert_close(counts["01"], 0., 2)
assert assert_close(counts["10"], 0., 2)
assert assert_close(counts["11"], shots / 2, 2)
# FIXME: This test relies on the mock server to return the correct
# expectation value. IQM Mock server doesn't do it yet.
def test_iqm_observe():
# Create the parameterized ansatz
shots = 100000
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])
# Define its spin Hamiltonian.
hamiltonian = (5.907 - 2.1433 * spin.x(0) * spin.x(1) -
2.1433 * spin.y(0) * spin.y(1) + 0.21829 * spin.z(0) -
6.125 * spin.z(1))
# Run the observe task on IQM synchronously
res = cudaq.observe(kernel, hamiltonian, 0.59, shots_count=shots)
want_expectation_value = -1.71
assert assert_close(want_expectation_value, res.expectation_z(), 5e-2)
# Launch it asynchronously, enters the job into the queue
future = cudaq.observe_async(kernel, hamiltonian, 0.59, shots_count=shots)
# Retrieve the results (since we're on a mock server)
res = future.get()
assert assert_close(want_expectation_value, res.expectation_z(), 5e-2)
# Launch the job async, job goes in the queue, and
# we're free to dump the future to file
future = cudaq.observe_async(kernel, hamiltonian, 0.59, shots_count=shots)
futureAsString = str(future)
# Later you can come back and read it in
# You must provide the spin_op so we can reconstruct
# the results from the term job ids.
futureReadIn = cudaq.AsyncObserveResult(futureAsString, hamiltonian)
res = futureReadIn.get()
assert assert_close(want_expectation_value, res.expectation_z(), 5e-2)
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/backends/test_IQM.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import cudaq, pytest, os, time
from cudaq import spin
from multiprocessing import Process
try:
from utils.mock_qpu.quantinuum import startServer
except:
print("Mock qpu not available, skipping Quantinuum tests.")
pytest.skip("Mock qpu not available.", allow_module_level=True)
# Define the port for the mock server
port = 62454
def assert_close(got) -> bool:
return got < -1.5 and got > -1.9
@pytest.fixture(scope="session", autouse=True)
def startUpMockServer():
# We need a Fake Credentials Config file
credsName = '{}/QuantinuumFakeConfig.config'.format(os.environ["HOME"])
f = open(credsName, 'w')
f.write('key: {}\nrefresh: {}\ntime: 0'.format("hello", "rtoken"))
f.close()
cudaq.set_random_seed(13)
# Launch the Mock Server
p = Process(target=startServer, args=(port,))
p.start()
time.sleep(1)
yield credsName
# Kill the server, remove the file
p.terminate()
os.remove(credsName)
@pytest.fixture(scope="function", autouse=True)
def configureTarget(startUpMockServer):
# Set the targeted QPU
cudaq.set_target('quantinuum',
url='http://localhost:{}'.format(port),
credentials=startUpMockServer)
yield "Running the test."
cudaq.reset_target()
def test_quantinuum_sample():
# Create the kernel we'd like to execute on Quantinuum
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.mz(qubits)
print(kernel)
# Run sample synchronously, this is fine
# here in testing since we are targeting a mock
# server. In reality you'd probably not want to
# do this with the remote job queue.
counts = cudaq.sample(kernel)
counts.dump()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
# Run sample, but do so asynchronously. This enters
# the execution job into the remote Quantinuum job queue.
future = cudaq.sample_async(kernel)
# We could go do other work, but since this
# is a mock server, get the result
counts = future.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
# Ok now this is the most likely scenario, launch the
# job asynchronously, this puts it in the queue, now
# you can take the future and persist it to file for later.
future = cudaq.sample_async(kernel)
print(future)
# Persist the future to a file (or here a string,
# could write this string to file for later)
futureAsString = str(future)
# Later you can come back and read it in and get
# the results, which are now present because the job
# made it through the queue
futureReadIn = cudaq.AsyncSampleResult(futureAsString)
counts = futureReadIn.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
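# A minimal sketch (not exercised by the tests in this file) of how the
# serialized future shown above could be persisted to disk and restored
# later. The file name is illustrative only; the sketch relies solely on
# `str(future)` and `cudaq.AsyncSampleResult`, both demonstrated above.
def _persist_and_restore_sample_future(future, path="sample_future.txt"):
    # Write the serialized future out to a file.
    with open(path, "w") as f:
        f.write(str(future))
    # Later (possibly from a different process), read it back in and
    # reconstruct the async result handle to fetch the counts.
    with open(path, "r") as f:
        restored = cudaq.AsyncSampleResult(f.read())
    return restored.get()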
def test_quantinuum_observe():
# Create the parameterized ansatz
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])
# Define its spin Hamiltonian.
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
# Run the observe task on quantinuum synchronously
res = cudaq.observe(kernel, hamiltonian, .59)
assert assert_close(res.expectation_z())
# Launch it asynchronously, enters the job into the queue
future = cudaq.observe_async(kernel, hamiltonian, .59)
# Retrieve the results (since we're on a mock server)
res = future.get()
assert assert_close(res.expectation_z())
# Launch the job async, job goes in the queue, and
# we're free to dump the future to file
future = cudaq.observe_async(kernel, hamiltonian, .59)
print(future)
futureAsString = str(future)
# Later you can come back and read it in
# You must provide the spin_op so we can reconstruct
# the results from the term job ids.
futureReadIn = cudaq.AsyncObserveResult(futureAsString, hamiltonian)
res = futureReadIn.get()
assert assert_close(res.expectation_z())
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/backends/test_Quantinuum.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import cudaq, pytest, os, time
from cudaq import spin
from multiprocessing import Process
try:
from utils.mock_qpu.ionq import startServer
except:
print("Mock qpu not available, skipping IonQ tests.")
pytest.skip("Mock qpu not available.", allow_module_level=True)
# Define the port for the mock server
port = 62455
def assert_close(got) -> bool:
return got < -1.5 and got > -1.9
@pytest.fixture(scope="session", autouse=True)
def startUpMockServer():
os.environ["IONQ_API_KEY"] = "00000000000000000000000000000000"
# Launch the Mock Server
p = Process(target=startServer, args=(port,))
p.start()
time.sleep(1)
yield "Server started."
# Kill the server
p.terminate()
@pytest.fixture(scope="function", autouse=True)
def configureTarget():
# Set the targeted QPU
cudaq.set_target(
"ionq",
url="http://localhost:{}".format(port)
)
yield "Running the test."
cudaq.reset_target()
def test_ionq_sample():
# Create the kernel we'd like to execute on IonQ
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.mz(qubits)
print(kernel)
# Run sample synchronously, this is fine
# here in testing since we are targeting a mock
# server. In reality you'd probably not want to
# do this with the remote job queue.
counts = cudaq.sample(kernel)
assert len(counts) == 2
assert "00" in counts
assert "11" in counts
# Run sample, but do so asynchronously. This enters
# the execution job into the remote IonQ job queue.
future = cudaq.sample_async(kernel)
# We could go do other work, but since this
# is a mock server, get the result
counts = future.get()
assert len(counts) == 2
assert "00" in counts
assert "11" in counts
# Ok now this is the most likely scenario, launch the
# job asynchronously, this puts it in the queue, now
# you can take the future and persist it to file for later.
future = cudaq.sample_async(kernel)
print(future)
# Persist the future to a file (or here a string,
# could write this string to file for later)
futureAsString = str(future)
# Later you can come back and read it in and get
# the results, which are now present because the job
# made it through the queue
futureReadIn = cudaq.AsyncSampleResult(futureAsString)
counts = futureReadIn.get()
assert len(counts) == 2
assert "00" in counts
assert "11" in counts
def test_ionq_observe():
# Create the parameterized ansatz
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])
# Define its spin Hamiltonian.
hamiltonian = (5.907 - 2.1433 * spin.x(0) * spin.x(1) -
2.1433 * spin.y(0) * spin.y(1) + 0.21829 * spin.z(0) -
6.125 * spin.z(1))
# Run the observe task on IonQ synchronously
res = cudaq.observe(kernel, hamiltonian, 0.59)
assert assert_close(res.expectation_z())
# Launch it asynchronously, enters the job into the queue
future = cudaq.observe_async(kernel, hamiltonian, 0.59)
# Retrieve the results (since we're on a mock server)
res = future.get()
assert assert_close(res.expectation_z())
# Launch the job async, job goes in the queue, and
# we're free to dump the future to file
future = cudaq.observe_async(kernel, hamiltonian, 0.59)
print(future)
futureAsString = str(future)
# Later you can come back and read it in
# You must provide the spin_op so we can reconstruct
# the results from the term job ids.
futureReadIn = cudaq.AsyncObserveResult(futureAsString, hamiltonian)
res = futureReadIn.get()
assert assert_close(res.expectation_z())
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/backends/test_IonQ.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import cudaq, pytest, os, time
from cudaq import spin
from multiprocessing import Process
def assert_close(got) -> bool:
return got < -1.5 and got > -1.9
@pytest.fixture(scope="function", autouse=True)
def configureTarget():
# We need a Fake Credentials Config file
credsName = '{}/FakeConfig2.config'.format(os.environ["HOME"])
f = open(credsName, 'w')
f.write('key: {}\nrefresh: {}\ntime: 0'.format("hello", "rtoken"))
f.close()
# Set the targeted QPU
cudaq.set_target('quantinuum', emulate='true')
yield "Running the tests."
# remove the file
os.remove(credsName)
cudaq.reset_target()
def test_quantinuum_sample():
cudaq.set_random_seed(13)
# Create the kernel we'd like to execute on Quantinuum
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.mz(qubits)
print(kernel)
# Run sample synchronously, this is fine
# here in testing since we are targeting a mock
# server. In reality you'd probably not want to
# do this with the remote job queue.
counts = cudaq.sample(kernel)
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
# Run sample, but do so asynchronously. This enters
# the execution job into the remote Quantinuum job queue.
future = cudaq.sample_async(kernel)
# We could go do other work, but since this
# is a mock server, get the result
counts = future.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
def test_quantinuum_observe():
cudaq.set_random_seed(13)
# Create the parameterized ansatz
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
kernel.ry(theta, qreg[1])
kernel.cx(qreg[1], qreg[0])
# Define its spin Hamiltonian.
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
# Run the observe task on quantinuum synchronously
res = cudaq.observe(kernel, hamiltonian, .59, shots_count=100000)
assert assert_close(res.expectation_z())
# Launch it asynchronously, enters the job into the queue
future = cudaq.observe_async(kernel, hamiltonian, .59, shots_count=100000)
# Retrieve the results (since we're emulating)
res = future.get()
assert assert_close(res.expectation_z())
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/backends/test_Quantinuum_LocalEmulation.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import math
import pytest
from typing import List, Tuple, Callable
import cudaq
# Helper function for asserting two values are within a
# certain tolerance. If we make numpy a cudaq dependency,
# this may be replaced in the future with `np.allclose`.
def assert_close(want, got, tolerance=1.e-4) -> bool:
return abs(want - got) < tolerance
# Define a custom gradient strategy that can be used on arbitrary
# python functions.
def gradient_central_difference(objective_function: Callable,
parameter_vector: List[float],
                                dx=1e-4) -> List[float]:
gradient_vector = [0.0] * len(parameter_vector)
parameters_copy = parameter_vector.copy()
for index, parameter in enumerate(parameter_vector):
parameter_minus_dx = parameter - dx
parameter_plus_dx = parameter + dx
# Change the value in this entry to `x_i - dx_i` while
# leaving all other values fixed.
parameters_copy[index] = parameter_minus_dx
low_value = objective_function(parameters_copy)
# Change the value to `x_i + dx_i`.
parameters_copy[index] = parameter_plus_dx
high_value = objective_function(parameters_copy)
# Store the gradient df/dx = (f(x+dx) - f(x-dx)) / 2dx
gradient_vector[index] = (high_value - low_value) / (2 * dx)
# Reset the value back in `parameters_copy`
parameters_copy[index] = parameter
return gradient_vector
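# A small illustrative check (not invoked by the tests below) of the central
# difference helper on a function with a known gradient: for
# f(x) = x0^2 + 3*x1 the analytic gradient at [2.0, 5.0] is [4.0, 3.0], and
# the finite-difference estimate should match it closely.
def _example_central_difference_check():
    f = lambda x: x[0] * x[0] + 3.0 * x[1]
    gradient = gradient_central_difference(f, [2.0, 5.0])
    assert abs(gradient[0] - 4.0) < 1e-6
    assert abs(gradient[1] - 3.0) < 1e-6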
# Define functions for benchmarking our optimization suite:
def ackley_function_no_grad(parameter_vector: List[float]) -> float:
"""
Using the Ackley function as a simple test case. This function
was pulled from the following optimization benchmarking resource:
https://www.sfu.ca/~ssurjano/optimization.html
Also see:
https://en.wikipedia.org/wiki/Test_functions_for_optimization
We expect to find a global minimum of `f(parameter_vector) = 0.0`
when `parameter_vector = [0, ..., 0]`.
"""
# Define magic numbers. These are the recommended values
# from the above resource.
a_coefficient = 20
b_coefficient = 0.2
c_coefficient = 2 * math.pi
# term 1 = (1/d) * \sum_{i=1}^{d}{ x_{i}^{2} }
subterm_1 = sum(
(x_i * x_i for x_i in parameter_vector)) / len(parameter_vector)
# term 2 = (1/d) * \sum_{i=1}^{d}{ \cos(2 * pi * x_{i}) }
subterm_2 = sum((math.cos(c_coefficient * x_i)
for x_i in parameter_vector)) / len(parameter_vector)
term_1 = -1 * a_coefficient * math.exp(-1 * b_coefficient * subterm_1)
term_2 = math.exp(subterm_2)
# f(x) = term_1 - term_2 + a_coefficient + exp(1)
return term_1 - term_2 + a_coefficient + math.exp(1)
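# Quick illustrative sanity check (not part of the test suite above): at the
# global minimizer [0, 0] both averages reduce so that term_1 = -a and
# term_2 = e, hence f([0, 0]) = -a - e + a + e = 0.
def _example_ackley_minimum_check():
    assert abs(ackley_function_no_grad([0.0, 0.0])) < 1e-12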
def ackley_gradient(parameter_vector: List[float],
delta_x=1e-4) -> Tuple[float, List[float]]:
function_value = ackley_function_no_grad(parameter_vector)
gradient_vector = gradient_central_difference(ackley_function_no_grad,
parameter_vector,
dx=delta_x)
# Check the equivalent gradient strategy each time
# this function is called:
cudaq_gradient = cudaq.gradients.CentralDifference()
cudaq_gradient_vector = cudaq_gradient.compute(parameter_vector,
ackley_function_no_grad,
function_value)
assert (gradient_vector == cudaq_gradient_vector)
return function_value, gradient_vector
def schwefel_N_dimension(parameter_vector: List[float]) -> float:
"""
Using the Schwefel function as a more difficult, n-dimensional
test case. This function was pulled from the following
optimization benchmarking resource:
https://www.sfu.ca/~ssurjano/optimization.html
Also see:
https://en.wikipedia.org/wiki/Test_functions_for_optimization
Since we expect to find a global minimum of `f(parameter_vector) = 0`,
we add a fixed constant to the return value (`function_offset`) to have
    a non-zero optimal value. This minimum is at
`parameter_vector = [420.9687,...,420.9687]`.
"""
function_offset = 0.5
return 418.9829 * len(parameter_vector) - sum(
(x_i * math.sin(math.sqrt(abs(x_i)))
for x_i in parameter_vector)) + function_offset
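# Quick illustrative sanity check (not part of the test suite): near the
# known minimizer x_i = 420.9687 each term x_i * sin(sqrt(|x_i|)) is
# approximately 418.9829, so the sum cancels the leading constant and only
# `function_offset` (0.5) remains, up to a small numerical residue.
def _example_schwefel_minimum_check():
    value = schwefel_N_dimension([420.9687, 420.9687])
    assert abs(value - 0.5) < 1e-2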
def schwefel_gradient(parameter_vector: List[float],
                      delta_x=1e-4) -> Tuple[float, List[float]]:
"""
Using the Schwefel function as a more difficult, n-dimensional
test case. This function was pulled from the following
optimization benchmarking resource:
https://www.sfu.ca/~ssurjano/optimization.html
Also see:
https://en.wikipedia.org/wiki/Test_functions_for_optimization
"""
function_value = schwefel_N_dimension(parameter_vector)
gradient_vector = gradient_central_difference(schwefel_N_dimension,
parameter_vector,
dx=delta_x)
    # The equivalent CUDA Quantum gradient strategy check below is
    # currently disabled:
# cudaq_gradient = cudaq.gradients.CentralDifference()
# cudaq_gradient_vector = cudaq_gradient.compute(parameter_vector,
# schwefel_N_dimension,
# delta_x)
# assert (gradient_vector == cudaq_gradient_vector)
return function_value, gradient_vector
@pytest.mark.parametrize("optimizer", [
cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead(),
cudaq.optimizers.SPSA()
])
def test_ackley_no_gradients(optimizer):
"""Test gradient free optimizers on the 2-dimension Ackley function."""
dimension = 2
optimizer.max_iterations = 10
want_optimal_value = 0.0
want_optimal_parameters = [0.0, 0.0]
got_optimal_value, got_optimal_parameters = optimizer.optimize(
dimension, ackley_function_no_grad)
assert assert_close(want_optimal_value, got_optimal_value)
assert all(
assert_close(want_parameter, got_parameter) for want_parameter,
got_parameter in zip(want_optimal_parameters, got_optimal_parameters))
@pytest.mark.parametrize("optimizer", [
cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead(),
cudaq.optimizers.SPSA(),
cudaq.optimizers.LBFGS(),
cudaq.optimizers.GradientDescent(),
cudaq.optimizers.Adam(),
cudaq.optimizers.SGD()
])
def test_ackley_gradients(optimizer):
"""Test gradient based optimizers on the 2-dimension Ackley function."""
dimension = 2
optimizer.max_iterations = 10
want_optimal_value = 0.0
want_optimal_parameters = [0.0, 0.0]
got_optimal_value, got_optimal_parameters = optimizer.optimize(
dimension, ackley_gradient)
assert assert_close(want_optimal_value, got_optimal_value)
assert all(
assert_close(want_parameter, got_parameter) for want_parameter,
got_parameter in zip(want_optimal_parameters, got_optimal_parameters))
@pytest.mark.parametrize("optimizer", [
cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead(),
])
@pytest.mark.parametrize("dimension", [2, 3, 4, 5])
def test_schwefel_no_gradients(optimizer, dimension):
"""Test gradient free optimizers on the Schwefel function."""
print(optimizer)
optimizer.max_iterations = 500
# This is a difficult function, so most optimizers require
# a good head start on the parameters. This has been referenced
# against optimizers in the scipy optimization suite.
optimizer.lower_bounds = [300] * dimension
optimizer.upper_bounds = [500] * dimension
optimizer.initial_parameters = [400] * dimension
# The following parameters and expected function values
# were pulled from the resource listed in the source function.
want_optimal_value = 0.5
want_optimal_parameters = [420.9687] * dimension
got_optimal_value, got_optimal_parameters = optimizer.optimize(
dimension, schwefel_N_dimension)
assert assert_close(want_optimal_value, got_optimal_value)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_optimal_parameters))
@pytest.mark.parametrize("optimizer", [
cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead(),
cudaq.optimizers.SPSA(),
cudaq.optimizers.LBFGS(),
cudaq.optimizers.GradientDescent(),
cudaq.optimizers.Adam(),
cudaq.optimizers.SGD()
])
@pytest.mark.parametrize("dimension", [2, 3, 4, 5])
def test_schwefel_gradients(optimizer, dimension):
"""Test each of the `cudaq.optimizers` on the Schwefel function with gradients."""
    # NOTE: the parameterized `optimizer` is overridden here; every case
    # currently runs with COBYLA.
    optimizer = cudaq.optimizers.COBYLA()
optimizer.max_iterations = 100
# This is a difficult function, so most optimizers require
# a good head start on the parameters. This has been referenced
# against optimizers in the scipy optimization suite.
optimizer.lower_bounds = [300] * dimension
optimizer.upper_bounds = [500] * dimension
optimizer.initial_parameters = [400] * dimension
want_optimal_value = 0.5
want_optimal_parameters = [420.9687] * dimension
got_optimal_value, got_optimal_parameters = optimizer.optimize(
dimension, schwefel_gradient)
assert assert_close(want_optimal_value, got_optimal_value)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_optimal_parameters))
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/unittests/test_optimizer.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import numpy as np
import cudaq
def test_depolarization_channel():
"""Tests the depolarization channel in the case of a non-zero probability."""
cudaq.set_random_seed(13)
cudaq.set_target('density-matrix-cpu')
circuit = cudaq.make_kernel()
q = circuit.qalloc()
circuit.x(q)
depol = cudaq.DepolarizationChannel(.1)
noise = cudaq.NoiseModel()
noise.add_channel("x", [0], depol)
counts = cudaq.sample(circuit, noise_model=noise, shots_count=100)
assert (len(counts) == 2)
assert ('0' in counts)
assert ('1' in counts)
assert (counts.count('0') + counts.count('1') == 100)
counts = cudaq.sample(circuit)
assert (len(counts) == 1)
assert ('1' in counts)
cudaq.reset_target()
counts = cudaq.sample(circuit)
assert (len(counts) == 1)
assert ('1' in counts)
def test_depolarization_channel_simple():
"""Tests the depolarization channel in the case of `probability = 1.0`"""
cudaq.set_random_seed(13)
cudaq.set_target('density-matrix-cpu')
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
noise = cudaq.NoiseModel()
# Depolarization channel with `1.0` probability of the qubit state
# being scrambled.
depolarization_channel = cudaq.DepolarizationChannel(1.0)
    # Channel applied to any Y-gate on qubit 0.
noise.add_channel('y', [0], depolarization_channel)
# Bring the qubit to the |1> state, where it will remain
# with a probability of `1 - p = 0.0`.
kernel.y(qubit)
kernel.mz(qubit)
# Without noise, the qubit should still be in the |1> state.
counts = cudaq.sample(kernel)
want_counts = 1000
got_counts = counts["1"]
assert got_counts == want_counts
# With noise, the measurements should be a roughly 50/50
# mix between the |0> and |1> states.
noisy_counts = cudaq.sample(kernel, noise_model=noise)
want_probability = 0.5
got_zero_probability = noisy_counts.probability("0")
got_one_probability = noisy_counts.probability("1")
assert np.isclose(got_zero_probability, want_probability, atol=.2)
assert np.isclose(got_one_probability, want_probability, atol=.2)
def test_amplitude_damping_simple():
"""Tests the amplitude damping channel in the case of `probability = 1.0`"""
cudaq.set_random_seed(13)
cudaq.set_target('density-matrix-cpu')
noise = cudaq.NoiseModel()
# Amplitude damping channel with `1.0` probability of the qubit
# decaying to the ground state.
amplitude_damping = cudaq.AmplitudeDampingChannel(1.0)
# Applied to any Hadamard gate on the qubit.
noise.add_channel('h', [0], amplitude_damping)
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
# This will bring qubit to `1/sqrt(2) (|0> + |1>)`, where it will remain
# with a probability of `1 - p = 0.0`.
kernel.h(qubit)
kernel.mz(qubit)
# Without noise, the qubit will now have a 50/50 mix of measurements
# between |0> and |1>.
counts = cudaq.sample(kernel)
want_probability = 0.5
got_zero_probability = counts.probability("0")
got_one_probability = counts.probability("1")
assert np.isclose(got_zero_probability, want_probability, atol=.1)
assert np.isclose(got_one_probability, want_probability, atol=.1)
    # With noise, all measurements should be in the |0> state.
noisy_counts = cudaq.sample(kernel, noise_model=noise)
want_counts = 1000
got_counts = noisy_counts["0"]
assert (got_counts == want_counts)
def test_phase_flip_simple():
"""Tests the phase flip channel in the case of `probability = 1.0`"""
cudaq.set_random_seed(13)
cudaq.set_target('density-matrix-cpu')
noise = cudaq.NoiseModel()
# Phase flip channel with `1.0` probability of the qubit
# undergoing a phase rotation of 180 degrees (π).
phase_flip = cudaq.PhaseFlipChannel(1.0)
noise.add_channel('z', [0], phase_flip)
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
# Place qubit in superposition state.
kernel.h(qubit)
# Rotate the phase around Z by 180 degrees (π).
kernel.z(qubit)
# Apply another hadamard and measure.
kernel.h(qubit)
kernel.mz(qubit)
# Without noise, we'd expect the qubit to end in the |1>
# state due to the phase rotation between the two hadamard
# gates.
counts = cudaq.sample(kernel)
want_counts = 1000
got_one_counts = counts["1"]
assert got_one_counts == want_counts
# With noise, should be in the |0> state.
noisy_counts = cudaq.sample(kernel, noise_model=noise)
got_zero_counts = noisy_counts["0"]
assert got_zero_counts == want_counts
def test_bit_flip_simple():
"""
Tests the bit flip channel with the probability at `0.0` on qubit 0,
and `1.0` on qubit 1.
"""
cudaq.set_random_seed(13)
cudaq.set_target('density-matrix-cpu')
noise = cudaq.NoiseModel()
# Bit flip channel with `0.0` probability of the qubit flipping 180 degrees.
bit_flip_zero = cudaq.BitFlipChannel(0.0)
noise.add_channel('x', [0], bit_flip_zero)
# Bit flip channel with `1.0` probability of the qubit flipping 180 degrees.
bit_flip_one = cudaq.BitFlipChannel(1.0)
noise.add_channel('x', [1], bit_flip_one)
# Now we may define our simple kernel function and allocate a register
# of qubits to it.
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
# This will bring the qubit to the |1> state.
# Remains with a probability of `1 - p = 1.0`.
kernel.x(qubits[0])
# Now we apply an X-gate to qubit 1.
# Remains in the |1> state with a probability of `1 - p = 0.0`.
kernel.x(qubits[1])
kernel.mz(qubits)
# Without noise, both qubits in the |1> state.
counts = cudaq.sample(kernel)
counts.dump()
want_counts = 1000
got_one_one_counts = counts["11"]
assert got_one_one_counts == want_counts
# With noise, the state should be |1>|0> == |10>
noisy_counts = cudaq.sample(kernel, noise_model=noise)
noisy_counts.dump()
got_one_zero_counts = noisy_counts["10"]
assert got_one_zero_counts == want_counts
def test_kraus_channel():
"""Tests the Kraus Channel with a series of custom Kraus Operators."""
cudaq.set_random_seed(13)
cudaq.set_target('density-matrix-cpu')
k0 = np.array([[0.05773502691896258, 0.0], [0., -0.05773502691896258]],
dtype=np.complex128)
k1 = np.array([[0., 0.05773502691896258], [0.05773502691896258, 0.]],
dtype=np.complex128)
k2 = np.array([[0., -0.05773502691896258j], [0.05773502691896258j, 0.]],
dtype=np.complex128)
k3 = np.array([[0.99498743710662, 0.0], [0., 0.99498743710662]],
dtype=np.complex128)
depolarization = cudaq.KrausChannel([k0, k1, k2, k3])
assert ((depolarization[0] == k0).all())
assert ((depolarization[1] == k1).all())
assert ((depolarization[2] == k2).all())
assert ((depolarization[3] == k3).all())
noise = cudaq.NoiseModel()
noise.add_channel('x', [0], depolarization)
cudaq.set_noise(noise)
circuit = cudaq.make_kernel()
q = circuit.qalloc()
circuit.x(q)
counts = cudaq.sample(circuit)
want_count_length = 2
got_count_length = len(counts)
assert (got_count_length == want_count_length)
assert ('0' in counts)
assert ('1' in counts)
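# Illustrative check (not exercised above) that the four matrices used in
# `test_kraus_channel` form a valid Kraus decomposition, i.e. they satisfy
# the completeness relation sum_i K_i^dagger K_i = I. The amplitudes are
# consistent with a depolarizing channel of probability p = 0.01:
# sqrt(p / 3) on each Pauli term and sqrt(1 - p) on the identity term.
def _example_kraus_completeness_check():
    amplitude = 0.05773502691896258  # approximately sqrt(0.01 / 3)
    k0 = np.array([[amplitude, 0.], [0., -amplitude]], dtype=np.complex128)
    k1 = np.array([[0., amplitude], [amplitude, 0.]], dtype=np.complex128)
    k2 = np.array([[0., -1j * amplitude], [1j * amplitude, 0.]],
                  dtype=np.complex128)
    k3 = np.array([[0.99498743710662, 0.], [0., 0.99498743710662]],
                  dtype=np.complex128)
    identity = sum(k.conj().T @ k for k in [k0, k1, k2, k3])
    assert np.allclose(identity, np.eye(2))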
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/unittests/test_NoiseModel.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import numpy as np
import cudaq
from cudaq import spin
@pytest.mark.parametrize("qubit_count", [1, 5, 9])
@pytest.mark.parametrize("shots_count", [10, 100, 1000])
def test_sample_result_single_register(qubit_count, shots_count):
"""
Tests the `SampleResult` data-type on a simple circuit
of varying sizes.
"""
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(qubit_count)
# Place every qubit in the 1-state.
kernel.x(qreg)
kernel.mz(qreg)
# Get the QPU result from a call to `sample`.
# Check at a varying number of shots.
sample_result = cudaq.sample(kernel, shots_count=shots_count)
# Check for correctness on each member function of `SampleResult`
want_bitstring = "1" * qubit_count
# `::dump()`
sample_result.dump()
# `__str__`
print(str(sample_result))
# `__iter__`
for sub_counts in sample_result:
# Should just be the `want_bitstring`
assert sub_counts == want_bitstring
# `__getitem__`
# The `want_bitstring` should have `shots_count` observations.
assert sample_result[want_bitstring] == shots_count
# Should have 1 global register.
assert sample_result.register_names == ["__global__"]
# The full `SampleResult` and the extracted counts for the
# global register should be the same in this case.
for counts in [
sample_result,
sample_result.get_register_counts("__global__")
]:
# `__len__`
# Should have only measured 1 different state.
assert len(counts) == 1
# `expectation_z`
# The `qubit_count` is always odd so we should always have
# an expectation of -1. for the 1-state.
assert counts.expectation_z() == -1.
# `probability`
assert counts.probability(want_bitstring) == 1.
# `most_probable`
assert counts.most_probable() == want_bitstring
# `count`
assert counts.count(want_bitstring) == shots_count
# Check the results marginalized over each qubit.
for qubit in range(qubit_count):
marginal_counts = counts.get_marginal_counts([qubit])
print(marginal_counts)
assert marginal_counts.expectation_z() == -1.
# Should be in the 1-state.
assert marginal_counts.probability("1") == 1
assert marginal_counts.most_probable() == "1"
# `get_sequential_data`
# In this case, should just contain the single bitstring in a list.
assert sample_result.get_sequential_data() == [want_bitstring] * shots_count
# `::items()`
for key, value in sample_result.items():
assert key == want_bitstring
assert value == shots_count
# `::values()`
for value in sample_result.values():
assert value == shots_count
# `::clear()`
sample_result.clear()
# Counts should now be empty.
assert str(sample_result) == "{ }\n"
with pytest.raises(RuntimeError) as error:
# Too many args.
result = cudaq.sample(kernel, 0.0)
@pytest.mark.parametrize("qubit_count", [3, 5, 9])
@pytest.mark.parametrize("shots_count", [10, 100, 1000])
def test_sample_result_single_register_float_param(qubit_count, shots_count):
"""
Tests the `SampleResult` data-type on a simple circuit
of varying sizes. The circuit in this case is parameterized
by a single float value.
"""
kernel, angle = cudaq.make_kernel(float)
qreg = kernel.qalloc(qubit_count)
# Place every qubit in the 1-state, parameterized by
# the `angle`.
for index in range(qubit_count):
kernel.rx(angle, qreg[index])
kernel.mz(qreg)
# Get the QPU result from a call to `sample`, at the concrete
# angle of `np.pi`. Should be equivalent to the previous test
# case.
# Check at a varying number of shots.
sample_result = cudaq.sample(kernel, np.pi, shots_count=shots_count)
# Check for correctness on each member function of `SampleResult`
want_bitstring = "1" * qubit_count
# `::dump()`
sample_result.dump()
# `__str__`
print(str(sample_result))
# `__iter__`
for sub_counts in sample_result:
# Should just be the `want_bitstring`
assert sub_counts == want_bitstring
# `__getitem__`
# The `want_bitstring` should have `shots_count` observations.
assert sample_result[want_bitstring] == shots_count
# Should have 1 global register.
assert sample_result.register_names == ["__global__"]
# The full `SampleResult` and the extracted counts for the
# global register should be the same in this case.
for counts in [
sample_result,
sample_result.get_register_counts("__global__")
]:
# `__len__`
# Should have only measured 1 different state.
assert len(counts) == 1
# `expectation_z`
# The `qubit_count` is always odd so we should always have
# an expectation of -1. for the 1-state.
assert counts.expectation_z() == -1.
# `probability`
assert counts.probability(want_bitstring) == 1.
# `most_probable`
assert counts.most_probable() == want_bitstring
# `count`
assert counts.count(want_bitstring) == shots_count
# Check the results marginalized over each qubit.
for qubit in range(qubit_count):
marginal_counts = counts.get_marginal_counts([qubit])
print(marginal_counts)
assert marginal_counts.expectation_z() == -1.
# Should be in the 1-state.
assert marginal_counts.probability("1") == 1
assert marginal_counts.most_probable() == "1"
# `get_sequential_data`
# In this case, should just contain the single bitstring in a list.
assert sample_result.get_sequential_data() == [want_bitstring] * shots_count
# `::items()`
for key, value in sample_result.items():
assert key == want_bitstring
assert value == shots_count
# `::values()`
for value in sample_result.values():
assert value == shots_count
# `::clear()`
sample_result.clear()
# Counts should now be empty.
assert str(sample_result) == "{ }\n"
with pytest.raises(RuntimeError) as error:
# Too few args.
result = cudaq.sample(kernel)
@pytest.mark.parametrize("qubit_count", [3, 5, 9])
@pytest.mark.parametrize("shots_count", [10, 100, 1000])
def test_sample_result_single_register_list_param(qubit_count, shots_count):
"""
Tests the `SampleResult` data-type on a simple circuit
of varying sizes. The circuit in this case is parameterized
by a list.
"""
kernel, angles = cudaq.make_kernel(list)
qreg = kernel.qalloc(qubit_count)
# Place every qubit in the 1-state, parameterized by
# the `angle`.
for index in range(qubit_count):
kernel.rx(angles[0], qreg[index])
kernel.mz(qreg)
# Get the QPU result from a call to `sample`, at the concrete
# angle of `np.pi`. Should be equivalent to the previous test
# case.
# Check at a varying number of shots.
sample_result = cudaq.sample(kernel, [np.pi], shots_count=shots_count)
# Check for correctness on each member function of `SampleResult`
want_bitstring = "1" * qubit_count
# `::dump()`
sample_result.dump()
# `__str__`
print(str(sample_result))
# `__iter__`
for sub_counts in sample_result:
# Should just be the `want_bitstring`
assert sub_counts == want_bitstring
# `__getitem__`
# The `want_bitstring` should have `shots_count` observations.
assert sample_result[want_bitstring] == shots_count
# Should have 1 global register.
assert sample_result.register_names == ["__global__"]
# The full `SampleResult` and the extracted counts for the
# global register should be the same in this case.
for counts in [
sample_result,
sample_result.get_register_counts("__global__")
]:
# `__len__`
# Should have only measured 1 different state.
assert len(counts) == 1
# `expectation_z`
# The `qubit_count` is always odd so we should always have
# an expectation of -1. for the 1-state.
assert counts.expectation_z() == -1.
# `probability`
assert counts.probability(want_bitstring) == 1.
# `most_probable`
assert counts.most_probable() == want_bitstring
# `count`
assert counts.count(want_bitstring) == shots_count
# Check the results marginalized over each qubit.
for qubit in range(qubit_count):
marginal_counts = counts.get_marginal_counts([qubit])
print(marginal_counts)
assert marginal_counts.expectation_z() == -1.
# Should be in the 1-state.
assert marginal_counts.probability("1") == 1
assert marginal_counts.most_probable() == "1"
# `get_sequential_data`
# In this case, should just contain the single bitstring in a list.
assert sample_result.get_sequential_data() == [want_bitstring] * shots_count
# `::items()`
for key, value in sample_result.items():
assert key == want_bitstring
assert value == shots_count
# `::values()`
for value in sample_result.values():
assert value == shots_count
# `::clear()`
sample_result.clear()
# Counts should now be empty.
assert str(sample_result) == "{ }\n"
with pytest.raises(RuntimeError) as error:
# Wrong arg type.
result = cudaq.sample(kernel, 0.0)
@pytest.mark.skip(
reason=
"Mid-circuit measurements not currently supported without the use of `c_if`."
)
@pytest.mark.parametrize("qubit_count", [1, 5, 9])
@pytest.mark.parametrize("shots_count", [10, 100, 1000])
def test_sample_result_multiple_registers(qubit_count, shots_count):
"""
Tests the `SampleResult` data-type on a simple circuit
of varying sizes. The circuit provides a `register_name`
on the measurements in this case.
"""
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(qubit_count)
# Place every qubit in the 1-state.
kernel.x(qreg)
# Name the measurement register.
kernel.mz(qreg, register_name="test_measurement")
# Get the QPU result from a call to `sample`.
# Check at a varying number of shots.
sample_result = cudaq.sample(kernel, shots_count=shots_count)
# Check for correctness on each member function of `SampleResult`
want_bitstring = "1" * qubit_count
# `::dump()`
sample_result.dump()
# `__str__`
print(str(sample_result))
# `__iter__`
for sub_counts in sample_result:
# Should just be the `want_bitstring`
assert sub_counts == want_bitstring
# `__getitem__`
# The `want_bitstring` should have `shots_count` observations.
assert sample_result[want_bitstring] == shots_count
# TODO: once mid-circuit measurements are supported, finish out
# the rest of this test.
@pytest.mark.parametrize("shots_count", [10, 100])
def test_sample_result_observe(shots_count):
"""
Test `cudaq.SampleResult` as its returned from a call
to `cudaq.observe()`.
"""
qubit_count = 3
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(qubit_count)
kernel.x(qreg)
hamiltonian = spin.z(0) + spin.z(1) + spin.z(2)
want_expectation = -3.0
want_state = "111"
# Test via call to `cudaq.sample()`.
observe_result = cudaq.observe(kernel, hamiltonian, shots_count=shots_count)
# Return the entire `cudaq.SampleResult` data from observe_result.
sample_result = observe_result.counts()
# If shots mode was enabled, check those results.
if shots_count != -1:
sample_result = observe_result.counts()
sample_result.dump()
# Should just have 3 measurement registers, one for each spin term.
want_register_names = ["IIZ", "IZI", "ZII"]
got_register_names = sample_result.register_names
if '__global__' in got_register_names:
got_register_names.remove('__global__')
for want_name in want_register_names:
assert want_name in got_register_names
# Check that each register is in the proper state.
for index, sub_term in enumerate(hamiltonian):
# Extract the register name from the spin term.
got_name = str(sub_term).split(" ")[1].rstrip()
# Pull the counts for that hamiltonian sub term from the
# `ObserveResult::counts` overload.
sub_term_counts = observe_result.counts(sub_term=sub_term)
# Pull the counts for that hamiltonian sub term from the
# `SampleResult` dictionary by its name.
sub_register_counts = sample_result.get_register_counts(got_name)
            # Sub-term should have an expectation proportional to the entire
            # system.
assert sub_term_counts.expectation_z(
) == want_expectation / qubit_count
assert sub_register_counts.expectation_z(
) == want_expectation / qubit_count
# Should have `shots_count` results for each.
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
print(sub_term_counts)
# Check the state.
assert "1" in sub_term_counts
assert "1" in sub_register_counts
sample_result.dump()
# `::items()`
for key, value in sample_result.items():
assert key == "1"
assert value == shots_count
# `::values()`
for value in sample_result.values():
assert value == shots_count
# `::clear()`
sample_result.clear()
# Counts should now be empty.
assert str(sample_result) == "{ }\n"
def test_sample_async():
"""Tests `cudaq.sample_async` on a simple kernel with no args."""
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.mz(qubits)
# Invalid QPU
with pytest.raises(Exception) as error:
future = cudaq.sample_async(kernel, qpu_id=1)
# Default 0th qpu
future = cudaq.sample_async(kernel)
counts = future.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
# Can specify qpu id
future = cudaq.sample_async(kernel, qpu_id=0)
counts = future.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
with pytest.raises(Exception) as error:
# Invalid qpu_id type.
result = cudaq.sample_async(kernel, qpu_id=12)
def test_sample_async_params():
"""Tests `cudaq.sample_async` on a simple kernel that accepts args."""
kernel, theta, phi = cudaq.make_kernel(float, float)
qubits = kernel.qalloc(2)
kernel.rx(theta, qubits[0])
kernel.ry(phi, qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.mz(qubits)
    # Create the Bell state with rx and ry instead of a Hadamard;
    # this needs a pi rotation and a pi/2 rotation.
future = cudaq.sample_async(kernel, np.pi, np.pi / 2.)
counts = future.get()
assert (len(counts) == 2)
assert ('00' in counts)
assert ('11' in counts)
with pytest.raises(Exception) as error:
# Invalid qpu_id type.
result = cudaq.sample_async(kernel, 0.0, 0.0, qpu_id=12)
def test_sample_marginalize():
"""
A more thorough test of the functionality of `SampleResult::get_marginal_counts`.
"""
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(4)
# Place register in `0101` state.
kernel.x(qubits[1])
kernel.x(qubits[3])
want_bitstring = "0101"
sample_result = cudaq.sample(kernel)
# Marginalize over each qubit and check that it's correct.
for qubit in range(4):
marginal_result = sample_result.get_marginal_counts([qubit])
# Check the individual qubits state.
assert marginal_result.most_probable() == want_bitstring[qubit]
# Marginalize the qubit over pairs and check if correct.
qubit = 0
for other_qubit in [1, 2, 3]:
new_bitstring = want_bitstring[qubit] + want_bitstring[other_qubit]
# Check that qubit paired with every other qubit.
marginal_result = sample_result.get_marginal_counts(
[qubit, other_qubit])
assert marginal_result.most_probable() == new_bitstring
# Marginalize over the first 3 qubits.
marginal_result = sample_result.get_marginal_counts([0, 1, 2])
assert marginal_result.most_probable() == "010"
# Marginalize over the last 3 qubits.
marginal_result = sample_result.get_marginal_counts([1, 2, 3])
assert marginal_result.most_probable() == "101"
def test_swap_2q():
"""
Tests the simple case of swapping the states of two qubits.
"""
kernel = cudaq.make_kernel()
# Allocate a register of size 2.
qreg = kernel.qalloc(2)
qubit_0 = qreg[0]
qubit_1 = qreg[1]
# Place qubit 0 in the 1-state.
kernel.x(qubit_0)
# Swap states with qubit 1.
kernel.swap(qubit_0, qubit_1)
# Check their states.
kernel.mz(qreg)
want_state = "01"
result = cudaq.sample(kernel)
assert (want_state in result)
assert (result[want_state] == 1000)
def test_qubit_reset():
"""
Basic test that we can apply a qubit reset.
"""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
kernel.x(qubit)
kernel.reset(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
assert (len(counts) == 1)
assert ('0' in counts)
def test_qreg_reset():
"""
Basic test that we can apply a qreg reset.
"""
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.x(qubits)
kernel.reset(qubits)
kernel.mz(qubits)
counts = cudaq.sample(kernel)
assert (len(counts) == 1)
assert ('00' in counts)
def test_for_loop():
"""
Test that we can build a kernel expression with a for loop.
"""
circuit, inSize = cudaq.make_kernel(int)
qubits = circuit.qalloc(inSize)
circuit.h(qubits[0])
# can pass concrete integers for both
circuit.for_loop(0, inSize - 1,
lambda index: circuit.cx(qubits[index], qubits[index + 1]))
print(circuit)
counts = cudaq.sample(circuit, 5)
assert len(counts) == 2
assert '0' * 5 in counts
assert '1' * 5 in counts
counts.dump()
def test_sample_n():
"""
Test that we can broadcast the sample call over a number of argument sets
"""
circuit, inSize = cudaq.make_kernel(int)
qubits = circuit.qalloc(inSize)
circuit.h(qubits[0])
# can pass concrete integers for both
circuit.for_loop(0, inSize - 1,
lambda index: circuit.cx(qubits[index], qubits[index + 1]))
# circuit.mz(qubits)
print(circuit)
allCounts = cudaq.sample(circuit, [3, 4, 5, 6, 7])
first0 = '000'
first1 = '111'
for c in allCounts:
print(c)
assert first0 in c and first1 in c
first0 += '0'
first1 += '1'
testNpArray = np.random.randint(3, high=8, size=6)
print(testNpArray)
allCounts = cudaq.sample(circuit, testNpArray)
for i, c in enumerate(allCounts):
print(c)
assert '0' * testNpArray[i] in c and '1' * testNpArray[i] in c
circuit, angles = cudaq.make_kernel(list)
q = circuit.qalloc(2)
circuit.rx(angles[0], q[0])
circuit.ry(angles[1], q[0])
circuit.cx(q[0], q[1])
runtimeAngles = np.random.uniform(low=1.0, high=np.pi, size=(10, 2))
print(runtimeAngles)
allCounts = cudaq.sample(circuit, runtimeAngles)
for i, c in enumerate(allCounts):
print(runtimeAngles[i, :], c)
assert len(c) == 2
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/unittests/test_sample.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import numpy as np
import cudaq
def assert_close(want, got, tolerance=1.e-5) -> bool:
return abs(want - got) < tolerance
@pytest.mark.parametrize("want_vector", [
np.array([0.0, 1.0], dtype=np.complex128),
np.array([1.0, 0.0], dtype=np.complex128),
np.array([1.0 / np.sqrt(2), 1.0 / np.sqrt(2)], dtype=np.complex128),
np.array([0.0, 0.0, 0.0, 1.0], dtype=np.complex128),
np.array([1.0, 0.0, 0.0, 0.0], dtype=np.complex128),
np.array([1.0 / np.sqrt(2), 0.0, 0.0, 1.0 / np.sqrt(2)],
dtype=np.complex128),
])
def test_state_buffer_vector(want_vector):
"""
Tests writing to and returning from the :class:`State` buffer
on different state vectors.
"""
got_state_a = cudaq.State(want_vector)
got_state_b = cudaq.State(want_vector)
print("got_state_a = ", got_state_a)
print("got_state_b = ", got_state_b)
# Check all of the `overlap` overloads.
assert np.isclose(got_state_a.overlap(want_vector), 1.0)
assert np.isclose(got_state_b.overlap(want_vector), 1.0)
assert np.isclose(got_state_a.overlap(got_state_b), 1.0)
# Should be identical vectors.
got_vector_a = np.array(got_state_a, copy=False)
got_vector_b = np.array(got_state_b, copy=False)
print(f"want_vector = {want_vector}\n")
print(f"got_vector_a = {got_vector_a}\n")
print(f"got_vector_b = {got_vector_b}\n")
for i in range(len(got_vector_a)):
a_same = np.isclose(want_vector[i], got_vector_a[i])
b_same = np.isclose(want_vector[i], got_vector_b[i])
print(f"{i}-th element. Passes? {a_same and b_same}:\n")
print(f"want = {want_vector[i]}")
print(f"got_a = {got_vector_a[i]}")
print(f"got_b = {got_vector_b[i]}\n")
assert a_same
assert b_same
# assert np.allclose(want_vector, got_vector_a)
# assert np.allclose(want_vector, got_vector_b)
@pytest.mark.parametrize("want_matrix", [
np.array([[0.0, 0.0], [0.0, 1.0]], dtype=np.complex128),
np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.complex128),
np.array([[0.5, 0.5], [0.5, 0.5]], dtype=np.complex128),
np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0]],
dtype=np.complex128),
np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]],
dtype=np.complex128),
np.array([[0.5, 0.0, 0.0, 0.5], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 0.5]],
dtype=np.complex128),
])
def test_state_buffer_density_matrix(want_matrix):
"""
Tests writing to and returning from the :class:`State` buffer
on different density matrices.
"""
got_state_a = cudaq.State(want_matrix)
got_state_b = cudaq.State(want_matrix)
# Check all of the `overlap` overloads.
assert np.isclose(got_state_a.overlap(want_matrix), 1.0)
assert np.isclose(got_state_b.overlap(want_matrix), 1.0)
assert np.isclose(got_state_a.overlap(got_state_b), 1.0)
# Should be identical matrices.
got_matrix_a = np.array(got_state_a, copy=False)
got_matrix_b = np.array(got_state_b, copy=False)
assert np.allclose(want_matrix, got_matrix_a)
assert np.allclose(want_matrix, got_matrix_b)
def test_state_vector_simple():
"""
A simple end-to-end test of the state class on a state vector
backend. Begins with a kernel, converts to state, then checks
its member functions.
"""
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
# Get the quantum state, which should be a vector.
got_state = cudaq.get_state(kernel)
want_state = np.array([1. / np.sqrt(2.), 0., 0., 1. / np.sqrt(2.)],
dtype=np.complex128)
# Check the indexing operators on the State class
# while also checking their values
    assert np.isclose(want_state[0], got_state[0].real)
    assert np.isclose(want_state[1], got_state[1].real)
    assert np.isclose(want_state[2], got_state[2].real)
    assert np.isclose(want_state[3], got_state[3].real)
# Check the entire vector with numpy.
got_vector = np.array(got_state, copy=False)
for i in range(len(want_state)):
assert np.isclose(want_state[i], got_vector[i])
print(f"want = {want_state[i]}")
print(f"got = {got_vector[i]}")
assert np.allclose(want_state, np.array(got_state))
# Check overlaps.
want_state_object = cudaq.State(want_state)
# Check the overlap overload with want numpy array.
assert np.isclose(got_state.overlap(want_state), 1.0)
# Check the overlap overload with want state object.
assert np.isclose(got_state.overlap(want_state_object), 1.0)
# Check the overlap overload with itself.
assert np.isclose(got_state.overlap(got_state), 1.0)
def test_state_vector_integration():
"""
An integration test on the state vector class. Uses a CUDA Quantum
optimizer to find the correct kernel parameters for a Bell state.
"""
# Make a general 2 qubit SO4 rotation.
kernel, parameters = cudaq.make_kernel(list)
qubits = kernel.qalloc(2)
kernel.ry(parameters[0], qubits[0])
kernel.ry(parameters[1], qubits[1])
kernel.cz(qubits[0], qubits[1])
kernel.ry(parameters[2], qubits[0])
kernel.ry(parameters[3], qubits[1])
kernel.cz(qubits[0], qubits[1])
kernel.ry(parameters[4], qubits[0])
kernel.ry(parameters[5], qubits[1])
kernel.cz(qubits[0], qubits[1])
want_state = cudaq.State(
np.array([1. / np.sqrt(2.), 0., 0., 1. / np.sqrt(2.)],
dtype=np.complex128))
def objective(x):
got_state = cudaq.get_state(kernel, x)
return 1. - want_state.overlap(got_state)
# Compute the parameters that make this kernel produce the
# Bell state.
optimizer = cudaq.optimizers.COBYLA()
optimizer.max_iterations = 100
optimal_infidelity, optimal_parameters = optimizer.optimize(6, objective)
    # Did we maximize the overlap (i.e., minimize the infidelity)?
assert np.isclose(optimal_infidelity, 0.0, atol=1e-3)
# Check the state from the kernel at the fixed parameters.
bell_state = cudaq.get_state(kernel, optimal_parameters)
print(bell_state)
assert np.allclose(want_state, bell_state, atol=1e-3)
def test_state_density_matrix_simple():
"""
A simple end-to-end test of the state class on a density matrix
backend. Begins with a kernel, converts to state, then checks
its member functions.
"""
cudaq.set_target('density-matrix-cpu')
# Create the bell state
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
got_state = cudaq.get_state(kernel)
print(got_state)
want_state = np.array([[0.5, 0.0, 0.0, 0.5], [0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0], [0.5, 0.0, 0.0, 0.5]],
dtype=np.complex128)
# Check the indexing operators on the State class
# while also checking their values
    assert np.isclose(.5, got_state[0, 0].real)
    assert np.isclose(.5, got_state[0, 3].real)
    assert np.isclose(.5, got_state[3, 0].real)
    assert np.isclose(.5, got_state[3, 3].real)
# Check the entire matrix with numpy.
assert np.allclose(want_state, np.array(got_state))
# Check overlaps.
want_state_object = cudaq.State(want_state)
# Check the overlap overload with want numpy array.
assert np.isclose(got_state.overlap(want_state), 1.0)
# Check the overlap overload with want state object.
assert np.isclose(got_state.overlap(want_state_object), 1.0)
# Check the overlap overload with itself.
assert np.isclose(got_state.overlap(got_state), 1.0)
cudaq.reset_target()
def test_state_density_matrix_integration():
"""
An integration test on the state density matrix class. Uses a CUDA Quantum
optimizer to find the correct kernel parameters for a Bell state.
"""
cudaq.set_target('density-matrix-cpu')
# Make a general 2 qubit SO4 rotation.
kernel, parameters = cudaq.make_kernel(list)
qubits = kernel.qalloc(2)
kernel.ry(parameters[0], qubits[0])
kernel.ry(parameters[1], qubits[1])
kernel.cz(qubits[0], qubits[1])
kernel.ry(parameters[2], qubits[0])
kernel.ry(parameters[3], qubits[1])
kernel.cz(qubits[0], qubits[1])
kernel.ry(parameters[4], qubits[0])
kernel.ry(parameters[5], qubits[1])
kernel.cz(qubits[0], qubits[1])
want_state = cudaq.State(
np.array([[.5, 0., 0., .5], [0., 0., 0., 0.], [0., 0., 0., 0.],
[.5, 0., 0., .5]],
dtype=np.complex128))
def objective(x):
got_state = cudaq.get_state(kernel, x)
return 1. - want_state.overlap(got_state)
# Compute the parameters that make this kernel produce the
# Bell state.
optimizer = cudaq.optimizers.COBYLA()
optimizer.max_iterations = 100
optimal_infidelity, optimal_parameters = optimizer.optimize(6, objective)
    # Did we maximize the overlap (i.e., minimize the infidelity)?
assert np.isclose(optimal_infidelity, 0.0, atol=1e-3)
# Check the state from the kernel at the fixed parameters.
bell_state = cudaq.get_state(kernel, optimal_parameters)
assert np.allclose(want_state, bell_state, atol=1e-3)
cudaq.reset_target()
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/unittests/test_state.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import collections
import cudaq
from cudaq import spin
import numpy as np
def assert_close(want, got, tolerance=1.e-5) -> bool:
return abs(want - got) < tolerance
def test_spin_class():
"""
Tests that we can construct each of the convenience functions for
the Pauli spin operators on different qubits.
"""
qubit = 0
i_ = spin.i(target=qubit)
x_ = spin.x(target=qubit)
y_ = spin.y(qubit)
z_ = spin.z(qubit)
data, coeffs = i_.get_raw_data()
assert (len(data) == 1)
assert (len(data[0]) == 2)
assert (data[0] == [0, 0])
data, coeffs = x_.get_raw_data()
assert (len(data) == 1)
assert (len(data[0]) == 2)
assert (data[0] == [1, 0])
data, coeffs = y_.get_raw_data()
assert (len(data) == 1)
assert (len(data[0]) == 2)
assert (data[0] == [1, 1])
data, coeffs = z_.get_raw_data()
assert (len(data) == 1)
assert (len(data[0]) == 2)
assert (data[0] == [0, 1])
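# The raw data rows above suggest a binary-symplectic layout: a term acting
# on `n` qubits is encoded as `2n` bits, where the first `n` flag an X
# component and the last `n` flag a Z component (Y sets both bits). This is
# an inference from the observed values rather than documented behavior, so
# the sketch below only prints the encoding of an example two-qubit term for
# inspection; under that layout `spin.x(0) * spin.z(1)` should appear as
# [[1, 0, 0, 1]] with a single coefficient of 1.
def _example_spin_raw_data_layout():
    term = spin.x(0) * spin.z(1)
    data, coeffs = term.get_raw_data()
    print(data, coeffs)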
def test_spin_op_operators():
"""
    Tests the binary operators on the `cudaq.SpinOperator` class. We're just
    testing that each runs without error and produces raw data that we can
    verify against expected values. We are not fully testing the accuracy of
    each individual operator at the moment.
"""
# Test the empty (identity) constructor.
spin_a = cudaq.SpinOperator()
spin_b = spin.x(0)
# Test the copy constructor.
spin_b_copy = cudaq.SpinOperator(spin_operator=spin_b)
assert (spin_b_copy == spin_b)
assert (spin_b_copy != spin_a)
spin_c = spin.y(1)
spin_d = spin.z(2)
print("Start", spin_a)
# In-place operators:
# this += SpinOperator
spin_a += spin_b
print('next ', spin_a)
# this -= SpinOperator
spin_a -= spin_c
# this *= SpinOperator
spin_a *= spin_d
# this *= float/double
spin_a *= 5.0
# this *= complex
spin_a *= (1.0 + 1.0j)
# Other operators:
# SpinOperator + SpinOperator
spin_f = spin_a + spin_b
# SpinOperator - SpinOperator
spin_g = spin_a - spin_b
# SpinOperator * SpinOperator
spin_h = spin_a * spin_b
# SpinOperator * double
spin_i = spin_a * -1.0
# double * SpinOperator
spin_j = -1.0 * spin_a
# SpinOperator * complex
spin_k = spin_a * (1.0 + 1.0j)
# complex * SpinOperator
spin_l = (1.0 + 1.0j) * spin_a
# SpinOperator + double
spin_m = spin_a + 3.0
# double + SpinOperator
spin_n = 3.0 + spin_a
# SpinOperator - double
spin_o = spin_a - 3.0
# double - SpinOperator
spin_p = 3.0 - spin_a
data, coeffs = spin_a.get_raw_data()
assert (len(data) == 3)
assert (len(data[0]) == 6)
expected = [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [5 + 5j, 5 + 5j, -5 - 5j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_b.get_raw_data()
assert (len(data) == 1)
assert (len(data[0]) == 2)
expected = [[1, 0]]
assert (all([d in expected for d in data]))
expected = [1]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_c.get_raw_data()
assert (len(data) == 1)
assert (len(data[0]) == 4)
expected = [[0, 1, 0, 1]]
assert (all([d in expected for d in data]))
expected = [1]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_d.get_raw_data()
assert (len(data) == 1)
assert (len(data[0]) == 6)
expected = [[0, 0, 0, 0, 0, 1]]
assert (all([d in expected for d in data]))
expected = [1]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_f.get_raw_data()
assert (len(data) == 4)
assert (len(data[0]) == 6)
expected = [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0]]
assert (all([d in expected for d in data]))
expected = [5 + 5j, 5 + 5j, -5 - 5j, 1]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_g.get_raw_data()
assert (len(data) == 4)
assert (len(data[0]) == 6)
expected = [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0]]
assert (all([d in expected for d in data]))
expected = [5 + 5j, 5 + 5j, -5 - 5j, -1]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_h.get_raw_data()
assert (len(data) == 3)
assert (len(data[0]) == 6)
expected = [[1, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [5 + 5j, 5 + 5j, -5 - 5j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_i.get_raw_data()
assert (len(data) == 3)
assert (len(data[0]) == 6)
expected = [[1, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [-5 - 5j, 5 + 5j, -5 - 5j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_j.get_raw_data()
assert (len(data) == 3)
assert (len(data[0]) == 6)
expected = [[1, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [-5 - 5j, 5 + 5j, -5 - 5j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_k.get_raw_data()
assert (len(data) == 3)
assert (len(data[0]) == 6)
expected = [[1, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [10j, 10j, -10j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_l.get_raw_data()
assert (len(data) == 3)
assert (len(data[0]) == 6)
expected = [[1, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [10j, 10j, -10j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_m.get_raw_data()
assert (len(data) == 4)
assert (len(data[0]) == 6)
expected = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [3, 5 + 5j, 5 + 5j, -5 - 5j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_o.get_raw_data()
assert (len(data) == 4)
assert (len(data[0]) == 6)
expected = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [-3, 5 + 5j, 5 + 5j, -5 - 5j]
assert (all([c in expected for c in coeffs]))
data, coeffs = spin_p.get_raw_data()
assert (len(data) == 4)
assert (len(data[0]) == 6)
expected = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1]]
assert (all([d in expected for d in data]))
expected = [3, 5 + 5j, -5 - 5j, -5 - 5j]
assert (all([c in expected for c in coeffs]))
def test_spin_op_members():
"""
Test all of the bound member functions on the `cudaq.SpinOperator` class.
"""
spin_operator = cudaq.SpinOperator()
# Assert that it's the identity.
assert spin_operator.is_identity()
# Only have 1 term and 1 qubit.
assert spin_operator.get_term_count() == 1
assert spin_operator.get_qubit_count() == 1
spin_operator += -1.0 * spin.x(1)
# Should now have 2 terms and 2 qubits.
assert spin_operator.get_term_count() == 2
assert spin_operator.get_qubit_count() == 2
# No longer identity.
assert not spin_operator.is_identity()
for term in spin_operator:
        # Each term's coefficient should be either 1.0 (the original identity
        # term) or -1.0 (the appended X term).
        assert term.get_coefficient() == -1.0 or term.get_coefficient() == 1.0
def test_spin_op_vqe():
"""
Test the `cudaq.SpinOperator` class on a simple VQE Hamiltonian.
"""
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
print(hamiltonian)
# Checking equality operators.
assert spin.x(2) != hamiltonian
assert hamiltonian == hamiltonian
assert hamiltonian.get_term_count() == 5
got_data, got_coefficients = hamiltonian.get_raw_data()
assert (len(got_data) == 5)
assert (len(got_data[0]) == 4)
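    # Assuming the same X-bits-then-Z-bits layout as in the earlier tests, the
    # expected rows correspond to the identity, X0*X1, Y0*Y1, Z1 and Z0 terms.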
expected = [[0, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 1], [0, 0, 0, 1],
[0, 0, 1, 0]]
assert (all([d in expected for d in got_data]))
expected = [5.907, -2.1433, -2.1433, .21829, -6.125]
assert (all([c in expected for c in got_coefficients]))
def test_matrix():
"""
Test that the `cudaq.SpinOperator` can produce its matrix representation
and that we can use that matrix with standard python packages like numpy.
"""
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
mat = hamiltonian.to_matrix()
assert assert_close(-1.74, np.linalg.eigvals(mat)[0], 1e-2)
print(mat)
want_matrix = np.array([[.00029, 0, 0, 0], [0, 12.2503, -4.2866, 0],
[0, -4.2866, -.43629, 0], [0, 0, 0, 11.8137]])
got_matrix = np.array(mat, copy=False)
assert np.allclose(want_matrix, got_matrix, rtol=1e-3)
def test_spin_op_foreach():
"""
    Test the `cudaq.SpinOperator` `for_each_term` method.
"""
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
print(hamiltonian)
counter = 0
def doSomethingWithTerm(term):
nonlocal counter
print(term)
counter += 1
hamiltonian.for_each_term(doSomethingWithTerm)
assert counter == 5
counter = 0
xSupports = []
def doSomethingWithTerm(term):
def doSomethingWithPauli(pauli: cudaq.Pauli, idx: int):
nonlocal counter, xSupports
if pauli == cudaq.Pauli.X:
counter = counter + 1
xSupports.append(idx)
term.for_each_pauli(doSomethingWithPauli)
hamiltonian.for_each_term(doSomethingWithTerm)
assert counter == 2
assert xSupports == [0, 1]
def test_spin_op_iter():
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
count = 0
for term in hamiltonian:
count += 1
assert count == 5
def test_spin_op_sparse_matrix():
"""
Test that the `cudaq.SpinOperator` can produce its sparse matrix representation
and that we can use that matrix with standard python packages like numpy.
"""
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
numQubits = hamiltonian.get_qubit_count()
mat = hamiltonian.to_matrix()
data, rows, cols = hamiltonian.to_sparse_matrix()
for i, value in enumerate(data):
print(rows[i], cols[i], value)
assert np.isclose(mat[rows[i], cols[i]], value)
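    # Minimal cross-check sketch (assumes the COO triplets list every non-zero
    # entry exactly once): rebuild a dense matrix from the triplets with numpy
    # alone and compare it against the dense representation.
    dense = np.zeros((2**numQubits, 2**numQubits), dtype=complex)
    for value, row, col in zip(data, rows, cols):
        dense[int(row), int(col)] = value
    assert np.allclose(dense, np.array(mat, copy=False))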
# can use scipy
# scipyM = scipy.sparse.csr_array((data, (rows, cols)), shape=(2**numQubits,2**numQubits))
# E, ev = scipy.sparse.linalg.eigsh(scipyM, k=1, which='SA')
# assert np.isclose(E[0], -1.7488, 1e-2)
def test_spin_op_from_word():
s = cudaq.SpinOperator.from_word("ZZZ")
want_spin_op = spin.z(0) * spin.z(1) * spin.z(2)
got_spin_op = cudaq.SpinOperator.from_word("ZZZ")
assert got_spin_op == want_spin_op
s = cudaq.SpinOperator.from_word("XXIXYZ")
want_spin_op = spin.x(0) * spin.x(1) * spin.i(2) * spin.x(3) * spin.y(
4) * spin.z(5)
got_spin_op = cudaq.SpinOperator.from_word("XXIXYZ")
assert got_spin_op == want_spin_op
want_spin_op = spin.x(0) * spin.y(1) * spin.z(2)
got_spin_op = cudaq.SpinOperator.from_word("XYZ")
assert got_spin_op == want_spin_op
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/unittests/test_SpinOperator.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import cudaq
from cudaq import spin
# Helper function for asserting two values are within a
# certain tolerance. If we make numpy a dependency,
# this may be replaced in the future with `np.allclose`.
def assert_close(want, got, tolerance=1.e-4) -> bool:
return abs(want - got) < tolerance
@pytest.fixture
def hamiltonian_2q():
"""Spin operator for 2-qubit VQE benchmark used in this test suite."""
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
return hamiltonian
@pytest.fixture
def kernel_two_qubit_vqe_float() -> cudaq.Kernel:
"""A 2-qubit VQE ansatz used to benchmark `cudaq.vqe`."""
kernel, theta = cudaq.make_kernel(float)
qubits = kernel.qalloc(2)
kernel.x(qubits[0])
kernel.ry(theta, qubits[1])
kernel.cx(qubits[1], qubits[0])
return kernel
@pytest.fixture
def kernel_two_qubit_vqe_list() -> cudaq.Kernel:
"""A 2-qubit VQE ansatz used to benchmark `cudaq.vqe`."""
kernel, thetas = cudaq.make_kernel(list)
qubits = kernel.qalloc(2)
kernel.x(qubits[0])
kernel.ry(thetas[0], qubits[1])
kernel.cx(qubits[1], qubits[0])
return kernel
@pytest.fixture
def hamiltonian_3q():
"""Spin operator for 3-qubit VQE benchmark used in this test suite."""
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(
1) + 9.625 - 9.625 * spin.z(2) - 3.913119 * spin.x(1) * spin.x(
2) - 3.913119 * spin.y(1) * spin.y(2)
return hamiltonian
@pytest.fixture
def kernel_three_qubit_vqe() -> cudaq.Kernel:
"""
A 3-qubit VQE ansatz used to benchmark `cudaq.vqe`.
Note: the parameters are stored in the kernel as
individual float values.
"""
kernel, theta, phi = cudaq.make_kernel(float, float)
qubits = kernel.qalloc(3)
kernel.x(qubits[0])
kernel.ry(theta, qubits[1])
kernel.ry(phi, qubits[2])
kernel.cx(qubits[2], qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.ry(theta * -1., qubits[1])
kernel.cx(qubits[0], qubits[1])
kernel.cx(qubits[1], qubits[0])
return kernel
@pytest.fixture
def kernel_three_qubit_vqe_list() -> cudaq.Kernel:
"""
A 3-qubit VQE ansatz used to benchmark `cudaq.vqe`.
Note: the parameters are all stored in the kernel
in a single `list`.
FIXME: List arguments are currently incompatible with
`cudaq.vqe`.
"""
kernel, angles = cudaq.make_kernel(list)
theta = angles[0]
phi = angles[1]
qubits = kernel.qalloc(3)
kernel.x(qubits[0])
kernel.ry(theta, qubits[1])
kernel.ry(phi, qubits[2])
kernel.cx(qubits[2], qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.ry(theta * -1., qubits[1])
kernel.cx(qubits[0], qubits[1])
kernel.cx(qubits[1], qubits[0])
return kernel
@pytest.mark.parametrize(
"optimizer", [cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead()])
def test_vqe_two_qubit_float(optimizer, kernel_two_qubit_vqe_float,
hamiltonian_2q):
"""
Test `cudaq.vqe` on a 2-qubit benchmark for each gradient-free optimizer, and
for both the `Kernel` and `Callable` overloads.
"""
# Should be able to call this by passing a function that returns a kernel
# along with a lambda (or function) for the `argument_wrapper`:
got_expectation, got_parameters = cudaq.vqe(
kernel_two_qubit_vqe_float,
hamiltonian_2q,
optimizer,
parameter_count=1,
argument_mapper=lambda parameter_vector: parameter_vector[0],
shots=-1)
# Known minimal expectation value for this system:
want_expectation_value = -1.7487948611472093
want_optimal_parameters = [0.59]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-2)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
@pytest.mark.parametrize(
"optimizer", [cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead()])
def test_vqe_two_qubit_list(optimizer, kernel_two_qubit_vqe_list,
hamiltonian_2q):
"""
Test `cudaq.vqe` on a 2-qubit benchmark for each gradient-free optimizer, and
for both the `Kernel` and `Callable` overloads.
"""
# Should be able to call this by passing a function that returns a kernel:
got_expectation, got_parameters = cudaq.vqe(kernel_two_qubit_vqe_list,
hamiltonian_2q,
optimizer,
parameter_count=1,
shots=-1)
# Known minimal expectation value for this system:
want_expectation_value = -1.7487948611472093
want_optimal_parameters = [0.59]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-2)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
@pytest.mark.parametrize(
"optimizer",
[
# FIXME: cudaq.optimizers.SPSA(),
cudaq.optimizers.COBYLA(),
cudaq.optimizers.LBFGS(),
cudaq.optimizers.Adam(),
cudaq.optimizers.GradientDescent(),
cudaq.optimizers.SGD(),
])
@pytest.mark.parametrize("gradient", [
cudaq.gradients.CentralDifference(),
cudaq.gradients.ParameterShift(),
cudaq.gradients.ForwardDifference()
])
def test_vqe_two_qubit_float_gradients(optimizer, gradient,
kernel_two_qubit_vqe_float,
hamiltonian_2q):
"""
    Test `cudaq.vqe` on a 2-qubit benchmark for each gradient-based optimizer,
    with each cudaq-supported gradient strategy. Also checks for the different
`Kernel` and `Callable` overloads.
"""
def argument_map(parameter_vector):
"""Takes the `parameter_vector` from optimizer as input and returns
its single element indexed out as as float."""
return parameter_vector[0]
# Should be able to call this by passing a function that returns a kernel:
got_expectation, got_parameters = cudaq.vqe(
kernel=kernel_two_qubit_vqe_float,
gradient_strategy=gradient,
spin_operator=hamiltonian_2q,
optimizer=optimizer,
parameter_count=1,
argument_mapper=argument_map,
shots=-1)
# Known minimal expectation value for this system:
want_expectation_value = -1.7487948611472093
want_optimal_parameters = [0.59]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-2)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
@pytest.mark.parametrize(
"optimizer",
[
# FIXME: cudaq.optimizers.SPSA(),
cudaq.optimizers.COBYLA(),
cudaq.optimizers.LBFGS(),
cudaq.optimizers.Adam(),
cudaq.optimizers.GradientDescent(),
cudaq.optimizers.SGD(),
])
@pytest.mark.parametrize("gradient", [
cudaq.gradients.CentralDifference(),
cudaq.gradients.ParameterShift(),
cudaq.gradients.ForwardDifference()
])
def test_vqe_two_qubit_list_gradients(optimizer, gradient,
kernel_two_qubit_vqe_list,
hamiltonian_2q):
"""
    Test `cudaq.vqe` on a 2-qubit benchmark for each gradient-based optimizer,
    with each cudaq-supported gradient strategy. Also checks for the different
`Kernel` and `Callable` overloads.
"""
# Should be able to call this by passing a function that returns a kernel:
got_expectation, got_parameters = cudaq.vqe(
kernel=kernel_two_qubit_vqe_list,
gradient_strategy=gradient,
spin_operator=hamiltonian_2q,
optimizer=optimizer,
parameter_count=1,
shots=-1)
# Known minimal expectation value for this system:
want_expectation_value = -1.7487948611472093
want_optimal_parameters = [0.59]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-2)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
@pytest.mark.parametrize(
"optimizer", [cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead()])
def test_vqe_three_qubit_float(optimizer, kernel_three_qubit_vqe,
hamiltonian_3q):
# Should be able to call this by passing a function that returns a kernel:
got_expectation, got_parameters = cudaq.vqe(
kernel_three_qubit_vqe,
hamiltonian_3q,
optimizer,
parameter_count=2,
argument_mapper=lambda parameter_vector: tuple(parameter_vector))
# Known minimal expectation value for this system:
want_expectation_value = -2.045375
want_optimal_parameters = [0.359, 0.257]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-3)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
@pytest.mark.parametrize(
"optimizer", [cudaq.optimizers.COBYLA(),
cudaq.optimizers.NelderMead()])
def test_vqe_three_qubit_list(optimizer, kernel_three_qubit_vqe_list,
hamiltonian_3q):
# Should be able to call this by passing a function that returns a kernel:
got_expectation, got_parameters = cudaq.vqe(kernel_three_qubit_vqe_list,
hamiltonian_3q,
optimizer,
parameter_count=2)
# Known minimal expectation value for this system:
want_expectation_value = -2.045375
want_optimal_parameters = [0.359, 0.257]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-3)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-2)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
@pytest.mark.parametrize(
"optimizer",
[
# FIXME: cudaq.optimizers.SPSA(),
cudaq.optimizers.COBYLA(),
cudaq.optimizers.LBFGS(),
cudaq.optimizers.Adam(),
cudaq.optimizers.GradientDescent(),
cudaq.optimizers.SGD(),
])
@pytest.mark.parametrize(
"gradient",
[
cudaq.gradients.CentralDifference(),
cudaq.gradients.ForwardDifference(),
# FIXME: cudaq.gradients.ParameterShift()
])
def test_vqe_three_qubit_float_gradients(optimizer, gradient,
kernel_three_qubit_vqe,
hamiltonian_3q):
def argument_map(parameter_vector):
"""Takes the `parameter_vector` from optimizer as input and returns
both of its elements as a tuple."""
return tuple(parameter_vector)
optimizer.max_iterations = 100
# Should be able to call this by passing a function that returns a kernel:
got_expectation, got_parameters = cudaq.vqe(kernel=kernel_three_qubit_vqe,
gradient_strategy=gradient,
spin_operator=hamiltonian_3q,
optimizer=optimizer,
parameter_count=2,
argument_mapper=argument_map)
# Known minimal expectation value for this system:
want_expectation_value = -2.045375
want_optimal_parameters = [0.359, 0.257]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-2)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-1)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
@pytest.mark.parametrize(
"optimizer",
[
# FIXME: cudaq.optimizers.SPSA(),
cudaq.optimizers.COBYLA(),
cudaq.optimizers.LBFGS(),
cudaq.optimizers.Adam(),
cudaq.optimizers.GradientDescent(),
cudaq.optimizers.SGD(),
])
@pytest.mark.parametrize(
"gradient",
[
cudaq.gradients.CentralDifference(),
cudaq.gradients.ForwardDifference(),
# FIXME: cudaq.gradients.ParameterShift()
])
def test_vqe_three_qubit_list_gradients(optimizer, gradient,
kernel_three_qubit_vqe_list,
hamiltonian_3q):
optimizer.max_iterations = 100
# Should be able to call this by passing a function that returns a kernel:
got_expectation, got_parameters = cudaq.vqe(
kernel=kernel_three_qubit_vqe_list,
gradient_strategy=gradient,
spin_operator=hamiltonian_3q,
optimizer=optimizer,
parameter_count=2)
# Known minimal expectation value for this system:
want_expectation_value = -2.045375
want_optimal_parameters = [0.359, 0.257]
assert assert_close(want_expectation_value, got_expectation, tolerance=1e-2)
assert all(
assert_close(want_parameter, got_parameter, tolerance=1e-1)
for want_parameter, got_parameter in zip(want_optimal_parameters,
got_parameters))
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/unittests/test_vqe.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import pytest
import cudaq
@pytest.mark.parametrize("type_", [float, int])
def test_quake_value_operators(type_):
"""
Test `cudaq.QuakeValue` and each of its binary operators
for every applicable `QuakeValue` type.
"""
kernel, value_0, value_1 = cudaq.make_kernel(type_, type_)
# Checking the binary operators.
# Multiplication.
test = value_0 * 1.
# Ensure we're returning a `QuakeValue`.
assert type(test) == cudaq.QuakeValue
# Ensure this is a new `QuakeValue`.
assert test != value_0
test = value_0 * value_1
assert type(test) == cudaq.QuakeValue
assert test != value_0 and test != value_1
test = 1. * value_0
assert type(test) == cudaq.QuakeValue
assert test != value_0
# Addition.
test = value_0 + 1.
assert type(test) == cudaq.QuakeValue
assert test != value_0
test = value_0 + value_1
assert type(test) == cudaq.QuakeValue
assert test != value_0 and test != value_1
test = 1 + value_0
assert type(test) == cudaq.QuakeValue
assert test != value_0
# Subtraction.
test = value_0 - 1.
assert type(test) == cudaq.QuakeValue
assert test != value_0
test = value_0 - value_1
assert type(test) == cudaq.QuakeValue
assert test != value_0 and test != value_1
test = 1 - value_0
assert type(test) == cudaq.QuakeValue
assert test != value_0
# Negation.
test = -value_0
assert type(test) == cudaq.QuakeValue
assert test != value_0
test = -value_1
assert type(test) == cudaq.QuakeValue
assert test != value_1
def test_QuakeValueLifetimeAndPrint():
"""Tests Bug #64 for the lifetime of a QuakeValue"""
circuit = cudaq.make_kernel()
qubitRegister = circuit.qalloc(2)
circuit.x(qubitRegister[0])
s = str(circuit)
print(s)
assert s.count('quake.x') == 1
circuit.x(qubitRegister[0])
s = str(circuit)
print(s)
assert s.count('quake.x') == 2 | cuda-quantum-main | python/tests/unittests/test_quake_value.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import pytest
import numpy as np
import cudaq
from cudaq import spin
def assert_close(want, got, tolerance=1.e-4) -> bool:
return abs(want - got) < tolerance
def test_observe_result():
"""
Test the `cudaq.ObserveResult` class to ensure its member
functions are working as expected.
"""
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(2)
kernel.x(qreg[0])
hamiltonian = spin.z(0) + spin.x(1) + spin.y(0)
shots_count = 1000
# Shots provided.
observe_result = cudaq.observe(kernel, hamiltonian, shots_count=shots_count)
# Return the entire `cudaq.SampleResult` data from observe_result.
sample_result = observe_result.counts()
# Get the list of all register names in the `SampleResult`.
register_names = sample_result.register_names
if '__global__' in register_names:
register_names.remove('__global__')
    # Loop through each term of the hamiltonian. `cudaq.SpinOperator` is
    # iterable here, so we can enumerate its terms directly.
# Extract the register name from the spin term and check
# that our `SampleResult` is as expected.
for index, sub_term in enumerate(hamiltonian):
print(sub_term)
# Extract the register name from the spin term.
name = str(sub_term).split(" ")[1].rstrip()
# Does the register exist in the measurement results?
assert name in register_names
# Check `cudaq.ObserveResult::counts(sub_term)`
# against `cudaq.SampleResult::get_register_counts(sub_term_str)`
sub_term_counts = observe_result.counts(sub_term=sub_term)
sub_register_counts = sample_result.get_register_counts(name)
# Check that each has `shots_count` number of total observations
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
# Check they have the same number of elements
assert len(sub_register_counts) == len(sub_term_counts)
# Check `cudaq.ObserveResult::expectation_z(sub_term)`
        # against each of the expectation values returned
# from `cudaq.SampleResult`.
expectation_z = observe_result.expectation_z(sub_term=sub_term)
assert assert_close(sub_register_counts.expectation_z(), expectation_z,
1e-1)
assert assert_close(sub_term_counts.expectation_z(), expectation_z,
1e-1)
observe_result.dump()
@pytest.mark.parametrize("want_state, want_expectation",
[["0", 1.0], ["1", -1.0]])
@pytest.mark.parametrize("shots_count", [-1, 10])
def test_observe_no_params(want_state, want_expectation, shots_count):
"""
Test `cudaq.observe()` when no parameters are provided for
two instances:
1. We leave the qubit in the 0 state and call `observe()`
2. We rotate the qubit to the 1 state and call `observe()`
Tests both with and without shots.
"""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
if want_state == "0":
# Keep qubit in the 0-state.
# <kernel |H| kernel> = 1.0
pass
else:
# Place the qubit in the 1-state.
# <kernel |H| kernel> = -1.0
kernel.x(qubit)
# Measuring in the Z-basis.
hamiltonian = spin.z(0)
# Call `cudaq.observe()` at the specified number of shots.
observe_result = cudaq.observe(kernel=kernel,
spin_operator=hamiltonian,
shots_count=shots_count)
got_expectation = observe_result.expectation_z()
assert want_expectation == got_expectation
# If shots mode was enabled, check those results.
if shots_count != -1:
sample_result = observe_result.counts()
register_names = sample_result.register_names
if '__global__' in register_names:
register_names.remove('__global__')
# Check that each register is in the proper state.
for index, sub_term in enumerate(hamiltonian):
# Extract the register name from the spin term.
got_name = str(sub_term).split(" ")[1].rstrip()
# Pull the counts for that hamiltonian sub term from the
# `ObserveResult::counts` overload.
sub_term_counts = observe_result.counts(sub_term=sub_term)
# Pull the counts for that hamiltonian sub term from the
# `SampleResult` dictionary by its name.
sub_register_counts = sample_result.get_register_counts(got_name)
# Sub-term should have the same expectation as the entire
# system.
assert sub_term_counts.expectation_z() == want_expectation
assert sub_register_counts.expectation_z() == want_expectation
# Should have `shots_count` results for each.
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
# Check the state.
assert want_state in sub_term_counts
assert want_state in sub_register_counts
with pytest.raises(RuntimeError) as error:
# Can't accept args.
cudaq.observe(kernel, hamiltonian, 0.0)
@pytest.mark.parametrize("angle, want_state, want_expectation",
[[np.pi, "1", -2.0], [0.0, "0", 2.0]])
@pytest.mark.parametrize("shots_count", [-1, 10])
def test_observe_single_param(angle, want_state, want_expectation, shots_count):
"""
Test `cudaq.observe()` on a parameterized circuit that takes
one argument. Checks with shots mode turned both on and off.
First round we test a kernel with rx gates by np.pi. This should
result in the 1-state for both qubits and `<Z> = -2.0`.
Second round we test a kernel with rx gates by 0.0. This should
result in the 0-state for both qubits and `<Z> = 2.0`.
"""
qubit_count = 2
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(qubit_count)
# Rotate both qubits by the provided `theta`.
kernel.rx(theta, qreg[0])
kernel.rx(theta, qreg[1])
# Measure both qubits in the Z-basis.
hamiltonian = spin.z(0) + spin.z(1)
# Call `cudaq.observe()` at the specified number of shots.
observe_result = cudaq.observe(kernel,
hamiltonian,
angle,
shots_count=shots_count)
got_expectation = observe_result.expectation_z()
assert want_expectation == got_expectation
# If shots mode was enabled, check those results.
if shots_count != -1:
sample_result = observe_result.counts()
register_names = sample_result.register_names
if '__global__' in register_names:
register_names.remove('__global__')
# Check that each register is in the proper state.
for index, sub_term in enumerate(hamiltonian):
# Extract the register name from the spin term.
got_name = str(sub_term).split(" ")[1].rstrip()
# Pull the counts for that hamiltonian sub term from the
# `ObserveResult::counts` overload.
sub_term_counts = observe_result.counts(sub_term=sub_term)
# Pull the counts for that hamiltonian sub term from the
# `SampleResult` dictionary by its name.
sub_register_counts = sample_result.get_register_counts(got_name)
# Sub-term should have an expectation value proportional to the
# expectation over the entire system.
assert sub_term_counts.expectation_z(
) == want_expectation / qubit_count
assert sub_register_counts.expectation_z(
) == want_expectation / qubit_count
# Should have `shots_count` results for each.
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
# Check the state.
assert want_state in sub_term_counts
assert want_state in sub_register_counts
# Make sure that we throw an exception if user provides no/the wrong args.
with pytest.raises(RuntimeError) as error:
# None.
cudaq.observe(kernel, hamiltonian)
with pytest.raises(RuntimeError) as error:
# Too many.
cudaq.observe(kernel, hamiltonian, np.pi, np.pi)
@pytest.mark.parametrize(
"angle_0, angle_1, angles, want_state, want_expectation",
[[np.pi, np.pi, [np.pi, np.pi], "1", -4.0],
[0.0, 0.0, [0.0, 0.0], "0", 4.0]])
@pytest.mark.parametrize("shots_count", [-1, 10])
def test_observe_multi_param(angle_0, angle_1, angles, want_state,
want_expectation, shots_count):
"""
Test `cudaq.observe()` on a parameterized circuit that takes
multiple arguments of different types. Checks with shots mode
turned both on and off.
First round we test a kernel with rx gates by np.pi. This should
result in the 1-state for all qubits and `<Z> = -4.0`.
Second round we test a kernel with rx gates by 0.0. This should
result in the 0-state for all qubits and `<Z> = 4.0`.
"""
qubit_count = 4
kernel, theta_0, theta_1, thetas = cudaq.make_kernel(float, float, list)
qreg = kernel.qalloc(qubit_count)
# Rotate each qubit by their respective angles.
kernel.rx(theta_0, qreg[0])
kernel.rx(theta_1, qreg[1])
kernel.rx(thetas[0], qreg[2])
kernel.rx(thetas[1], qreg[3])
# Measure each qubit in the Z-basis.
hamiltonian = spin.z(0) + spin.z(1) + spin.z(2) + spin.z(3)
# Call `cudaq.observe()` at the specified number of shots.
observe_result = cudaq.observe(kernel,
hamiltonian,
angle_0,
angle_1,
angles,
shots_count=shots_count)
got_expectation = observe_result.expectation_z()
assert want_expectation == got_expectation
# If shots mode was enabled, check those results.
if shots_count != -1:
sample_result = observe_result.counts()
register_names = sample_result.register_names
if '__global__' in register_names:
register_names.remove('__global__')
# Check that each register is in the proper state.
for index, sub_term in enumerate(hamiltonian):
# Extract the register name from the spin term.
got_name = str(sub_term).split(" ")[1].rstrip()
# Pull the counts for that hamiltonian sub term from the
# `ObserveResult::counts` overload.
sub_term_counts = observe_result.counts(sub_term=sub_term)
# Pull the counts for that hamiltonian sub term from the
# `SampleResult` dictionary by its name.
sub_register_counts = sample_result.get_register_counts(got_name)
# Sub-term should have an expectation value proportional to the
# expectation over the entire system.
assert sub_term_counts.expectation_z(
) == want_expectation / qubit_count
assert sub_register_counts.expectation_z(
) == want_expectation / qubit_count
# Should have `shots_count` results for each.
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
# Check the state.
assert want_state in sub_term_counts
assert want_state in sub_register_counts
# Make sure that we throw an exception if user provides no/the wrong args.
with pytest.raises(RuntimeError) as error:
# None.
cudaq.observe(kernel, hamiltonian)
with pytest.raises(RuntimeError) as error:
# Too few.
cudaq.observe(kernel, hamiltonian, np.pi, np.pi)
with pytest.raises(RuntimeError) as error:
# Too many list elements.
cudaq.observe(kernel, hamiltonian, np.pi, np.pi, [np.pi, np.pi, np.pi])
@pytest.mark.parametrize("want_state, want_expectation",
[["0", 1.0], ["1", -1.0]])
@pytest.mark.parametrize("shots_count", [-1, 10])
def test_observe_async_no_params(want_state, want_expectation, shots_count):
"""
Test `cudaq.observe_async()` when no parameters are provided for
two instances:
1. We leave the qubit in the 0 state and call `observe()`
2. We rotate the qubit to the 1 state and call `observe()`
Tests both with and without shots mode.
"""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
if want_state == "0":
# Keep qubit in the 0-state.
# <kernel |H| kernel> = 1.0
pass
else:
# Place the qubit in the 1-state.
# <kernel |H| kernel> = -1.0
kernel.x(qubit)
# Measuring in the Z-basis.
hamiltonian = spin.z(0)
# Call `cudaq.observe()` at the specified number of shots.
future = cudaq.observe_async(kernel=kernel,
spin_operator=hamiltonian,
qpu_id=0,
shots_count=shots_count)
observe_result = future.get()
got_expectation = observe_result.expectation_z()
assert want_expectation == got_expectation
    # Test that this throws an exception: we are on a quantum platform with
    # 1 QPU, but we're asking to run an async job on the 13th QPU (device
    # id 12).
with pytest.raises(Exception) as error:
future = cudaq.observe_async(kernel, hamiltonian, qpu_id=12)
@pytest.mark.parametrize("angle, want_state, want_expectation",
[[np.pi, "1", -2.0], [0.0, "0", 2.0]])
@pytest.mark.parametrize("shots_count", [-1, 10])
def test_observe_async_single_param(angle, want_state, want_expectation,
shots_count):
"""
Test `cudaq.observe_async()` on a parameterized circuit that takes
one argument. Checks with shots mode turned both on and off.
First round we test a kernel with rx gates by np.pi. This should
result in the 1-state for both qubits and `<Z> = -2.0`.
Second round we test a kernel with rx gates by 0.0. This should
result in the 0-state for both qubits and `<Z> = 2.0`.
"""
qubit_count = 2
kernel, theta = cudaq.make_kernel(float)
qreg = kernel.qalloc(qubit_count)
# Rotate both qubits by the provided `theta`.
kernel.rx(theta, qreg[0])
kernel.rx(theta, qreg[1])
# Measure both qubits in the Z-basis.
hamiltonian = spin.z(0) + spin.z(1)
# Call `cudaq.observe()` at the specified number of shots.
future = cudaq.observe_async(kernel,
hamiltonian,
angle,
shots_count=shots_count)
observe_result = future.get()
got_expectation = observe_result.expectation_z()
assert want_expectation == got_expectation
# If shots mode was enabled, check those results.
if shots_count != -1:
sample_result = observe_result.counts()
register_names = sample_result.register_names
if '__global__' in register_names:
register_names.remove('__global__')
# Check that each register is in the proper state.
for index, sub_term in enumerate(hamiltonian):
# Extract the register name from the spin term.
got_name = str(sub_term).split(" ")[1].rstrip()
# Pull the counts for that hamiltonian sub term from the
# `ObserveResult::counts` overload.
sub_term_counts = observe_result.counts(sub_term=sub_term)
# Pull the counts for that hamiltonian sub term from the
# `SampleResult` dictionary by its name.
sub_register_counts = sample_result.get_register_counts(got_name)
# Sub-term should have an expectation value proportional to the
# expectation over the entire system.
assert sub_term_counts.expectation_z(
) == want_expectation / qubit_count
assert sub_register_counts.expectation_z(
) == want_expectation / qubit_count
# Should have `shots_count` results for each.
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
# Check the state.
assert want_state in sub_term_counts
assert want_state in sub_register_counts
# Make sure that we throw an exception if user provides no/the wrong args.
with pytest.raises(RuntimeError) as error:
# None.
cudaq.observe_async(kernel, hamiltonian)
with pytest.raises(RuntimeError) as error:
# Too many.
cudaq.observe_async(kernel, hamiltonian, np.pi, np.pi)
with pytest.raises(Exception) as error:
# Bad QPU id.
future = cudaq.observe_async(kernel, hamiltonian, np.pi, qpu_id=12)
@pytest.mark.parametrize(
"angle_0, angle_1, angles, want_state, want_expectation",
[[np.pi, np.pi, [np.pi, np.pi], "1", -4.0],
[0.0, 0.0, [0.0, 0.0], "0", 4.0]])
@pytest.mark.parametrize("shots_count", [-1, 10])
def test_observe_async_multi_param(angle_0, angle_1, angles, want_state,
want_expectation, shots_count):
"""
Test `cudaq.observe_async()` on a parameterized circuit that takes
multiple arguments of different types. Checks with shots mode
turned both on and off.
First round we test a kernel with rx gates by np.pi. This should
result in the 1-state for all qubits and `<Z> = -4.0`.
Second round we test a kernel with rx gates by 0.0. This should
result in the 0-state for all qubits and `<Z> = 4.0`.
"""
qubit_count = 4
kernel, theta_0, theta_1, thetas = cudaq.make_kernel(float, float, list)
qreg = kernel.qalloc(qubit_count)
# Rotate each qubit by their respective angles.
kernel.rx(theta_0, qreg[0])
kernel.rx(theta_1, qreg[1])
kernel.rx(thetas[0], qreg[2])
kernel.rx(thetas[1], qreg[3])
# Measure each qubit in the Z-basis.
hamiltonian = spin.z(0) + spin.z(1) + spin.z(2) + spin.z(3)
# Call `cudaq.observe()` at the specified number of shots.
future = cudaq.observe_async(kernel,
hamiltonian,
angle_0,
angle_1,
angles,
shots_count=shots_count)
observe_result = future.get()
got_expectation = observe_result.expectation_z()
assert want_expectation == got_expectation
# If shots mode was enabled, check those results.
if shots_count != -1:
sample_result = observe_result.counts()
register_names = sample_result.register_names
if '__global__' in register_names:
register_names.remove('__global__')
# Check that each register is in the proper state.
for index, sub_term in enumerate(hamiltonian):
# Extract the register name from the spin term.
got_name = str(sub_term).split(" ")[1].rstrip()
# Pull the counts for that hamiltonian sub term from the
# `ObserveResult::counts` overload.
sub_term_counts = observe_result.counts(sub_term=sub_term)
# Pull the counts for that hamiltonian sub term from the
# `SampleResult` dictionary by its name.
sub_register_counts = sample_result.get_register_counts(got_name)
# Sub-term should have an expectation value proportional to the
# expectation over the entire system.
assert sub_term_counts.expectation_z(
) == want_expectation / qubit_count
assert sub_register_counts.expectation_z(
) == want_expectation / qubit_count
# Should have `shots_count` results for each.
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
# Check the state.
assert want_state in sub_term_counts
assert want_state in sub_register_counts
# Make sure that we throw an exception if user provides no/the wrong args.
with pytest.raises(RuntimeError) as error:
# None.
cudaq.observe_async(kernel, hamiltonian)
with pytest.raises(RuntimeError) as error:
# Too few.
cudaq.observe_async(kernel, hamiltonian, np.pi, np.pi)
with pytest.raises(Exception) as error:
# Too many list elements.
future = cudaq.observe_async(kernel,
hamiltonian,
np.pi,
np.pi, [np.pi, np.pi, np.pi],
qpu_id=12)
with pytest.raises(Exception) as error:
# Bad QPU id.
future = cudaq.observe_async(kernel,
hamiltonian,
np.pi,
np.pi, [np.pi, np.pi],
qpu_id=12)
@pytest.mark.parametrize("angles, want_state, want_expectation",
[[[np.pi, np.pi, np.pi, np.pi], "1", -4.0],
[[0.0, 0.0, 0.0, 0.0], "0", 4.0]])
def test_observe_numpy_array(angles, want_state, want_expectation):
"""
Tests that a numpy array can be passed to `cudaq.observe` in place of
a list.
"""
qubit_count = 4
shots_count = 10
kernel, thetas = cudaq.make_kernel(list)
qreg = kernel.qalloc(qubit_count)
# Rotate each qubit by their respective angles.
kernel.rx(thetas[0], qreg[0])
kernel.rx(thetas[1], qreg[1])
kernel.rx(thetas[2], qreg[2])
kernel.rx(thetas[3], qreg[3])
print(cudaq.get_state(kernel, angles))
# Measure each qubit in the Z-basis.
hamiltonian = spin.z(0) + spin.z(1) + spin.z(2) + spin.z(3)
# Convert our angles values to a numpy array from a list.
numpy_angles = np.asarray(angles)
# Try calling the kernel function at those angles.
kernel(numpy_angles)
# Call `cudaq.observe()` on the numpy array with 10 shots.
observe_result = cudaq.observe(kernel,
hamiltonian,
numpy_angles,
shots_count=10)
got_expectation = observe_result.expectation_z()
assert want_expectation == got_expectation
# Since shots mode was enabled, check the results.
sample_result = observe_result.counts()
register_names = sample_result.register_names
if '__global__' in register_names:
register_names.remove('__global__')
# Check that each register is in the proper state.
for index, sub_term in enumerate(hamiltonian):
# Extract the register name from the spin term.
got_name = str(sub_term).split(" ")[1].rstrip()
# Pull the counts for that hamiltonian sub term from the
# `ObserveResult::counts` overload.
sub_term_counts = observe_result.counts(sub_term=sub_term)
# Pull the counts for that hamiltonian sub term from the
# `SampleResult` dictionary by its name.
sub_register_counts = sample_result.get_register_counts(got_name)
# Sub-term should have an expectation value proportional to the
# expectation over the entire system.
assert sub_term_counts.expectation_z() == want_expectation / qubit_count
assert sub_register_counts.expectation_z(
) == want_expectation / qubit_count
# Should have `shots_count` results for each.
assert sum(sub_term_counts.values()) == shots_count
assert sum(sub_register_counts.values()) == shots_count
# Check the state.
assert want_state in sub_term_counts
assert want_state in sub_register_counts
# Cannot pass numpy array that is not a vector.
bad_params = np.random.uniform(low=-np.pi, high=np.pi, size=(2, 2))
with pytest.raises(Exception) as error:
# Test kernel call.
kernel(bad_params)
with pytest.raises(Exception) as error:
# Test observe call.
cudaq.observe(kernel, hamiltonian, bad_params, qpu_id=0, shots_count=10)
with pytest.raises(Exception) as error:
# Test too few elements in array.
bad_params = np.random.uniform(low=-np.pi, high=np.pi, size=(2,))
cudaq.observe(kernel, hamiltonian, bad_params, qpu_id=0, shots_count=10)
with pytest.raises(Exception) as error:
# Test too many elements in array.
bad_params = np.random.uniform(low=-np.pi, high=np.pi, size=(8,))
cudaq.observe(kernel, hamiltonian, bad_params, qpu_id=0, shots_count=10)
def test_observe_n():
"""
Test that we can broadcast the observe call over a number of argument sets
"""
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
angles = np.linspace(-np.pi, np.pi, 50)
circuit, theta = cudaq.make_kernel(float)
q = circuit.qalloc(2)
circuit.x(q[0])
circuit.ry(theta, q[1])
circuit.cx(q[1], q[0])
results = cudaq.observe(circuit, hamiltonian, angles)
energies = np.array([r.expectation_z() for r in results])
print(energies)
expected = np.array([
12.250289999999993, 12.746369918061657, 13.130147571153335,
13.395321340821365, 13.537537081098929, 13.554459613462432,
13.445811070398316, 13.213375457979938, 12.860969362537181,
12.39437928241443, 11.821266613827706, 11.151041850950664,
10.39471006586037, 9.56469020555809, 8.674611173202855,
7.7390880418983645, 6.773482075596711, 5.793648497568958,
4.815676148077341, 3.8556233060630225, 2.929254012649781,
2.051779226024591, 1.2376070579247536, 0.5001061928414527,
-0.14861362540995326, -0.6979004353486014, -1.1387349627411503,
-1.4638787168353469, -1.6679928461780573, -1.7477258024084987,
-1.701768372589711, -1.5308751764487525, -1.2378522755416648,
-0.8275110978002891, -0.30658943401863836, 0.3163591964856498,
1.0311059944220289, 1.8259148371286382, 2.687734985381901,
3.6024153761738114, 4.55493698277526, 5.529659426739748,
6.510577792485027, 7.481585427564503, 8.42673841345514,
9.330517364258766, 10.178082254589516, 10.955516092380341,
11.650053435508049, 12.250289999999993
])
assert np.allclose(energies, expected)
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(
1) + 9.625 - 9.625 * spin.z(2) - 3.913119 * spin.x(1) * spin.x(
2) - 3.913119 * spin.y(1) * spin.y(2)
kernel, theta, phi = cudaq.make_kernel(float, float)
qubits = kernel.qalloc(3)
kernel.x(qubits[0])
kernel.ry(theta, qubits[1])
kernel.ry(phi, qubits[2])
kernel.cx(qubits[2], qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.ry(theta * -1., qubits[1])
kernel.cx(qubits[0], qubits[1])
kernel.cx(qubits[1], qubits[0])
runtimeAngles = np.random.uniform(low=-np.pi, high=np.pi, size=(50, 2))
print(runtimeAngles)
results = cudaq.observe(kernel, hamiltonian, runtimeAngles[:, 0],
runtimeAngles[:, 1])
energies = np.array([r.expectation_z() for r in results])
print(energies)
assert len(energies) == 50
kernel, thetas = cudaq.make_kernel(list)
qubits = kernel.qalloc(3)
kernel.x(qubits[0])
kernel.ry(thetas[0], qubits[1])
kernel.ry(thetas[1], qubits[2])
kernel.cx(qubits[2], qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.ry(thetas[0] * -1., qubits[1])
kernel.cx(qubits[0], qubits[1])
kernel.cx(qubits[1], qubits[0])
runtimeAngles = np.random.uniform(low=-np.pi, high=np.pi, size=(50, 2))
print(runtimeAngles)
results = cudaq.observe(kernel, hamiltonian, runtimeAngles)
energies = np.array([r.expectation_z() for r in results])
print(energies)
assert len(energies) == 50
def test_observe_list():
hamiltonianList = [
-2.1433 * spin.x(0) * spin.x(1), -2.1433 * spin.y(0) * spin.y(1),
.21829 * spin.z(0), -6.125 * spin.z(1)
]
circuit, theta = cudaq.make_kernel(float)
q = circuit.qalloc(2)
circuit.x(q[0])
circuit.ry(theta, q[1])
circuit.cx(q[1], q[0])
results = cudaq.observe(circuit, hamiltonianList, .59)
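    # The constant/identity term (5.907) is not part of `hamiltonianList`, so
    # seed the running total with it, then accumulate each term's expectation
    # value weighted by its coefficient.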
sum = 5.907
for r in results:
sum += r.expectation_z() * np.real(r.get_spin().get_coefficient())
print(sum)
want_expectation_value = -1.7487948611472093
assert assert_close(want_expectation_value, sum, tolerance=1e-2)
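    # Sketch of an equivalent check (hypothetical addition, not part of the
    # original test): observing the full Hamiltonian, identity term included,
    # should reproduce the same energy.
    full_hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
        0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
    direct_energy = cudaq.observe(circuit, full_hamiltonian,
                                  .59).expectation_z()
    assert assert_close(want_expectation_value, direct_energy, tolerance=1e-2)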
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-s"])
| cuda-quantum-main | python/tests/unittests/test_observe.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# This file is responsible for testing the accuracy of gates within
# the kernel builder.
import numpy as np
import cudaq
from cudaq import spin
def test_sdg_0_state():
"""Tests the adjoint S-gate on a qubit starting in the 0-state."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in superposition state.
kernel.h(qubit)
# Rotate around Z by -pi/2, twice. Total rotation of -pi.
kernel.sdg(qubit)
kernel.sdg(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Since the qubit began in the 0-state, it should now be in the
# 1-state.
assert counts["1"] == 1000
def test_sdg_1_state():
"""Tests the adjoint S-gate on a qubit starting in the 1-state."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in 1-state.
kernel.x(qubit)
    # Superposition.
kernel.h(qubit)
# Rotate around Z by -pi/2, twice. Total rotation of -pi.
kernel.sdg(qubit)
kernel.sdg(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Since the qubit began in the 1-state, it should now be in the
# 0-state.
assert counts["0"] == 1000
def test_sdg_0_state_negate():
"""Tests that the sdg and s gates cancel each other out."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in superposition state.
kernel.h(qubit)
# Rotate around Z by -pi/2, twice. Total rotation of -pi.
kernel.sdg(qubit)
kernel.sdg(qubit)
# Rotate back around by pi. Will use two gates here, but will
# also test with a plain Z-gate in the 1-state test.
kernel.s(qubit)
kernel.s(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Qubit should still be in 0 state.
assert counts["0"] == 1000
def test_sdg_1_state_negate():
"""Tests that the sdg and s gates cancel each other out."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in 1-state.
kernel.x(qubit)
    # Superposition.
kernel.h(qubit)
# Rotate around Z by -pi/2, twice. Total rotation of -pi.
kernel.sdg(qubit)
kernel.sdg(qubit)
# Rotate back by pi.
kernel.z(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Qubit should still be in 1 state.
assert counts["1"] == 1000
def test_tdg_0_state():
"""Tests the adjoint T-gate on a qubit starting in the 0-state."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in superposition state.
kernel.h(qubit)
# Rotate around Z by -pi/4, four times. Total rotation of -pi.
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Since the qubit began in the 0-state, it should now be in the
# 1-state.
assert counts["1"] == 1000
def test_tdg_1_state():
"""Tests the adjoint T-gate on a qubit starting in the 1-state."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in 1-state.
kernel.x(qubit)
# Superposition.
kernel.h(qubit)
# Rotate around Z by -pi/4, four times. Total rotation of -pi.
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Since the qubit began in the 1-state, it should now be in the
# 0-state.
assert counts["0"] == 1000
def test_tdg_0_state_negate():
"""Tests that the adjoint T gate cancels with a T gate."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in superposition state.
kernel.h(qubit)
# Rotate around Z by -pi/4, four times. Total rotation of -pi.
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
# Rotate back by pi.
kernel.z(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Qubit should remain in 0-state.
assert counts["0"] == 1000
def test_tdg_1_state_negate():
"""Tests that the adjoint T gate cancels with a T gate."""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc(1)
# Place qubit in 1-state.
kernel.x(qubit)
# Superposition.
kernel.h(qubit)
# Rotate around Z by -pi/4, four times. Total rotation of -pi.
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
kernel.tdg(qubit)
# Rotate back by pi.
kernel.t(qubit)
kernel.t(qubit)
kernel.t(qubit)
kernel.t(qubit)
# Apply another hadamard.
kernel.h(qubit)
kernel.mz(qubit)
counts = cudaq.sample(kernel)
counts.dump()
# Qubit should remain in 1-state.
assert counts["1"] == 1000
def test_can_progressively_build():
"""Tests that a kernel can be build progressively."""
cudaq.reset_target()
kernel = cudaq.make_kernel()
q = kernel.qalloc(2)
kernel.h(q[0])
print(kernel)
state = cudaq.get_state(kernel)
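    # The nonzero amplitudes at indices 0 and 2 (together with the '00'/'10'
    # counts below) suggest that qubit 0 maps to the most significant bit of
    # the state-vector index in this representation.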
assert np.isclose(1. / np.sqrt(2.), state[0].real)
assert np.isclose(0., state[1].real)
assert np.isclose(1. / np.sqrt(2.), state[2].real)
assert np.isclose(0., state[3].real)
counts = cudaq.sample(kernel)
print(counts)
assert '10' in counts
assert '00' in counts
# Continue building the kernel
kernel.cx(q[0], q[1])
print(kernel)
state = cudaq.get_state(kernel)
assert np.isclose(1. / np.sqrt(2.), state[0].real)
assert np.isclose(0., state[1].real)
assert np.isclose(0., state[2].real)
assert np.isclose(1. / np.sqrt(2.), state[3].real)
counts = cudaq.sample(kernel)
print(counts)
assert '11' in counts
assert '00' in counts
def test_from_state():
cudaq.reset_target()
state = np.asarray([.70710678, 0., 0., 0.70710678])
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
cudaq.from_state(kernel, qubits, state)
print(kernel)
counts = cudaq.sample(kernel)
print(counts)
assert '11' in counts
assert '00' in counts
kernel = cudaq.from_state(state)
counts = cudaq.sample(kernel)
print(counts)
assert '11' in counts
assert '00' in counts
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
state = np.asarray([0., .292786, .956178, 0.])
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
cudaq.from_state(kernel, qubits, state)
energy = cudaq.observe(kernel, hamiltonian).expectation_z()
assert np.isclose(-1.748, energy, 1e-3)
ss = cudaq.get_state(kernel)
for i in range(4):
assert np.isclose(ss[i], state[i], 1e-3)
| cuda-quantum-main | python/tests/unittests/test_kernel_builder.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
#!/usr/bin/env python3
"""
To generate comments for FileCheck in our python testing suite:
1. Run `pytest -rP cudaq/python/tests/test_*.py -k test_*`. Make sure the MLIR
that you're hoping to generate checks for is being printed within the test.
2. Write the output of the test to a .txt file: `pytest ... -k test_* &> out.txt`
3. Pass `out.txt` into this file and generate another .txt file that will contain
the final MLIR checks: `python3 quake_filecheck_gen.py < out.txt > final_out.txt`.
4. The labels from (3) can then be pasted into the appropriate location of the
original pytest file.
"""
"""A script to generate FileCheck statements for mlir unit tests.
This script is a utility to add FileCheck patterns to an mlir file.
NOTE: The input .mlir is expected to be the output from the parser, not a
stripped down variant.
Example usage:
$ generate-test-checks.py foo.mlir
$ mlir-opt foo.mlir -transformation | generate-test-checks.py
$ mlir-opt foo.mlir -transformation | generate-test-checks.py --source foo.mlir
$ mlir-opt foo.mlir -transformation | generate-test-checks.py --source foo.mlir -i
$ mlir-opt foo.mlir -transformation | generate-test-checks.py --source foo.mlir -i --source_delim_regex='gpu.func @'
The script will heuristically generate CHECK/CHECK-LABEL commands for each line
within the file. By default this script will also try to insert string
substitution blocks for all SSA value names. If --source file is specified, the
script will attempt to insert the generated CHECKs to the source file by looking
for line positions matched by --source_delim_regex.
The script is designed to make adding checks to a test case fast, it is *not*
designed to be authoritative about what constitutes a good test!
"""
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import re
import sys
ADVERT_BEGIN = '// NOTE: Assertions have been autogenerated by '
ADVERT_END = """
// The script is designed to make adding checks to
// a test case fast, it is *not* designed to be authoritative
// about what constitutes a good test! The CHECK should be
// minimized and named to reflect the test intent.
"""
# Regex command to match an SSA identifier.
SSA_RE_STR = '[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*'
SSA_RE = re.compile(SSA_RE_STR)
# Class used to generate and manage string substitution blocks for SSA value
# names.
class SSAVariableNamer:
def __init__(self):
self.scopes = []
self.name_counter = 0
# Generate a substitution name for the given ssa value name.
def generate_name(self, ssa_name):
variable = 'VAL_' + str(self.name_counter)
self.name_counter += 1
self.scopes[-1][ssa_name] = variable
return variable
# Push a new variable name scope.
def push_name_scope(self):
self.scopes.append({})
# Pop the last variable name scope.
def pop_name_scope(self):
self.scopes.pop()
# Return the level of nesting (number of pushed scopes).
def num_scopes(self):
return len(self.scopes)
# Reset the counter.
def clear_counter(self):
self.name_counter = 0
# Process a line of input that has been split at each SSA identifier '%'.
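# For example (illustrative, not taken from the original script), an input
# line such as
#   %0 = quake.alloca : !quake.qvec<2>
# has its SSA names replaced by FileCheck captures, roughly
#   %[[VAL_0:.*]] = quake.alloca : !quake.qvec<2>
# on first use, and %[[VAL_0]] on subsequent uses of the same name.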
def process_line(line_chunks, variable_namer):
output_line = ''
# Process the rest that contained an SSA value name.
for chunk in line_chunks:
m = SSA_RE.match(chunk)
ssa_name = m.group(0)
# Check if an existing variable exists for this name.
variable = None
for scope in variable_namer.scopes:
variable = scope.get(ssa_name)
if variable is not None:
break
# If one exists, then output the existing name.
if variable is not None:
output_line += '%[[' + variable + ']]'
else:
# Otherwise, generate a new variable.
variable = variable_namer.generate_name(ssa_name)
output_line += '%[[' + variable + ':.*]]'
# Append the non named group.
output_line += chunk[len(ssa_name):]
return output_line.rstrip() + '\n'
# Process the source file lines. The source file doesn't have to be .mlir.
def process_source_lines(source_lines, note, args):
source_split_re = re.compile(args.source_delim_regex)
source_segments = [[]]
for line in source_lines:
# Remove previous note.
if line == note:
continue
# Remove previous CHECK lines.
if line.find(args.check_prefix) != -1:
continue
# Segment the file based on --source_delim_regex.
if source_split_re.search(line):
source_segments.append([])
source_segments[-1].append(line + '\n')
return source_segments
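# (Illustrative) With the default --source_delim_regex of 'func @', every line
# containing 'func @' starts a new segment, and main() later writes each block
# of generated CHECK lines immediately before its corresponding segment.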
# Pre-process a line of input to remove any character sequences that will be
# problematic with FileCheck.
def preprocess_line(line):
# Replace any double brackets, '[[' with escaped replacements. '[['
# corresponds to variable names in FileCheck.
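# For example (illustrative): "foo [[0, 1]]" becomes "foo {{\[\[}}0, 1]]".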
output_line = line.replace('[[', '{{\\[\\[}}')
# Replace any single bracket that is followed by an SSA identifier; the
# identifier will be replaced by a variable, creating the same situation as
# above.
output_line = output_line.replace('[%', '{{\\[}}%')
return output_line
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--check-prefix',
default='CHECK',
help='Prefix to use from check file.')
parser.add_argument('-o',
'--output',
nargs='?',
type=argparse.FileType('w'),
default=None)
parser.add_argument('input',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument(
'--source',
type=str,
help='Print each CHECK chunk before the corresponding delimiter line in the '
'source file. The delimiter lines are identified by '
'--source_delim_regex.')
parser.add_argument('--source_delim_regex', type=str, default='func @')
parser.add_argument(
'--starts_from_scope',
type=int,
default=1,
help='Omit the given number of outermost scope levels. For example, by '
'default it omits "module {"')
parser.add_argument('-i', '--inplace', action='store_true', default=False)
args = parser.parse_args()
# Open the given input file.
input_lines = [l.rstrip() for l in args.input]
args.input.close()
# Generate a note used for the generated check file.
script_name = os.path.basename(__file__)
autogenerated_note = (ADVERT_BEGIN + 'utils/' + script_name + "\n" +
ADVERT_END)
source_segments = None
if args.source:
source_segments = process_source_lines(
[l.rstrip() for l in open(args.source, 'r')], autogenerated_note,
args)
if args.inplace:
assert args.output is None
output = open(args.source, 'w')
elif args.output is None:
output = sys.stdout
else:
output = args.output
output_segments = [[]]
# A map containing data used for naming SSA value names.
variable_namer = SSAVariableNamer()
for input_line in input_lines:
if not input_line:
continue
lstripped_input_line = input_line.lstrip()
# Block lines begin with a '^'. These lines may have a trailing comment
# that needs to be stripped.
is_block = lstripped_input_line[0] == '^'
if is_block:
input_line = input_line.rsplit('#', 1)[0].rstrip()
cur_level = variable_namer.num_scopes()
# If the line starts with a '}', pop the last name scope.
if lstripped_input_line[0] == '}':
variable_namer.pop_name_scope()
cur_level = variable_namer.num_scopes()
# If the line ends with a '{', push a new name scope.
if input_line[-1] == '{':
variable_namer.push_name_scope()
if cur_level == args.starts_from_scope:
output_segments.append([])
# Omit lines that are shallower than the starting scope, e.g. "module {".
if cur_level < args.starts_from_scope:
continue
if len(output_segments[-1]) == 0:
variable_namer.clear_counter()
# Preprocess the input to remove any sequences that may be problematic with
# FileCheck.
input_line = preprocess_line(input_line)
# Split the line at each SSA value name.
ssa_split = input_line.split('%')
# If this is a top-level operation use 'CHECK-LABEL', otherwise 'CHECK:'.
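# (Illustrative) A definition line such as "func.func @foo(%arg0: i32)" at the
# start of a segment becomes a CHECK-LABEL line for the text before the first
# '%', followed by CHECK-SAME lines for each SSA argument.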
if len(output_segments[-1]) != 0 or not ssa_split[0]:
output_line = '# ' + args.check_prefix + ': '
# Pad to align with the 'LABEL' statements.
output_line += (' ' * len('-LABEL'))
# Output the first line chunk that does not contain an SSA name.
output_line += ssa_split[0]
# Process the rest of the input line.
output_line += process_line(ssa_split[1:], variable_namer)
else:
# Output the first line chunk that does not contain an SSA name for the
# label.
output_line = '# ' + args.check_prefix + '-LABEL: ' + ssa_split[0] + '\n'
# Process the rest of the input line on separate check lines.
for argument in ssa_split[1:]:
output_line += '# ' + args.check_prefix + '-SAME: '
# Pad to align with the original position in the line.
output_line += ' ' * len(ssa_split[0])
# Process the rest of the line.
output_line += process_line([argument], variable_namer)
# Append the output line.
output_segments[-1].append(output_line)
output.write(autogenerated_note + '\n')
# Write the output.
if source_segments:
assert len(output_segments) == len(source_segments)
for check_segment, source_segment in zip(output_segments,
source_segments):
for line in check_segment:
output.write(line)
for line in source_segment:
output.write(line)
else:
for segment in output_segments:
output.write('\n')
for output_line in segment:
output.write(output_line)
output.write('\n')
output.close()
if __name__ == '__main__':
main()
| cuda-quantum-main | python/tests/utils/quake_filecheck_gen.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_measure_1q():
"""
Test the measurement instruction for `cudaq.Kernel` by applying
measurements to qubits one at a time.
"""
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(2)
qubit_0 = qreg[0]
qubit_1 = qreg[1]
# Check that we can apply measurements to 1 qubit at a time.
kernel.mx(qubit_0)
kernel.mx(qubit_1)
kernel.my(qubit_0)
kernel.my(qubit_1)
kernel.mz(qubit_0)
kernel.mz(qubit_1)
kernel()
assert kernel.arguments == []
assert kernel.argument_count == 0
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<2>
# CHECK: %[[VAL_3:.*]] = quake.extract_ref %[[VAL_2]][0] : (!quake.veq<2>) -> !quake.ref
# CHECK: %[[VAL_4:.*]] = quake.extract_ref %[[VAL_2]][1] : (!quake.veq<2>) -> !quake.ref
# CHECK: %[[VAL_5:.*]] = quake.mx %[[VAL_3]] name "" : (!quake.ref) -> i1
# CHECK: %[[VAL_6:.*]] = quake.mx %[[VAL_4]] name "" : (!quake.ref) -> i1
# CHECK: %[[VAL_7:.*]] = quake.my %[[VAL_3]] name "" : (!quake.ref) -> i1
# CHECK: %[[VAL_8:.*]] = quake.my %[[VAL_4]] name "" : (!quake.ref) -> i1
# CHECK: %[[VAL_9:.*]] = quake.mz %[[VAL_3]] name "" : (!quake.ref) -> i1
# CHECK: %[[VAL_10:.*]] = quake.mz %[[VAL_4]] name "" : (!quake.ref) -> i1
# CHECK: return
# CHECK: }
def test_kernel_measure_qreg():
"""
Test the measurement instruction for `cudaq.Kernel` by applying
measurements to an entire qreg.
"""
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(3)
# Check that we can apply measurements to an entire register.
kernel.mx(qreg)
kernel.my(qreg)
kernel.mz(qreg)
kernel()
assert kernel.arguments == []
assert kernel.argument_count == 0
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = arith.constant 3 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_4:.*]] = quake.alloca !quake.veq<3>
# CHECK: %[[VAL_5:.*]] = cc.alloca !cc.array<i1 x 3>
# CHECK: %[[VAL_6:.*]] = cc.loop while ((%[[VAL_7:.*]] = %[[VAL_3]]) -> (index)) {
# CHECK: %[[VAL_8:.*]] = arith.cmpi slt, %[[VAL_7]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_8]](%[[VAL_7]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_9:.*]]: index):
# CHECK: %[[VAL_10:.*]] = quake.extract_ref %[[VAL_4]][%[[VAL_9]]] : (!quake.veq<3>, index) -> !quake.ref
# CHECK: %[[VAL_11:.*]] = quake.mx %[[VAL_10]] : (!quake.ref) -> i1
# CHECK: %[[VAL_12:.*]] = arith.index_cast %[[VAL_9]] : index to i64
# CHECK: %[[VAL_13:.*]] = cc.compute_ptr %[[VAL_5]][%[[VAL_12]]] : (!cc.ptr<!cc.array<i1 x 3>>, i64) -> !cc.ptr<i1>
# CHECK: cc.store %[[VAL_11]], %[[VAL_13]] : !cc.ptr<i1>
# CHECK: cc.continue %[[VAL_9]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_14:.*]]: index):
# CHECK: %[[VAL_15:.*]] = arith.addi %[[VAL_14]], %[[VAL_2]] : index
# CHECK: cc.continue %[[VAL_15]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_16:.*]] = cc.alloca !cc.array<i1 x 3>
# CHECK: %[[VAL_17:.*]] = cc.loop while ((%[[VAL_18:.*]] = %[[VAL_3]]) -> (index)) {
# CHECK: %[[VAL_19:.*]] = arith.cmpi slt, %[[VAL_18]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_19]](%[[VAL_18]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_20:.*]]: index):
# CHECK: %[[VAL_21:.*]] = quake.extract_ref %[[VAL_4]][%[[VAL_20]]] : (!quake.veq<3>, index) -> !quake.ref
# CHECK: %[[VAL_22:.*]] = quake.my %[[VAL_21]] : (!quake.ref) -> i1
# CHECK: %[[VAL_23:.*]] = arith.index_cast %[[VAL_20]] : index to i64
# CHECK: %[[VAL_24:.*]] = cc.compute_ptr %[[VAL_16]][%[[VAL_23]]] : (!cc.ptr<!cc.array<i1 x 3>>, i64) -> !cc.ptr<i1>
# CHECK: cc.store %[[VAL_22]], %[[VAL_24]] : !cc.ptr<i1>
# CHECK: cc.continue %[[VAL_20]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_25:.*]]: index):
# CHECK: %[[VAL_26:.*]] = arith.addi %[[VAL_25]], %[[VAL_2]] : index
# CHECK: cc.continue %[[VAL_26]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_27:.*]] = cc.alloca !cc.array<i1 x 3>
# CHECK: %[[VAL_28:.*]] = cc.loop while ((%[[VAL_29:.*]] = %[[VAL_3]]) -> (index)) {
# CHECK: %[[VAL_30:.*]] = arith.cmpi slt, %[[VAL_29]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_30]](%[[VAL_29]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_31:.*]]: index):
# CHECK: %[[VAL_32:.*]] = quake.extract_ref %[[VAL_4]][%[[VAL_31]]] : (!quake.veq<3>, index) -> !quake.ref
# CHECK: %[[VAL_33:.*]] = quake.mz %[[VAL_32]] : (!quake.ref) -> i1
# CHECK: %[[VAL_34:.*]] = arith.index_cast %[[VAL_31]] : index to i64
# CHECK: %[[VAL_35:.*]] = cc.compute_ptr %[[VAL_27]][%[[VAL_34]]] : (!cc.ptr<!cc.array<i1 x 3>>, i64) -> !cc.ptr<i1>
# CHECK: cc.store %[[VAL_33]], %[[VAL_35]] : !cc.ptr<i1>
# CHECK: cc.continue %[[VAL_31]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_36:.*]]: index):
# CHECK: %[[VAL_37:.*]] = arith.addi %[[VAL_36]], %[[VAL_2]] : index
# CHECK: cc.continue %[[VAL_37]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/measure.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_adjoint_no_args():
"""
Tests the adjoint of a kernel that takes no arguments.
"""
other_kernel = cudaq.make_kernel()
other_qubit = other_kernel.qalloc()
other_kernel.x(other_qubit)
kernel = cudaq.make_kernel()
kernel.adjoint(other_kernel)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} : () -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_adjoint_qubit_args():
"""
Tests the adjoint of a kernel that takes a qubit as an argument.
"""
other_kernel, other_qubit = cudaq.make_kernel(cudaq.qubit)
other_kernel.h(other_qubit)
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
kernel.adjoint(other_kernel, qubit)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {
# CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_adjoint_qreg_args():
"""
Tests the adjoint of a kernel that takes a qreg as an argument.
"""
other_kernel, other_qreg = cudaq.make_kernel(cudaq.qreg)
other_kernel.h(other_qreg)
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(5)
kernel.adjoint(other_kernel, qreg)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<5>
# CHECK: %[[VAL_1:.*]] = quake.relax_size %[[VAL_0]]
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_1]] : (!quake.veq<?>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq<?>) {
# CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_3:.*]] = quake.veq_size %[[VAL_0]] : (!quake.veq<?>) -> i64
# CHECK: %[[VAL_4:.*]] = arith.index_cast %[[VAL_3]] : i64 to index
# CHECK: %[[VAL_5:.*]] = cc.loop while ((%[[VAL_6:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_7:.*]] = arith.cmpi slt, %[[VAL_6]], %[[VAL_4]] : index
# CHECK: cc.condition %[[VAL_7]](%[[VAL_6]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_8:.*]]: index):
# CHECK: %[[VAL_9:.*]] = quake.extract_ref %[[VAL_0]][%[[VAL_8]]] : (!quake.veq<?>, index) -> !quake.ref
# CHECK: quake.h %[[VAL_9]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_8]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_10:.*]]: index):
# CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_10]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_11]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
def test_kernel_adjoint_float_args():
"""
Tests the adjoint of a kernel that is parameterized
by a float.
"""
other_kernel, other_value = cudaq.make_kernel(float)
other_qubit = other_kernel.qalloc()
other_kernel.x(other_qubit)
other_kernel.rx(other_value, other_qubit)
kernel, _float = cudaq.make_kernel(float)
kernel.adjoint(other_kernel, _float)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (f64) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: quake.x %[[VAL_1]] : (!quake.ref) -> ()
# CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_adjoint_int_args():
"""
Tests the adjoint of a kernel that is parameterized
by an int.
Note: we can't currently do anything with `int` kernel
parameters in `other_kernel`.
"""
other_kernel, other_value = cudaq.make_kernel(int)
other_qubit = other_kernel.qalloc()
other_kernel.x(other_qubit)
kernel, _int = cudaq.make_kernel(int)
kernel.adjoint(other_kernel, _int)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {"cudaq-entrypoint"} {
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (i32) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: quake.x %[[VAL_1]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_adjoint_list_args():
"""
Tests the adjoint of a kernel that is parameterized
by a list.
"""
other_kernel, other_value = cudaq.make_kernel(list)
other_qubit = other_kernel.qalloc()
other_kernel.rx(other_value[0], other_qubit)
kernel, _list = cudaq.make_kernel(list)
kernel.adjoint(other_kernel, _list)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) attributes {"cudaq-entrypoint"} {
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (!cc.stdvec<f64>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec<f64>) -> !cc.ptr<!cc.array<f64 x ?>>
# CHECK: %[[VAL_4:.*]] = cc.compute_ptr %[[VAL_2]][0] : (!cc.ptr<!cc.array<f64 x ?>>) -> !cc.ptr<f64>
# CHECK: %[[VAL_3:.*]] = cc.load %[[VAL_4]] : !cc.ptr<f64>
# CHECK: quake.rx (%[[VAL_3]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_sample_adjoint_qubit():
"""
Tests the adjoint of a kernel that is parameterized
by a qubit. Checks for correctness on simulator.
"""
other_kernel, other_qubit = cudaq.make_kernel(cudaq.qubit)
other_kernel.x(other_qubit)
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
kernel.x(qubit)
# Call the other kernel on `qubit`.
kernel.apply_call(other_kernel, qubit)
# Apply adjoint of the other kernel to `qubit`.
kernel.adjoint(other_kernel, qubit)
# Measure `qubit`.
kernel.mz(qubit)
result = cudaq.sample(kernel)
assert len(result) == 1
# Qubit should be in the 1-state.
assert '1' in result
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!quake.ref) -> ()
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: %[[VAL_1:.*]] = quake.mz %[[VAL_0]] name "" : (!quake.ref) -> i1
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_sample_adjoint_qreg():
"""
Tests the adjoint of a kernel that is parameterized
by a qreg. Checks for correctness on simulator.
"""
other_kernel, other_qreg = cudaq.make_kernel(cudaq.qreg)
other_kernel.x(other_qreg)
kernel, qubit_variable = cudaq.make_kernel(int)
qreg = kernel.qalloc(qubit_variable)
kernel.x(qreg)
# Call the other kernel on `qreg`.
kernel.apply_call(other_kernel, qreg)
# Apply adjoint of the other kernel to `qreg`.
kernel.adjoint(other_kernel, qreg)
# Measure `qreg`.
kernel.mz(qreg)
qubit_count = 5
result = cudaq.sample(kernel, qubit_count)
assert len(result) == 1
# Qubits should be in the 1-state.
assert '1' * qubit_count in result
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_3:.*]] = quake.alloca !quake.veq<?>[%[[VAL_0]] : i32]
# CHECK: %[[VAL_4:.*]] = quake.veq_size %[[VAL_3]] : (!quake.veq<?>) -> i64
# CHECK: %[[VAL_5:.*]] = arith.index_cast %[[VAL_4]] : i64 to index
# CHECK: %[[VAL_6:.*]] = cc.loop while ((%[[VAL_7:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_8:.*]] = arith.cmpi slt, %[[VAL_7]], %[[VAL_5]] : index
# CHECK: cc.condition %[[VAL_8]](%[[VAL_7]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_9:.*]]: index):
# CHECK: %[[VAL_10:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_9]]] : (!quake.veq<?>, index) -> !quake.ref
# CHECK: quake.x %[[VAL_10]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_9]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_11:.*]]: index):
# CHECK: %[[VAL_12:.*]] = arith.addi %[[VAL_11]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_12]] : index
# CHECK: } {invariant}
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_3]]) : (!quake.veq<?>) -> ()
# CHECK: quake.apply<adj> @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_3]] : (!quake.veq<?>) -> ()
# CHECK: %[[VAL_13:.*]] = cc.alloca i1[%[[VAL_4]] : i64]
# CHECK: %[[VAL_14:.*]] = cc.loop while ((%[[VAL_15:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_16:.*]] = arith.cmpi slt, %[[VAL_15]], %[[VAL_5]] : index
# CHECK: cc.condition %[[VAL_16]](%[[VAL_15]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_17:.*]]: index):
# CHECK: %[[VAL_18:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_17]]] : (!quake.veq<?>, index) -> !quake.ref
# CHECK: %[[VAL_19:.*]] = quake.mz %[[VAL_18]] : (!quake.ref) -> i1
# CHECK: %[[VAL_20:.*]] = arith.index_cast %[[VAL_17]] : index to i64
# CHECK: %[[VAL_21:.*]] = cc.compute_ptr %[[VAL_13]][%[[VAL_20]]] : (!cc.ptr<!cc.array<i1 x ?>>, i64) -> !cc.ptr<i1>
# CHECK: cc.store %[[VAL_19]], %[[VAL_21]] : !cc.ptr<i1>
# CHECK: cc.continue %[[VAL_17]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_22:.*]]: index):
# CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_22]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_23]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq<?>) {
# CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_3:.*]] = quake.veq_size %[[VAL_0]] : (!quake.veq<?>) -> i64
# CHECK: %[[VAL_4:.*]] = arith.index_cast %[[VAL_3]] : i64 to index
# CHECK: %[[VAL_5:.*]] = cc.loop while ((%[[VAL_6:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_7:.*]] = arith.cmpi slt, %[[VAL_6]], %[[VAL_4]] : index
# CHECK: cc.condition %[[VAL_7]](%[[VAL_6]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_8:.*]]: index):
# CHECK: %[[VAL_9:.*]] = quake.extract_ref %[[VAL_0]]{{\[}}%[[VAL_8]]] : (!quake.veq<?>, index) -> !quake.ref
# CHECK: quake.x %[[VAL_9]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_8]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_10:.*]]: index):
# CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_10]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_11]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/adjoint.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_make_kernel_list():
"""
Test `cudaq.make_kernel` with a list of floats as parameters.
"""
kernel, parameter = cudaq.make_kernel(list)
# Kernel should only have 1 argument and parameter.
got_arguments = kernel.arguments
got_argument_count = kernel.argument_count
assert len(got_arguments) == 1
assert got_argument_count == 1
# Dump the MLIR for FileCheck.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) attributes {"cudaq-entrypoint"} {
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/list.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_make_kernel_float():
"""
Test `cudaq.make_kernel` with one float parameter.
"""
kernel, parameter = cudaq.make_kernel(float)
# Kernel should only have 1 argument and parameter.
got_arguments = kernel.arguments
got_argument_count = kernel.argument_count
assert len(got_arguments) == 1
assert got_argument_count == 1
# Dump the MLIR for FileCheck.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/float.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import cudaq
def test_swap_2q():
"""
Tests the simple case of swapping the states of two qubits.
"""
kernel = cudaq.make_kernel()
# Allocate a register of size 2.
qreg = kernel.qalloc(2)
qubit_0 = qreg[0]
qubit_1 = qreg[1]
# Place qubit 0 in the 1-state.
kernel.x(qubit_0)
# Swap states with qubit 1.
kernel.swap(qubit_0, qubit_1)
# Check their states.
kernel.mz(qreg)
assert kernel.arguments == []
assert kernel.argument_count == 0
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = arith.constant 2 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_4:.*]] = quake.alloca !quake.veq<2>
# CHECK: %[[VAL_5:.*]] = quake.extract_ref %[[VAL_4]][0] : (!quake.veq<2>) -> !quake.ref
# CHECK: %[[VAL_6:.*]] = quake.extract_ref %[[VAL_4]][1] : (!quake.veq<2>) -> !quake.ref
# CHECK: quake.x %[[VAL_5]] : (!quake.ref) -> ()
# CHECK: quake.swap %[[VAL_5]], %[[VAL_6]] : (!quake.ref, !quake.ref) -> ()
# CHECK: %[[VAL_7:.*]] = cc.alloca !cc.array<i1 x 2>
# CHECK: %[[VAL_8:.*]] = cc.loop while ((%[[VAL_9:.*]] = %[[VAL_3]]) -> (index)) {
# CHECK: %[[VAL_10:.*]] = arith.cmpi slt, %[[VAL_9]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_10]](%[[VAL_9]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_11:.*]]: index):
# CHECK: %[[VAL_12:.*]] = quake.extract_ref %[[VAL_4]]{{\[}}%[[VAL_11]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: %[[VAL_13:.*]] = quake.mz %[[VAL_12]] : (!quake.ref) -> i1
# CHECK: %[[VAL_14:.*]] = arith.index_cast %[[VAL_11]] : index to i64
# CHECK: %[[VAL_15:.*]] = cc.compute_ptr %[[VAL_7]][%[[VAL_14]]] : (!cc.ptr<!cc.array<i1 x 2>>, i64) -> !cc.ptr<i1>
# CHECK: cc.store %[[VAL_13]], %[[VAL_15]] : !cc.ptr<i1>
# CHECK: cc.continue %[[VAL_11]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_16:.*]]: index):
# CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_16]], %[[VAL_2]] : index
# CHECK: cc.continue %[[VAL_17]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/swap.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_make_kernel_multiple_floats():
"""
Test `cudaq.make_kernel` with multiple parameters.
"""
kernel, parameter_1, parameter_2 = cudaq.make_kernel(float, float)
# Kernel should have 2 arguments and parameters.
got_arguments = kernel.arguments
got_argument_count = kernel.argument_count
assert len(got_arguments) == 2
assert got_argument_count == 2
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64,
# CHECK: %[[VAL_1:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/multiple_floats.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_qreg():
"""
Test the `cudaq.Kernel` on each non-parameterized single qubit gate.
Each gate is applied to both qubits in a 2-qubit register.
"""
kernel = cudaq.make_kernel()
# Allocate a register of size 2.
qreg = kernel.qalloc(2)
# Apply each gate to entire register.
# Test both with and without keyword arguments.
kernel.h(target=qreg)
kernel.x(target=qreg)
kernel.y(target=qreg)
kernel.z(qreg)
kernel.t(qreg)
kernel.s(qreg)
kernel()
assert kernel.arguments == []
assert kernel.argument_count == 0
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = arith.constant 2 : index
# CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_3:.*]] = quake.alloca !quake.veq<2>
# CHECK: %[[VAL_4:.*]] = cc.loop while ((%[[VAL_5:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_6:.*]] = arith.cmpi slt, %[[VAL_5]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_6]](%[[VAL_5]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_7:.*]]: index):
# CHECK: %[[VAL_8:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_7]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: quake.h %[[VAL_8]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_7]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_9:.*]]: index):
# CHECK: %[[VAL_10:.*]] = arith.addi %[[VAL_9]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_10]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_11:.*]] = cc.loop while ((%[[VAL_12:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_13:.*]] = arith.cmpi slt, %[[VAL_12]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_13]](%[[VAL_12]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_14:.*]]: index):
# CHECK: %[[VAL_15:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_14]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: quake.x %[[VAL_15]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_14]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_16:.*]]: index):
# CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_16]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_17]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_18:.*]] = cc.loop while ((%[[VAL_19:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_20:.*]] = arith.cmpi slt, %[[VAL_19]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_20]](%[[VAL_19]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_21:.*]]: index):
# CHECK: %[[VAL_22:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_21]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: quake.y %[[VAL_22]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_21]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_23:.*]]: index):
# CHECK: %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_24]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_25:.*]] = cc.loop while ((%[[VAL_26:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_27:.*]] = arith.cmpi slt, %[[VAL_26]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_27]](%[[VAL_26]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_28:.*]]: index):
# CHECK: %[[VAL_29:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_28]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: quake.z %[[VAL_29]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_28]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_30:.*]]: index):
# CHECK: %[[VAL_31:.*]] = arith.addi %[[VAL_30]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_31]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_32:.*]] = cc.loop while ((%[[VAL_33:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_34:.*]] = arith.cmpi slt, %[[VAL_33]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_34]](%[[VAL_33]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_35:.*]]: index):
# CHECK: %[[VAL_36:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_35]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: quake.t %[[VAL_36]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_35]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_37:.*]]: index):
# CHECK: %[[VAL_38:.*]] = arith.addi %[[VAL_37]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_38]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_39:.*]] = cc.loop while ((%[[VAL_40:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_41:.*]] = arith.cmpi slt, %[[VAL_40]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_41]](%[[VAL_40]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_42:.*]]: index):
# CHECK: %[[VAL_43:.*]] = quake.extract_ref %[[VAL_3]]{{\[}}%[[VAL_42]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: quake.s %[[VAL_43]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_42]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_44:.*]]: index):
# CHECK: %[[VAL_45:.*]] = arith.addi %[[VAL_44]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_45]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/qreg_apply.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import os
import lit.formats
import lit.util
from lit.llvm import llvm_config
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'pyCUDAQ'
# testFormat: The test format to use to interpret tests.
#
# For now we require '&&' between commands, until they get globally killed and
# the test runner updated.
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.py']
config.substitutions.append(('%PATH%', config.environment['PATH']))
config.substitutions.append(('%llvmshlibdir', config.llvm_shlib_dir))
config.substitutions.append(('%pluginext', config.llvm_plugin_ext))
config.substitutions.append(('%llvmInclude', config.llvm_install + "/include"))
llvm_config.use_default_substitutions()
# ask llvm-config about asserts
llvm_config.feature_config([('--assertion-mode', {'ON': 'asserts'})])
# Targets
config.targets = frozenset(config.targets_to_build.split())
for arch in config.targets_to_build.split():
config.available_features.add(arch.lower() + '-registered-target')
# excludes: A list of directories to exclude from the testsuite. The 'Inputs'
# subdirectories contain auxiliary inputs for various tests in their parent
# directories.
config.excludes = [
'Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt', 'lit.cfg.py'
]
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.cudaq_obj_root,
'python/tests/compiler')
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.cudaq_tools_dir, append_path=True)
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
| cuda-quantum-main | python/tests/compiler/lit.cfg.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
@pytest.mark.parametrize("qubit_count", [1, 5])
def test_kernel_control_no_args(qubit_count):
"""
Tests `Kernel::control()` with another kernel that's
not parameterized. Test for both a single qubit and a register
of qubits as the controls.
"""
other_kernel = cudaq.make_kernel()
other_qubit = other_kernel.qalloc(qubit_count)
other_kernel.x(other_qubit)
kernel = cudaq.make_kernel()
control_qubit = kernel.qalloc(qubit_count)
# Call `kernel.control()`.
kernel.control(target=other_kernel, control=control_qubit)
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<1>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_0]]] : (!quake.veq<1>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() {
# CHECK: %[[VAL_0:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_1:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<1>
# CHECK: %[[VAL_3:.*]] = cc.loop while ((%[[VAL_4:.*]] = %[[VAL_1]]) -> (index)) {
# CHECK: %[[VAL_5:.*]] = arith.cmpi slt, %[[VAL_4]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_5]](%[[VAL_4]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_6:.*]]: index):
# CHECK: %[[VAL_7:.*]] = quake.extract_ref %[[VAL_2]]{{\[}}%[[VAL_6]]] : (!quake.veq<1>, index) -> !quake.ref
# CHECK: quake.x %[[VAL_7]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_6]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_8:.*]]: index):
# CHECK: %[[VAL_9:.*]] = arith.addi %[[VAL_8]], %[[VAL_0]] : index
# CHECK: cc.continue %[[VAL_9]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<5>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_0]]] : (!quake.veq<5>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() {
# CHECK: %[[VAL_0:.*]] = arith.constant 5 : index
# CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_3:.*]] = quake.alloca !quake.veq<5>
# CHECK: %[[VAL_4:.*]] = cc.loop while ((%[[VAL_5:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_6:.*]] = arith.cmpi slt, %[[VAL_5]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_6]](%[[VAL_5]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_7:.*]]: index):
# CHECK: %[[VAL_8:.*]] = quake.extract_ref %[[VAL_3]][%[[VAL_7]]] : (!quake.veq<5>, index) -> !quake.ref
# CHECK: quake.x %[[VAL_8]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_7]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_9:.*]]: index):
# CHECK: %[[VAL_10:.*]] = arith.addi %[[VAL_9]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_10]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
@pytest.mark.parametrize("qubit_count", [1, 5])
def test_kernel_control_float_args(qubit_count):
"""
Tests `Kernel::control()` with another kernel that's
parameterized by a float. Test for both a single qubit
and a register of qubits as the controls.
"""
other_kernel, other_float = cudaq.make_kernel(float)
other_qubit = other_kernel.qalloc()
other_kernel.rx(other_float, other_qubit)
kernel, float_ = cudaq.make_kernel(float)
control_qubit = kernel.qalloc(qubit_count)
# Call `kernel.control()`.
kernel.control(other_kernel, control_qubit, float_)
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<1>, f64) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<5>, f64) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
@pytest.mark.parametrize("qubit_count", [1, 5])
def test_kernel_control_int_args(qubit_count):
"""
Tests `Kernel::control()` with another kernel that's
parameterized by an int. Test for both a single qubit
and a register of qubits as the controls.
"""
other_kernel, other_int = cudaq.make_kernel(int)
other_qubit = other_kernel.qalloc(qubit_count)
# TODO:
# Would like to be able to test kernel operations that
# can accept an int.
kernel, _int = cudaq.make_kernel(int)
control_qubit = kernel.qalloc(qubit_count)
kernel.control(other_kernel, control_qubit, _int)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<1>, i32) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1>
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<5>, i32) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5>
# CHECK: return
# CHECK: }
@pytest.mark.parametrize("qubit_count", [1, 5])
def test_kernel_control_list_args(qubit_count):
"""
Tests `Kernel::control()` with another kernel that's
parameterized by a list. Test for both a single qubit
and a register of qubits as the controls.
"""
other_kernel, other_list = cudaq.make_kernel(list)
other_qubit = other_kernel.qalloc()
other_kernel.rx(other_list[0], other_qubit)
kernel, _list = cudaq.make_kernel(list)
control_qubit = kernel.qalloc(qubit_count)
# Call `kernel.control()`.
kernel.control(other_kernel, control_qubit, _list)
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<1>, !cc.stdvec<f64>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec<f64>) -> !cc.ptr<!cc.array<f64 x ?>>
# CHECK: %[[VAL_4:.*]] = cc.compute_ptr %[[VAL_2]][0] : (!cc.ptr<!cc.array<f64 x ?>>) -> !cc.ptr<f64>
# CHECK: %[[VAL_3:.*]] = cc.load %[[VAL_4]] : !cc.ptr<f64>
# CHECK: quake.rx (%[[VAL_3]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5>
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<5>, !cc.stdvec<f64>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec<f64>) -> !cc.ptr<!cc.array<f64 x ?>>
# CHECK: %[[VAL_4:.*]] = cc.compute_ptr %[[VAL_2]][0] : (!cc.ptr<!cc.array<f64 x ?>>) -> !cc.ptr<f64>
# CHECK: %[[VAL_3:.*]] = cc.load %[[VAL_4]] : !cc.ptr<f64>
# CHECK: quake.rx (%[[VAL_3]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_sample_control_qubit_args():
"""
Tests `Kernel::control()` with another kernel that's
parameterized by a `cudaq.qubit`. Uses a single qubit
as the `control`. Checks for correctness on simulator.
"""
# `other_kernel` applies an X-gate to the
# parameterized qubit.
other_kernel, other_qubit = cudaq.make_kernel(cudaq.qubit)
other_kernel.x(other_qubit)
kernel = cudaq.make_kernel()
# Allocate control and target qubits to `kernel`
target_qubit = kernel.qalloc()
control_qubit = kernel.qalloc()
# Apply `other_kernel` within `kernel` on the `target_qubit`.
kernel.apply_call(other_kernel, target_qubit)
kernel.h(control_qubit)
# Apply `other_kernel` to `kernel` as a controlled operation.
# `other_kernel` takes `target_qubit` as its argument, while `control_qubit`
# serves as the control qubit for the operation.
kernel.control(other_kernel, control_qubit, target_qubit)
# Apply another hadamard to `control_qubit` and measure.
kernel.h(control_qubit)
kernel.mz(control_qubit)
# Simulate `kernel` and check its expectation value.
result = cudaq.sample(kernel)
want_expectation = 0.0
got_expectation = result.expectation_z()
assert np.isclose(want_expectation, got_expectation, atol=1e-1)
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!quake.ref) -> ()
# CHECK: quake.h %[[VAL_1]] : (!quake.ref) -> ()
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.ref, !quake.ref) -> ()
# CHECK: quake.h %[[VAL_1]] : (!quake.ref) -> ()
# CHECK: %[[VAL_2:.*]] = quake.mz %[[VAL_1]] name "" : (!quake.ref) -> i1
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_sample_control_qreg_args():
"""
Tests `Kernel::control()` with another kernel that's
parameterized by a `cudaq.qubit`. Uses a register as
the `control`. Checks for correctness on the simulator.
"""
# `other_kernel` applies an X-gate to the
# parameterized qubit.
other_kernel, other_qubit = cudaq.make_kernel(cudaq.qubit)
other_kernel.x(other_qubit)
kernel = cudaq.make_kernel()
# Allocating a register to use as the `control`
# in our call to `kernel.control()`.
control_register = kernel.qalloc(2)
target_qubit = kernel.qalloc()
kernel.x(control_register[0])
kernel.x(target_qubit)
# Call `other_kernel` with the argument `target_qubit` from `kernel`.
# Apply `other_kernel` (with the argument `target_qubit`), as a controlled
# operation onto this `kernel`.
kernel.control(other_kernel, control_register, target_qubit)
# Measure.
kernel.mz(control_register)
kernel.mz(target_qubit)
# Simulate and get results.
result = cudaq.sample(kernel)
assert len(result) == 1
# Should be in the state `101`
assert '101' in result
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = arith.constant 2 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_5:.*]] = quake.alloca !quake.veq<2>
# CHECK: %[[VAL_6:.*]] = quake.alloca !quake.ref
# CHECK: %[[VAL_7:.*]] = quake.extract_ref %[[VAL_5]][0] : (!quake.veq<2>) -> !quake.ref
# CHECK: quake.x %[[VAL_7]] : (!quake.ref) -> ()
# CHECK: quake.x %[[VAL_6]] : (!quake.ref) -> ()
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_5]]] %[[VAL_6]] : (!quake.veq<2>, !quake.ref) -> ()
# CHECK: %[[VAL_8:.*]] = cc.alloca !cc.array<i1 x 2>
# CHECK: %[[VAL_9:.*]] = cc.loop while ((%[[VAL_10:.*]] = %[[VAL_3]]) -> (index)) {
# CHECK: %[[VAL_11:.*]] = arith.cmpi slt, %[[VAL_10]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_11]](%[[VAL_10]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_12:.*]]: index):
# CHECK: %[[VAL_13:.*]] = quake.extract_ref %[[VAL_5]][%[[VAL_12]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: %[[VAL_14:.*]] = quake.mz %[[VAL_13]] : (!quake.ref) -> i1
# CHECK: %[[VAL_15:.*]] = arith.index_cast %[[VAL_12]] : index to i64
# CHECK: %[[VAL_16:.*]] = cc.compute_ptr %[[VAL_8]][%[[VAL_15]]] : (!cc.ptr<!cc.array<i1 x 2>>, i64) -> !cc.ptr<i1>
# CHECK: cc.store %[[VAL_14]], %[[VAL_16]] : !cc.ptr<i1>
# CHECK: cc.continue %[[VAL_12]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_17:.*]]: index):
# CHECK: %[[VAL_18:.*]] = arith.addi %[[VAL_17]], %[[VAL_2]] : index
# CHECK: cc.continue %[[VAL_18]] : index
# CHECK: } {invariant}
# CHECK: %[[VAL_19:.*]] = quake.mz %[[VAL_6]] name "" : (!quake.ref) -> i1
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_sample_apply_call_control():
"""
More advanced integration that tests a kernel that:
1. Calls `apply_call()` on another parameterized kernel (`x_kernel`).
2. Calls `control()` on another parameterized kernel (`h_kernel`).
"""
# Create another kernel that applies an X-gate
# to a parameterized qubit.
x_kernel, x_qubit = cudaq.make_kernel(cudaq.qubit)
x_kernel.x(x_qubit)
# Create another kernel that applies a Hadamard to
# a parameterized qubit.
h_kernel, h_qubit = cudaq.make_kernel(cudaq.qubit)
h_kernel.h(h_qubit)
kernel = cudaq.make_kernel()
target_qubit = kernel.qalloc()
control_qubit = kernel.qalloc()
# Call `x_kernel` from `kernel` with `target_qubit` as its argument.
kernel.apply_call(x_kernel, target_qubit)
kernel.h(control_qubit)
# Apply `h_kernel` to `kernel` as a controlled operation.
# `h_kernel` takes `target_qubit` as its argument, while `control_qubit`
# serves as the control qubit for the operation.
kernel.control(h_kernel, control_qubit, target_qubit)
kernel.h(control_qubit)
kernel.mz(control_qubit)
# Simulate `kernel` and check its expectation value.
result = cudaq.sample(kernel)
want_expectation = -1. / np.sqrt(2.)
got_expectation = result.expectation_z()
assert np.isclose(want_expectation, got_expectation, atol=1e-1)
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!quake.ref) -> ()
# CHECK: quake.h %[[VAL_1]] : (!quake.ref) -> ()
# CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.ref, !quake.ref) -> ()
# CHECK: quake.h %[[VAL_1]] : (!quake.ref) -> ()
# CHECK: %[[VAL_2:.*]] = quake.mz %[[VAL_1]] name "" : (!quake.ref) -> i1
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {
# CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/control.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_make_kernel_mixed_args():
"""
Test `cudaq.make_kernel` with arguments of different types.
"""
kernel, parameter_1, parameter_2 = cudaq.make_kernel(list, float)
# Kernel should have 2 arguments and parameters.
got_arguments = kernel.arguments
got_argument_count = kernel.argument_count
assert len(got_arguments) == 2
assert got_argument_count == 2
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>,
# CHECK-SAME: %[[VAL_1:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/mixed_args.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_conditional():
"""
Test the conditional measurement functionality of `cudaq.Kernel`.
"""
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(2)
qubit_0 = qreg[0]
qubit_1 = qreg[1]
# Rotate qubit 0 with an X-gate and measure.
kernel.x(qubit_0)
measurement_ = kernel.mz(qubit_0, "measurement_")
# Check that we can use conditionals on a measurement
def test_function():
"""Rotate and measure the first qubit."""
kernel.x(qubit_1)
kernel.mz(qubit_1)
# If measurement is true, run the test function.
kernel.c_if(measurement_, test_function)
# Apply instructions to each qubit and repeat `c_if`
# using keyword arguments.
kernel.x(qreg)
kernel.c_if(measurement=measurement_, function=test_function)
kernel()
assert kernel.arguments == []
assert kernel.argument_count == 0
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = arith.constant 2 : index
# CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_5:.*]] = quake.alloca !quake.veq<2>
# CHECK: %[[VAL_6:.*]] = quake.extract_ref %[[VAL_5]][0] : (!quake.veq<2>) -> !quake.ref
# CHECK: %[[VAL_7:.*]] = quake.extract_ref %[[VAL_5]][1] : (!quake.veq<2>) -> !quake.ref
# CHECK: quake.x %[[VAL_6]] : (!quake.ref) -> ()
# CHECK: %[[VAL_8:.*]] = quake.mz %[[VAL_6]] name "measurement_" : (!quake.ref) -> i1
# CHECK: cc.if(%[[VAL_8]]) {
# CHECK: quake.x %[[VAL_7]] : (!quake.ref) -> ()
# CHECK: %[[VAL_9:.*]] = quake.mz %[[VAL_7]] name "" : (!quake.ref) -> i1
# CHECK: }
# CHECK: %[[VAL_10:.*]] = cc.loop while ((%[[VAL_11:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_12:.*]] = arith.cmpi slt, %[[VAL_11]], %[[VAL_0]] : index
# CHECK: cc.condition %[[VAL_12]](%[[VAL_11]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_13:.*]]: index):
# CHECK: %[[VAL_14:.*]] = quake.extract_ref %[[VAL_5]][%[[VAL_13]]] : (!quake.veq<2>, index) -> !quake.ref
# CHECK: quake.x %[[VAL_14]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_13]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_15:.*]]: index):
# CHECK: %[[VAL_16:.*]] = arith.addi %[[VAL_15]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_16]] : index
# CHECK: } {invariant}
# CHECK: cc.if(%[[VAL_8]]) {
# CHECK: quake.x %[[VAL_7]] : (!quake.ref) -> ()
# CHECK: %[[VAL_17:.*]] = quake.mz %[[VAL_7]] name "" : (!quake.ref) -> i1
# CHECK: }
# CHECK: return
# CHECK: }
def test_kernel_conditional_with_sample():
"""
Test the conditional measurement functionality of `cudaq.Kernel`
and assert that it runs as expected on the QPU.
"""
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
def then_function():
kernel.x(qubit)
kernel.x(qubit)
# Measure the qubit.
measurement_ = kernel.mz(qubit)
# Apply `then_function` to the `kernel` if
# the qubit was measured in the 1-state.
kernel.c_if(measurement_, then_function)
print(kernel)
    # Sample the kernel to execute it, including the conditional branch.
result = cudaq.sample(kernel, shots_count=10)
result.dump()
assert len(result) == 1
# Qubit should be in the 0-state after undergoing
# two X rotations.
assert '0' in result
# Check the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: %[[VAL_1:.*]] = quake.mz %[[VAL_0]] name "auto_register_0" : (!quake.ref) -> i1
# CHECK: cc.if(%[[VAL_1]]) {
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: }
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/conditional.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_apply_call_no_args():
"""
Tests that we can call a non-parameterized kernel (`other_kernel`),
from a :class:`Kernel`.
"""
other_kernel = cudaq.make_kernel()
other_qubit = other_kernel.qalloc()
other_kernel.x(other_qubit)
kernel = cudaq.make_kernel()
kernel.apply_call(other_kernel)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() : () -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_apply_call_qubit_args():
"""
Tests that we can call another kernel that's parameterized
by a qubit (`other_kernel`), from a :class:`Kernel`.
"""
other_kernel, other_qubit = cudaq.make_kernel(cudaq.qubit)
other_kernel.h(other_qubit)
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
kernel.apply_call(other_kernel, qubit)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {
# CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_apply_call_qreg_args():
"""
    Tests that we can call another kernel that's parameterized
    by a register of qubits (`other_kernel`), from a :class:`Kernel`.
"""
other_kernel, other_qreg = cudaq.make_kernel(cudaq.qreg)
other_kernel.h(other_qreg)
kernel = cudaq.make_kernel()
qreg = kernel.qalloc(5)
kernel.apply_call(other_kernel, qreg)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<5>
# CHECK: %[[VAL_1:.*]] = quake.relax_size %[[VAL_0]] : (!quake.veq<5>) -> !quake.veq<?>
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_1]]) : (!quake.veq<?>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq<?>) {
# CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
# CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
# CHECK: %[[VAL_3:.*]] = quake.veq_size %[[VAL_0]] : (!quake.veq<?>) -> i64
# CHECK: %[[VAL_4:.*]] = arith.index_cast %[[VAL_3]] : i64 to index
# CHECK: %[[VAL_5:.*]] = cc.loop while ((%[[VAL_6:.*]] = %[[VAL_2]]) -> (index)) {
# CHECK: %[[VAL_7:.*]] = arith.cmpi slt, %[[VAL_6]], %[[VAL_4]] : index
# CHECK: cc.condition %[[VAL_7]](%[[VAL_6]] : index)
# CHECK: } do {
# CHECK: ^bb0(%[[VAL_8:.*]]: index):
# CHECK: %[[VAL_9:.*]] = quake.extract_ref %[[VAL_0]]{{\[}}%[[VAL_8]]] : (!quake.veq<?>, index) -> !quake.ref
# CHECK: quake.h %[[VAL_9]] : (!quake.ref) -> ()
# CHECK: cc.continue %[[VAL_8]] : index
# CHECK: } step {
# CHECK: ^bb0(%[[VAL_10:.*]]: index):
# CHECK: %[[VAL_11:.*]] = arith.addi %[[VAL_10]], %[[VAL_1]] : index
# CHECK: cc.continue %[[VAL_11]] : index
# CHECK: } {invariant}
# CHECK: return
# CHECK: }
def test_kernel_apply_call_float_args():
"""
Tests that we can call another kernel that's parameterized
by a float (`other_kernel`), from a :class:`Kernel`.
"""
other_kernel, other_float = cudaq.make_kernel(float)
other_qubit = other_kernel.qalloc()
other_kernel.rx(other_float, other_qubit)
kernel, _float = cudaq.make_kernel(float)
kernel.apply_call(other_kernel, _float)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (f64) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_apply_call_int_args():
"""
Tests that we can call another kernel that's parameterized
by an int (`other_kernel`), from a :class:`Kernel`.
"""
other_kernel, other_int = cudaq.make_kernel(int)
other_qubit = other_kernel.qalloc()
# TODO:
# Would like to be able to test kernel operations that
# can accept an int.
kernel, _int = cudaq.make_kernel(int)
kernel.apply_call(other_kernel, _int)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {"cudaq-entrypoint"} {
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (i32) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: return
# CHECK: }
def test_kernel_apply_call_list_args():
"""
Tests that we can call another kernel that's parameterized
by a list (`other_kernel`), from a :class:`Kernel`.
"""
other_kernel, other_list = cudaq.make_kernel(list)
other_qubit = other_kernel.qalloc()
other_kernel.rx(other_list[0], other_qubit)
kernel, _list = cudaq.make_kernel(list)
kernel.apply_call(other_kernel, _list)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) attributes {"cudaq-entrypoint"} {
# CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!cc.stdvec<f64>) -> ()
# CHECK: return
# CHECK: }
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec<f64>) {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref
# CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec<f64>) -> !cc.ptr<!cc.array<f64 x ?>>
# CHECK: %[[VAL_4:.*]] = cc.compute_ptr %[[VAL_2]][0] : (!cc.ptr<!cc.array<f64 x ?>>) -> !cc.ptr<f64>
# CHECK: %[[VAL_3:.*]] = cc.load %[[VAL_4]] : !cc.ptr<f64>
# CHECK: quake.rx (%[[VAL_3]]) %[[VAL_1]] : (f64,
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/call.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_non_param_1q():
"""
Test the `cudaq.Kernel` on each non-parameterized single qubit gate.
Each gate is applied to a single qubit in a 1-qubit register.
"""
# Empty constructor (no kernel type).
kernel = cudaq.make_kernel()
    # Allocating a register of size 1; its lone qubit is addressed by index.
qubit = kernel.qalloc(1)
# Apply each gate to the qubit.
kernel.h(target=qubit[0])
kernel.x(target=qubit[0])
kernel.y(target=qubit[0])
kernel.z(qubit[0])
kernel.t(qubit[0])
kernel.s(qubit[0])
kernel.tdg(qubit[0])
kernel.sdg(qubit[0])
kernel()
# Kernel arguments should still be an empty list.
assert kernel.arguments == []
# Kernel should still have 0 parameters.
assert kernel.argument_count == 0
# Check the conversion to Quake.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %0 = quake.alloca !quake.veq<1>
# CHECK: %[[VAL_0:.*]] = quake.extract_ref %0[0] : (!quake.veq<1>) -> !quake.ref
# CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: quake.y %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: quake.z %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: quake.t %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: quake.s %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: quake.t<adj> %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: quake.s<adj> %[[VAL_0]] : (!quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_param_1q():
"""
Test the `cudaq.Kernel` on each single-qubit, parameterized gate.
Each gate is applied to a single qubit in a 1-qubit register.
Note: at this time, we can only apply rotation gates to one qubit at a time,
not to an entire register.
"""
kernel, parameter = cudaq.make_kernel(float)
qubit = kernel.qalloc(1)
# Apply each parameterized gate to the qubit.
# Test both with and without keyword arguments.
kernel.rx(parameter=parameter, target=qubit[0])
kernel.ry(parameter, qubit[0])
kernel.rz(parameter, qubit[0])
kernel.r1(parameter, qubit[0])
kernel(3.14)
# Should have 1 argument and parameter.
got_arguments = kernel.arguments
got_argument_count = kernel.argument_count
assert len(got_arguments) == 1
assert got_argument_count == 1
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} {
# CHECK: %0 = quake.alloca !quake.veq<1>
# CHECK: %[[VAL_1:.*]] = quake.extract_ref %0[0] : (!quake.veq<1>) -> !quake.ref
# CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: quake.ry (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: quake.rz (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: quake.r1 (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> ()
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/one_qubit.py |
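The parameterized test above only inspects the emitted MLIR; the float argument is bound when the kernel is executed. A minimal sketch, assuming the default simulator target and using pi as an arbitrary angle, of sampling the same kind of kernel:

import numpy as np
import cudaq

rot_kernel, theta = cudaq.make_kernel(float)
rot_qubit = rot_kernel.qalloc()
rot_kernel.rx(theta, rot_qubit)
rot_kernel.mz(rot_qubit)

# An X rotation by pi takes |0> to |1> (up to global phase), so every
# shot should land in the 1-state.
counts = cudaq.sample(rot_kernel, np.pi, shots_count=100)
assert '1' in counts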
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_2q():
"""
Test the `cudaq.Kernel` on each two-qubit gate (controlled
single qubit gates). We alternate the order of the control and target
qubits between each successive gate.
"""
kernel = cudaq.make_kernel()
# Allocate a register of size 2.
qreg = kernel.qalloc(2)
qubit_0 = qreg[0]
qubit_1 = qreg[1]
# First three gates check the overload for providing a single control
# qubit as a list of length 1.
# Test both with and without keyword arguments.
kernel.ch(controls=[qubit_0], target=qubit_1)
kernel.cx([qubit_1], qubit_0)
kernel.cy([qubit_0], qubit_1)
# Check the overload for providing a single control qubit on its own.
# Test both with and without keyword arguments.
kernel.cz(control=qubit_1, target=qubit_0)
kernel.ct(qubit_0, qubit_1)
kernel.cs(qubit_1, qubit_0)
kernel()
assert kernel.arguments == []
assert kernel.argument_count == 0
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<2>
# CHECK: %[[VAL_3:.*]] = quake.extract_ref %[[VAL_2]][0] : (!quake.veq<2>) -> !quake.ref
# CHECK: %[[VAL_4:.*]] = quake.extract_ref %[[VAL_2]][1] : (!quake.veq<2>) -> !quake.ref
# CHECK: quake.h [%[[VAL_3]]] %[[VAL_4]] : (!quake.ref, !quake.ref) -> ()
# CHECK: quake.x [%[[VAL_4]]] %[[VAL_3]] : (!quake.ref, !quake.ref) -> ()
# CHECK: quake.y [%[[VAL_3]]] %[[VAL_4]] : (!quake.ref, !quake.ref) -> ()
# CHECK: quake.z [%[[VAL_4]]] %[[VAL_3]] : (!quake.ref, !quake.ref) -> ()
# CHECK: quake.t [%[[VAL_3]]] %[[VAL_4]] : (!quake.ref, !quake.ref) -> ()
# CHECK: quake.s [%[[VAL_4]]] %[[VAL_3]] : (!quake.ref, !quake.ref) -> ()
# CHECK: return
# CHECK: }
def test_kernel_3q():
"""
Test the `cudaq.Kernel` on each multi-qubit gate (multi-controlled single
qubit gates). We do this for the case of a 3-qubit kernel.
"""
kernel = cudaq.make_kernel()
# Allocate a register of size 3.
qreg = kernel.qalloc(3)
qubit_0 = qreg[0]
qubit_1 = qreg[1]
qubit_2 = qreg[2]
# Apply each gate to entire register.
# Note: we alternate between orders to make the circuit less trivial.
kernel.ch([qubit_0, qubit_1], qubit_2)
kernel.cx([qubit_2, qubit_0], qubit_1)
kernel.cy([qubit_1, qubit_2], qubit_0)
kernel.cz([qubit_0, qubit_1], qubit_2)
kernel.ct([qubit_2, qubit_0], qubit_1)
kernel.cs([qubit_1, qubit_2], qubit_0)
kernel()
assert (kernel.arguments == [])
assert (kernel.argument_count == 0)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_3:.*]] = quake.alloca !quake.veq<3>
# CHECK: %[[VAL_4:.*]] = quake.extract_ref %[[VAL_3]][0] : (!quake.veq<3>) -> !quake.ref
# CHECK: %[[VAL_5:.*]] = quake.extract_ref %[[VAL_3]][1] : (!quake.veq<3>) -> !quake.ref
# CHECK: %[[VAL_6:.*]] = quake.extract_ref %[[VAL_3]][2] : (!quake.veq<3>) -> !quake.ref
# CHECK: quake.h [%[[VAL_4]], %[[VAL_5]]] %[[VAL_6]] : (!quake.ref, !quake.ref, !quake.ref) -> ()
# CHECK: quake.x [%[[VAL_6]], %[[VAL_4]]] %[[VAL_5]] : (!quake.ref, !quake.ref, !quake.ref) -> ()
# CHECK: quake.y [%[[VAL_5]], %[[VAL_6]]] %[[VAL_4]] : (!quake.ref, !quake.ref, !quake.ref) -> ()
# CHECK: quake.z [%[[VAL_4]], %[[VAL_5]]] %[[VAL_6]] : (!quake.ref, !quake.ref, !quake.ref) -> ()
# CHECK: quake.t [%[[VAL_6]], %[[VAL_4]]] %[[VAL_5]] : (!quake.ref, !quake.ref, !quake.ref) -> ()
# CHECK: quake.s [%[[VAL_5]], %[[VAL_6]]] %[[VAL_4]] : (!quake.ref, !quake.ref, !quake.ref) -> ()
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/multi_qubit.py |
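Beyond checking the emitted Quake, the same single-control overloads are what one would use to build an entangling circuit end to end. A minimal sketch, assuming the default simulator target, that prepares and samples a 3-qubit GHZ state:

import cudaq

ghz_kernel = cudaq.make_kernel()
ghz_qubits = ghz_kernel.qalloc(3)
ghz_kernel.h(ghz_qubits[0])
ghz_kernel.cx(ghz_qubits[0], ghz_qubits[1])
ghz_kernel.cx(ghz_qubits[1], ghz_qubits[2])
ghz_kernel.mz(ghz_qubits)

# Ideal sampling yields only the all-zeros and all-ones bitstrings.
ghz_counts = cudaq.sample(ghz_kernel, shots_count=1000)
assert '000' in ghz_counts and '111' in ghz_counts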
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_make_kernel_no_input():
"""
Test `cudaq.make_kernel` without any inputs.
"""
# Empty constructor (no kernel type).
kernel = cudaq.make_kernel()
# Kernel arguments should be an empty list.
assert kernel.arguments == []
# Kernel should have 0 parameters.
assert kernel.argument_count == 0
# Print the quake string to the terminal. FileCheck will ensure that
# the MLIR doesn't contain any instructions or register allocations.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK-NEXT: return
# CHECK-NEXT: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"]) | cuda-quantum-main | python/tests/compiler/no_input.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# RUN: PYTHONPATH=../../ pytest -rP %s | FileCheck %s
import os
import pytest
import numpy as np
import cudaq
def test_kernel_qalloc_empty():
"""
Test `cudaq.Kernel.qalloc()` when no arguments are provided.
"""
kernel = cudaq.make_kernel()
# Use `qalloc()` with no function arguments.
qubit = kernel.qalloc()
# Assert that only 1 qubit is allocated in the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref
# CHECK: return
# CHECK: }
def test_kernel_qalloc_qreg():
"""
    Test `cudaq.Kernel.qalloc()` when a qubit count is given, returning
    a handle to a register of qubits.
"""
kernel = cudaq.make_kernel()
# Use `qalloc()` with 10 qubits allocated.
qubit = kernel.qalloc(10)
# Assert that 10 qubits have been allocated in the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<10>
# CHECK: return
# CHECK: }
def test_kernel_qalloc_qreg_keyword():
"""
    Test `cudaq.Kernel.qalloc()` when a qubit count is given via keyword
    argument, returning a handle to a register of qubits.
"""
kernel = cudaq.make_kernel()
# Use `qalloc()` with 10 qubits allocated.
qubit = kernel.qalloc(qubit_count=10)
# Assert that 10 qubits have been allocated in the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<10>
# CHECK: return
# CHECK: }
def test_kernel_qalloc_quake_val():
"""
Test `cudaq.Kernel.qalloc()` when a `QuakeValue` is provided.
"""
kernel, value = cudaq.make_kernel(int)
qreg = kernel.qalloc(value)
qubit_count = 10
kernel(qubit_count)
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(
# CHECK-SAME: %[[VAL_0:.*]]: i32) attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<?>[%[[VAL_0]] : i32]
# CHECK: return
# CHECK: }
def test_kernel_qalloc_qubit():
"""
    Test `cudaq.Kernel.qalloc()` when a count of 1 is given, returning
    a handle to a single-qubit register.
"""
kernel = cudaq.make_kernel()
# Use `qalloc()` with 1 qubit allocated.
qubit = kernel.qalloc(1)
# Assert that only 1 qubit is allocated in the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<1>
# CHECK: return
# CHECK: }
def test_kernel_qalloc_qubit_keyword():
"""
    Test `cudaq.Kernel.qalloc()` when a count of 1 is given via keyword
    argument, returning a handle to a single-qubit register.
"""
kernel = cudaq.make_kernel()
# Use `qalloc()` with 1 qubit allocated and `qubit_count` keyword used.
qubit = kernel.qalloc(qubit_count=1)
# Assert that only 1 qubit is allocated in the MLIR.
print(kernel)
# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} {
# CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<1>
# CHECK: return
# CHECK: }
# leave for gdb debugging
if __name__ == "__main__":
loc = os.path.abspath(__file__)
pytest.main([loc, "-rP"])
| cuda-quantum-main | python/tests/compiler/qalloc.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
from importlib.metadata import distribution
import os.path
def _find_package_location_by_root(package_name):
dist = distribution(package_name)
roots = set()
for f in dist.files:
dirname = os.path.dirname(str(f.locate()))
if not dirname.endswith("dist-info") and not dirname.endswith(
"__pycache__"):
roots.add(dirname)
path = os.path.commonprefix(tuple(roots))
return path
def _find_package_location_by_license(package_name):
dist = distribution(package_name)
for f in dist.files:
if str(f).endswith("LICENSE"):
license = f
break
else:
raise RuntimeError(f"cannot locate the directory for {package_name}")
path = os.path.dirname(license.locate())
return path
def get_library_path(package_name):
subdir = ""
if package_name.startswith("nvidia-"):
subdir = "-".join(package_name.split("-")[1:-1])
try:
package_location = _find_package_location_by_license(package_name)
except:
package_location = _find_package_location_by_root(package_name)
dirname = os.path.join(package_location, subdir, "lib")
assert os.path.isdir(dirname)
return dirname
| cuda-quantum-main | python/cudaq/_packages.py |
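For context, `get_library_path` above is what the package `__init__` (next entry) relies on to locate the CUDA math libraries shipped in separate wheels. A minimal usage sketch, assuming `cudaq` is importable and that the named wheel (`nvidia-cublas-cu11`, used here purely as an example) is installed:

import os
from cudaq._packages import get_library_path

try:
    libdir = get_library_path("nvidia-cublas-cu11")
    print("cuBLAS shared libraries live in:", libdir)
    print("sample of contents:", sorted(os.listdir(libdir))[:3])
except Exception as exc:
    # Raised when the wheel is not installed or its layout is unexpected.
    print("could not resolve package location:", exc)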
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import sys
import os, os.path
from ._packages import *
if not "CUDAQ_DYNLIBS" in os.environ:
try:
cublas_libs = get_library_path("nvidia-cublas-cu11")
cublas_path = os.path.join(cublas_libs, "libcublas.so.11")
cublasLt_path = os.path.join(cublas_libs, "libcublasLt.so.11")
custatevec_libs = get_library_path("custatevec-cu11")
custatevec_path = os.path.join(custatevec_libs, "libcustatevec.so.1")
cutensornet_libs = get_library_path("cutensornet-cu11")
cutensornet_path = os.path.join(cutensornet_libs, "libcutensornet.so.2")
os.environ[
"CUDAQ_DYNLIBS"] = f"{cublasLt_path}:{cublas_path}:{custatevec_path}:{cutensornet_path}"
except:
pass
from ._pycudaq import *
from .domains import chemistry
initKwargs = {'target': 'default'}
if '-target' in sys.argv:
initKwargs['target'] = sys.argv[sys.argv.index('-target') + 1]
if '--target' in sys.argv:
initKwargs['target'] = sys.argv[sys.argv.index('--target') + 1]
initialize_cudaq(**initKwargs)
| cuda-quantum-main | python/cudaq/__init__.py |
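The target selection above scans `sys.argv` directly at import time, with `--target` taking precedence over `-target` because it is checked last. A standalone sketch of the same scan, factored into a testable helper (`_parse_target` is a hypothetical name, not part of the cudaq API):

def _parse_target(argv, default="default"):
    """Mirror the argv scan above: the last-checked flag wins."""
    target = default
    for flag in ("-target", "--target"):
        if flag in argv:
            target = argv[argv.index(flag) + 1]
    return target

assert _parse_target(["prog.py"]) == "default"
assert _parse_target(["prog.py", "-target", "qpp-cpu"]) == "qpp-cpu"
assert _parse_target(["prog.py", "-target", "qpp-cpu", "--target", "nvidia"]) == "nvidia"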
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
from .chemistry import *
| cuda-quantum-main | python/cudaq/domains/__init__.py |