repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
airflow
|
airflow-main/airflow/providers/google/cloud/links/dataform.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataflow links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
DATAFORM_BASE_LINK = "/bigquery/dataform"
DATAFORM_WORKFLOW_INVOCATION_LINK = (
DATAFORM_BASE_LINK
+ "/locations/{region}/repositories/{repository_id}/workflows/"
+ "{workflow_invocation_id}?project={project_id}"
)
DATAFORM_REPOSITORY_LINK = (
DATAFORM_BASE_LINK
+ "/locations/{region}/repositories/{repository_id}/"
+ "details/workspaces?project={project_id}"
)
DATAFORM_WORKSPACE_LINK = (
DATAFORM_BASE_LINK
+ "/locations/{region}/repositories/{repository_id}/"
+ "workspaces/{workspace_id}/"
+ "files/?project={project_id}"
)
class DataformWorkflowInvocationLink(BaseGoogleLink):
"""Helper class for constructing Dataflow Job Link."""
name = "Dataform Workflow Invocation"
key = "dataform_workflow_invocation_config"
format_str = DATAFORM_WORKFLOW_INVOCATION_LINK
@staticmethod
def persist(
operator_instance: BaseOperator,
context: Context,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
):
operator_instance.xcom_push(
context,
key=DataformWorkflowInvocationLink.key,
value={
"project_id": project_id,
"region": region,
"repository_id": repository_id,
"workflow_invocation_id": workflow_invocation_id,
},
)
class DataformRepositoryLink(BaseGoogleLink):
"""Helper class for constructing Dataflow repository link."""
name = "Dataform Repository"
key = "dataform_repository"
format_str = DATAFORM_REPOSITORY_LINK
@staticmethod
def persist(
operator_instance: BaseOperator,
context: Context,
project_id: str,
region: str,
repository_id: str,
) -> None:
operator_instance.xcom_push(
context=context,
key=DataformRepositoryLink.key,
value={
"project_id": project_id,
"region": region,
"repository_id": repository_id,
},
)
class DataformWorkspaceLink(BaseGoogleLink):
"""Helper class for constructing Dataform workspace link."""
name = "Dataform Workspace"
key = "dataform_workspace"
format_str = DATAFORM_WORKSPACE_LINK
@staticmethod
def persist(
operator_instance: BaseOperator,
context: Context,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
) -> None:
operator_instance.xcom_push(
context=context,
key=DataformWorkspaceLink.key,
value={
"project_id": project_id,
"region": region,
"repository_id": repository_id,
"workspace_id": workspace_id,
},
)
| 3,927 | 29.6875 | 68 |
py
|
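A minimal sketch of how the Dataform link classes above are typically wired into an operator; the operator name and its constructor arguments are hypothetical, and only the ``persist()`` signature shown above is assumed. The link is declared in ``operator_extra_links`` and ``persist()`` is called from ``execute()`` so the rendered button can later resolve the URL from XCom.

from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.dataform import DataformWorkflowInvocationLink


class MyDataformOperator(BaseOperator):  # hypothetical example operator
    operator_extra_links = (DataformWorkflowInvocationLink(),)

    def __init__(self, *, project_id, region, repository_id, workflow_invocation_id, **kwargs):
        super().__init__(**kwargs)
        self.project_id = project_id
        self.region = region
        self.repository_id = repository_id
        self.workflow_invocation_id = workflow_invocation_id

    def execute(self, context):
        # Store the link parameters in XCom so the extra link can be rendered in the UI.
        DataformWorkflowInvocationLink.persist(
            operator_instance=self,
            context=context,
            project_id=self.project_id,
            region=self.region,
            repository_id=self.repository_id,
            workflow_invocation_id=self.workflow_invocation_id,
        )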
airflow
|
airflow-main/airflow/providers/google/cloud/links/bigquery_dts.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery Data Transfer links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
BIGQUERY_BASE_LINK = "/bigquery/transfers"
BIGQUERY_DTS_LINK = BIGQUERY_BASE_LINK + "/locations/{region}/configs/{config_id}/runs?project={project_id}"
class BigQueryDataTransferConfigLink(BaseGoogleLink):
"""Helper class for constructing BigQuery Data Transfer Config Link."""
name = "BigQuery Data Transfer Config"
key = "bigquery_dts_config"
format_str = BIGQUERY_DTS_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
region: str,
config_id: str,
project_id: str,
):
task_instance.xcom_push(
context,
key=BigQueryDataTransferConfigLink.key,
value={"project_id": project_id, "region": region, "config_id": config_id},
)
| 1,871 | 34.320755 | 108 |
py
|
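A standalone sketch of the URL that ``BigQueryDataTransferConfigLink`` above resolves to; the region, config id, and project are hypothetical, and it is assumed that the shared ``BaseGoogleLink`` prefixes the Cloud Console root to the relative format string.

BIGQUERY_DTS_LINK = "/bigquery/transfers/locations/{region}/configs/{config_id}/runs?project={project_id}"

# The persisted XCom dict is substituted into the format string to build the final link.
conf = {"region": "us", "config_id": "1234abcd-0000-0000-0000-000000000000", "project_id": "my-project"}
print("https://console.cloud.google.com" + BIGQUERY_DTS_LINK.format(**conf))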
airflow
|
airflow-main/airflow/providers/google/cloud/links/datafusion.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Compute Engine links."""
from __future__ import annotations
from typing import TYPE_CHECKING, ClassVar
from airflow.models import BaseOperatorLink, XCom
if TYPE_CHECKING:
from airflow.models import BaseOperator
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
BASE_LINK = "https://console.cloud.google.com/data-fusion"
DATAFUSION_INSTANCE_LINK = BASE_LINK + "/locations/{region}/instances/{instance_name}?project={project_id}"
DATAFUSION_PIPELINES_LINK = "{uri}/cdap/ns/default/pipelines"
DATAFUSION_PIPELINE_LINK = "{uri}/pipelines/ns/default/view/{pipeline_name}"
class BaseGoogleLink(BaseOperatorLink):
"""Link for Google operators.
Prevents adding ``https://console.cloud.google.com`` in front of every link
where a full URI is used.
"""
name: ClassVar[str]
key: ClassVar[str]
format_str: ClassVar[str]
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
conf = XCom.get_value(key=self.key, ti_key=ti_key)
if not conf:
return ""
if self.format_str.startswith("http"):
return self.format_str.format(**conf)
return self.format_str.format(**conf)
class DataFusionInstanceLink(BaseGoogleLink):
"""Helper class for constructing Data Fusion Instance link."""
name = "Data Fusion Instance"
key = "instance_conf"
format_str = DATAFUSION_INSTANCE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
location: str,
instance_name: str,
project_id: str,
):
task_instance.xcom_push(
context=context,
key=DataFusionInstanceLink.key,
value={
"region": location,
"instance_name": instance_name,
"project_id": project_id,
},
)
class DataFusionPipelineLink(BaseGoogleLink):
"""Helper class for constructing Data Fusion Pipeline link."""
name = "Data Fusion Pipeline"
key = "pipeline_conf"
format_str = DATAFUSION_PIPELINE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
uri: str,
pipeline_name: str,
):
task_instance.xcom_push(
context=context,
key=DataFusionPipelineLink.key,
value={
"uri": uri,
"pipeline_name": pipeline_name,
},
)
class DataFusionPipelinesLink(BaseGoogleLink):
"""Helper class for constructing list of Data Fusion Pipelines link."""
name = "Data Fusion Pipelines List"
key = "pipelines_conf"
format_str = DATAFUSION_PIPELINES_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
uri: str,
):
task_instance.xcom_push(
context=context,
key=DataFusionPipelinesLink.key,
value={
"uri": uri,
},
)
| 3,900 | 28.55303 | 107 |
py
|
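A standalone illustration of how ``get_link()`` above builds a URL: the dict stored via ``xcom_push`` is substituted directly into ``format_str``. The instance URI and pipeline name below are hypothetical.

DATAFUSION_PIPELINE_LINK = "{uri}/pipelines/ns/default/view/{pipeline_name}"

# Values of the kind DataFusionPipelineLink.persist() would have stored in XCom.
conf = {"uri": "https://my-instance-datafusion.example.net", "pipeline_name": "my_pipeline"}
print(DATAFUSION_PIPELINE_LINK.format(**conf))
# -> https://my-instance-datafusion.example.net/pipelines/ns/default/view/my_pipeline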
airflow
|
airflow-main/airflow/providers/google/cloud/links/bigtable.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
BIGTABLE_BASE_LINK = "/bigtable"
BIGTABLE_INSTANCE_LINK = BIGTABLE_BASE_LINK + "/instances/{instance_id}/overview?project={project_id}"
BIGTABLE_CLUSTER_LINK = (
BIGTABLE_BASE_LINK + "/instances/{instance_id}/clusters/{cluster_id}?project={project_id}"
)
BIGTABLE_TABLES_LINK = BIGTABLE_BASE_LINK + "/instances/{instance_id}/tables?project={project_id}"
class BigtableInstanceLink(BaseGoogleLink):
"""Helper class for constructing Bigtable Instance link."""
name = "Bigtable Instance"
key = "instance_key"
format_str = BIGTABLE_INSTANCE_LINK
@staticmethod
def persist(
context: Context,
task_instance,
):
task_instance.xcom_push(
context=context,
key=BigtableInstanceLink.key,
value={
"instance_id": task_instance.instance_id,
"project_id": task_instance.project_id,
},
)
class BigtableClusterLink(BaseGoogleLink):
"""Helper class for constructing Bigtable Cluster link."""
name = "Bigtable Cluster"
key = "cluster_key"
format_str = BIGTABLE_CLUSTER_LINK
@staticmethod
def persist(
context: Context,
task_instance,
):
task_instance.xcom_push(
context=context,
key=BigtableClusterLink.key,
value={
"instance_id": task_instance.instance_id,
"cluster_id": task_instance.cluster_id,
"project_id": task_instance.project_id,
},
)
class BigtableTablesLink(BaseGoogleLink):
"""Helper class for constructing Bigtable Tables link."""
name = "Bigtable Tables"
key = "tables_key"
format_str = BIGTABLE_TABLES_LINK
@staticmethod
def persist(
context: Context,
task_instance,
):
task_instance.xcom_push(
context=context,
key=BigtableTablesLink.key,
value={
"instance_id": task_instance.instance_id,
"project_id": task_instance.project_id,
},
)
| 3,072 | 30.040404 | 102 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/links/dataproc.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataproc links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperatorLink, XCom
from airflow.providers.google.cloud.links.base import BASE_LINK
if TYPE_CHECKING:
from airflow.models import BaseOperator
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
DATAPROC_BASE_LINK = BASE_LINK + "/dataproc"
DATAPROC_JOB_LOG_LINK = DATAPROC_BASE_LINK + "/jobs/{resource}?region={region}&project={project_id}"
DATAPROC_CLUSTER_LINK = (
DATAPROC_BASE_LINK + "/clusters/{resource}/monitoring?region={region}&project={project_id}"
)
DATAPROC_WORKFLOW_TEMPLATE_LINK = (
DATAPROC_BASE_LINK + "/workflows/templates/{region}/{resource}?project={project_id}"
)
DATAPROC_WORKFLOW_LINK = DATAPROC_BASE_LINK + "/workflows/instances/{region}/{resource}?project={project_id}"
DATAPROC_BATCH_LINK = DATAPROC_BASE_LINK + "/batches/{region}/{resource}/monitoring?project={project_id}"
DATAPROC_BATCHES_LINK = DATAPROC_BASE_LINK + "/batches?project={project_id}"
class DataprocLink(BaseOperatorLink):
"""Helper class for constructing Dataproc resource link."""
name = "Dataproc resource"
key = "conf"
@staticmethod
def persist(
context: Context,
task_instance,
url: str,
resource: str,
):
task_instance.xcom_push(
context=context,
key=DataprocLink.key,
value={
"region": task_instance.region,
"project_id": task_instance.project_id,
"url": url,
"resource": resource,
},
)
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
conf = XCom.get_value(key=self.key, ti_key=ti_key)
return (
conf["url"].format(
region=conf["region"], project_id=conf["project_id"], resource=conf["resource"]
)
if conf
else ""
)
class DataprocListLink(BaseOperatorLink):
"""Helper class for constructing list of Dataproc resources link."""
name = "Dataproc resources"
key = "list_conf"
@staticmethod
def persist(
context: Context,
task_instance,
url: str,
):
task_instance.xcom_push(
context=context,
key=DataprocListLink.key,
value={
"project_id": task_instance.project_id,
"url": url,
},
)
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
list_conf = XCom.get_value(key=self.key, ti_key=ti_key)
return (
list_conf["url"].format(
project_id=list_conf["project_id"],
)
if list_conf
else ""
)
| 3,745 | 30.478992 | 109 |
py
|
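A standalone sketch of the ``DataprocLink`` round trip above: ``persist()`` stores the URL template itself plus its parameters in XCom, and ``get_link()`` formats the template with them. The project, region, and job id are hypothetical, and ``BASE_LINK`` is assumed to be the Cloud Console root, so the template is spelled out in full here.

DATAPROC_JOB_LOG_LINK = (
    "https://console.cloud.google.com/dataproc/jobs/{resource}?region={region}&project={project_id}"
)

# What DataprocLink.persist() stores in XCom.
conf = {
    "url": DATAPROC_JOB_LOG_LINK,
    "region": "us-central1",
    "project_id": "my-project",
    "resource": "job-1234abcd",
}
# What DataprocLink.get_link() returns.
print(conf["url"].format(region=conf["region"], project_id=conf["project_id"], resource=conf["resource"]))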
airflow
|
airflow-main/airflow/providers/google/cloud/links/dataflow.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataflow links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
DATAFLOW_BASE_LINK = "/dataflow/jobs"
DATAFLOW_JOB_LINK = DATAFLOW_BASE_LINK + "/{region}/{job_id}?project={project_id}"
class DataflowJobLink(BaseGoogleLink):
"""Helper class for constructing Dataflow Job Link."""
name = "Dataflow Job"
key = "dataflow_job_config"
format_str = DATAFLOW_JOB_LINK
@staticmethod
def persist(
operator_instance: BaseOperator,
context: Context,
project_id: str | None,
region: str | None,
job_id: str | None,
):
operator_instance.xcom_push(
context,
key=DataflowJobLink.key,
value={"project_id": project_id, "region": region, "job_id": job_id},
)
| 1,782 | 32.641509 | 82 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/links/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/links/cloud_sql.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud SQL links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
CLOUD_SQL_BASE_LINK = "/sql"
CLOUD_SQL_INSTANCE_LINK = CLOUD_SQL_BASE_LINK + "/instances/{instance}/overview?project={project_id}"
CLOUD_SQL_INSTANCE_DATABASE_LINK = (
CLOUD_SQL_BASE_LINK + "/instances/{instance}/databases?project={project_id}"
)
class CloudSQLInstanceLink(BaseGoogleLink):
"""Helper class for constructing Cloud SQL Instance Link."""
name = "Cloud SQL Instance"
key = "cloud_sql_instance"
format_str = CLOUD_SQL_INSTANCE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
cloud_sql_instance: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=CloudSQLInstanceLink.key,
value={"instance": cloud_sql_instance, "project_id": project_id},
)
class CloudSQLInstanceDatabaseLink(BaseGoogleLink):
"""Helper class for constructing Cloud SQL Instance Database Link."""
name = "Cloud SQL Instance Database"
key = "cloud_sql_instance_database"
format_str = CLOUD_SQL_INSTANCE_DATABASE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
cloud_sql_instance: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=CloudSQLInstanceDatabaseLink.key,
value={"instance": cloud_sql_instance, "project_id": project_id},
)
| 2,529 | 31.857143 | 101 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/links/automl.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google AutoML links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
AUTOML_BASE_LINK = "https://console.cloud.google.com/automl-tables"
AUTOML_DATASET_LINK = (
AUTOML_BASE_LINK + "/locations/{location}/datasets/{dataset_id}/schemav2?project={project_id}"
)
AUTOML_DATASET_LIST_LINK = AUTOML_BASE_LINK + "/datasets?project={project_id}"
AUTOML_MODEL_LINK = (
AUTOML_BASE_LINK
+ "/locations/{location}/datasets/{dataset_id};modelId={model_id}/evaluate?project={project_id}"
)
AUTOML_MODEL_TRAIN_LINK = (
AUTOML_BASE_LINK + "/locations/{location}/datasets/{dataset_id}/train?project={project_id}"
)
AUTOML_MODEL_PREDICT_LINK = (
AUTOML_BASE_LINK
+ "/locations/{location}/datasets/{dataset_id};modelId={model_id}/predict?project={project_id}"
)
class AutoMLDatasetLink(BaseGoogleLink):
"""Helper class for constructing AutoML Dataset link."""
name = "AutoML Dataset"
key = "automl_dataset"
format_str = AUTOML_DATASET_LINK
@staticmethod
def persist(
context: Context,
task_instance,
dataset_id: str,
project_id: str,
):
task_instance.xcom_push(
context,
key=AutoMLDatasetLink.key,
value={"location": task_instance.location, "dataset_id": dataset_id, "project_id": project_id},
)
class AutoMLDatasetListLink(BaseGoogleLink):
"""Helper class for constructing AutoML Dataset List link."""
name = "AutoML Dataset List"
key = "automl_dataset_list"
format_str = AUTOML_DATASET_LIST_LINK
@staticmethod
def persist(
context: Context,
task_instance,
project_id: str,
):
task_instance.xcom_push(
context,
key=AutoMLDatasetListLink.key,
value={
"project_id": project_id,
},
)
class AutoMLModelLink(BaseGoogleLink):
"""Helper class for constructing AutoML Model link."""
name = "AutoML Model"
key = "automl_model"
format_str = AUTOML_MODEL_LINK
@staticmethod
def persist(
context: Context,
task_instance,
dataset_id: str,
model_id: str,
project_id: str,
):
task_instance.xcom_push(
context,
key=AutoMLModelLink.key,
value={
"location": task_instance.location,
"dataset_id": dataset_id,
"model_id": model_id,
"project_id": project_id,
},
)
class AutoMLModelTrainLink(BaseGoogleLink):
"""Helper class for constructing AutoML Model Train link."""
name = "AutoML Model Train"
key = "automl_model_train"
format_str = AUTOML_MODEL_TRAIN_LINK
@staticmethod
def persist(
context: Context,
task_instance,
project_id: str,
):
task_instance.xcom_push(
context,
key=AutoMLModelTrainLink.key,
value={
"location": task_instance.location,
"dataset_id": task_instance.model["dataset_id"],
"project_id": project_id,
},
)
class AutoMLModelPredictLink(BaseGoogleLink):
"""Helper class for constructing AutoML Model Predict link."""
name = "AutoML Model Predict"
key = "automl_model_predict"
format_str = AUTOML_MODEL_PREDICT_LINK
@staticmethod
def persist(
context: Context,
task_instance,
model_id: str,
project_id: str,
):
task_instance.xcom_push(
context,
key=AutoMLModelPredictLink.key,
value={
"location": task_instance.location,
"dataset_id": "-",
"model_id": model_id,
"project_id": project_id,
},
)
| 4,780 | 28.152439 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/links/kubernetes_engine.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from typing import TYPE_CHECKING
from google.cloud.container_v1.types import Cluster
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
KUBERNETES_BASE_LINK = "/kubernetes"
KUBERNETES_CLUSTER_LINK = (
KUBERNETES_BASE_LINK + "/clusters/details/{location}/{cluster_name}/details?project={project_id}"
)
KUBERNETES_POD_LINK = (
KUBERNETES_BASE_LINK
+ "/pod/{location}/{cluster_name}/{namespace}/{pod_name}/details?project={project_id}"
)
class KubernetesEngineClusterLink(BaseGoogleLink):
"""Helper class for constructing Kubernetes Engine Cluster Link."""
name = "Kubernetes Cluster"
key = "kubernetes_cluster_conf"
format_str = KUBERNETES_CLUSTER_LINK
@staticmethod
def persist(context: Context, task_instance, cluster: dict | Cluster | None):
if isinstance(cluster, dict):
cluster = Cluster.from_json(json.dumps(cluster))
task_instance.xcom_push(
context=context,
key=KubernetesEngineClusterLink.key,
value={
"location": task_instance.location,
"cluster_name": cluster.name, # type: ignore
"project_id": task_instance.project_id,
},
)
class KubernetesEnginePodLink(BaseGoogleLink):
"""Helper class for constructing Kubernetes Engine Pod Link."""
name = "Kubernetes Pod"
key = "kubernetes_pod_conf"
format_str = KUBERNETES_POD_LINK
@staticmethod
def persist(
context: Context,
task_instance,
):
task_instance.xcom_push(
context=context,
key=KubernetesEnginePodLink.key,
value={
"location": task_instance.location,
"cluster_name": task_instance.cluster_name,
"namespace": task_instance.pod.metadata.namespace,
"pod_name": task_instance.pod.metadata.name,
"project_id": task_instance.project_id,
},
)
| 2,883 | 32.929412 | 101 |
py
|
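A minimal sketch of the dict-to-``Cluster`` normalization that ``KubernetesEngineClusterLink.persist()`` above performs before reading ``cluster.name``; the payload is made up.

import json

from google.cloud.container_v1.types import Cluster

cluster = {"name": "example-cluster"}  # e.g. a cluster description received as a plain dict
if isinstance(cluster, dict):
    # Round-trip through JSON so the proto message parser can build a Cluster object.
    cluster = Cluster.from_json(json.dumps(cluster))
print(cluster.name)  # -> example-cluster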
airflow
|
airflow-main/airflow/providers/google/cloud/links/cloud_storage_transfer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Storage Transfer Service links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
CLOUD_STORAGE_TRANSFER_BASE_LINK = "https://console.cloud.google.com/transfer"
CLOUD_STORAGE_TRANSFER_LIST_LINK = CLOUD_STORAGE_TRANSFER_BASE_LINK + "/jobs?project={project_id}"
CLOUD_STORAGE_TRANSFER_JOB_LINK = (
CLOUD_STORAGE_TRANSFER_BASE_LINK + "/jobs/transferJobs%2F{transfer_job}/runs?project={project_id}"
)
CLOUD_STORAGE_TRANSFER_OPERATION_LINK = (
CLOUD_STORAGE_TRANSFER_BASE_LINK
+ "/jobs/transferJobs%2F{transfer_job}/runs/transferOperations%2F{transfer_operation}"
+ "?project={project_id}"
)
class CloudStorageTransferLinkHelper:
"""Helper class for Storage Transfer links."""
@staticmethod
def extract_parts(operation_name: str | None):
if not operation_name:
return "", ""
transfer_operation = operation_name.split("/")[1]
transfer_job = operation_name.split("-")[1]
return transfer_operation, transfer_job
class CloudStorageTransferListLink(BaseGoogleLink):
"""Helper class for constructing Cloud Storage Transfer Link."""
name = "Cloud Storage Transfer"
key = "cloud_storage_transfer"
format_str = CLOUD_STORAGE_TRANSFER_LIST_LINK
@staticmethod
def persist(
context: Context,
task_instance,
project_id: str,
):
task_instance.xcom_push(
context,
key=CloudStorageTransferListLink.key,
value={"project_id": project_id},
)
class CloudStorageTransferJobLink(BaseGoogleLink):
"""Helper class for constructing Storage Transfer Job Link."""
name = "Cloud Storage Transfer Job"
key = "cloud_storage_transfer_job"
format_str = CLOUD_STORAGE_TRANSFER_JOB_LINK
@staticmethod
def persist(
task_instance,
context: Context,
project_id: str,
job_name: str,
):
job_name = job_name.split("/")[1] if job_name else ""
task_instance.xcom_push(
context,
key=CloudStorageTransferJobLink.key,
value={
"project_id": project_id,
"transfer_job": job_name,
},
)
class CloudStorageTransferDetailsLink(BaseGoogleLink):
"""Helper class for constructing Cloud Storage Transfer Operation Link."""
name = "Cloud Storage Transfer Details"
key = "cloud_storage_transfer_details"
format_str = CLOUD_STORAGE_TRANSFER_OPERATION_LINK
@staticmethod
def persist(
task_instance,
context: Context,
project_id: str,
operation_name: str,
):
transfer_operation, transfer_job = CloudStorageTransferLinkHelper.extract_parts(operation_name)
task_instance.xcom_push(
context,
key=CloudStorageTransferDetailsLink.key,
value={
"project_id": project_id,
"transfer_job": transfer_job,
"transfer_operation": transfer_operation,
},
)
| 3,994 | 30.210938 | 103 |
py
|
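A standalone illustration of ``CloudStorageTransferLinkHelper.extract_parts()`` above, using a hypothetical operation name of the shape the code assumes (``transferOperations/transferJobs-<job>-<operation>``).

operation_name = "transferOperations/transferJobs-123456-789012"

transfer_operation = operation_name.split("/")[1]  # "transferJobs-123456-789012"
transfer_job = operation_name.split("-")[1]        # "123456"
print(transfer_operation, transfer_job)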
airflow
|
airflow-main/airflow/providers/google/cloud/links/compute.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Compute Engine links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
COMPUTE_BASE_LINK = "https://console.cloud.google.com/compute"
COMPUTE_LINK = (
COMPUTE_BASE_LINK + "/instancesDetail/zones/{location_id}/instances/{resource_id}?project={project_id}"
)
COMPUTE_TEMPLATE_LINK = COMPUTE_BASE_LINK + "/instanceTemplates/details/{resource_id}?project={project_id}"
COMPUTE_GROUP_MANAGER_LINK = (
COMPUTE_BASE_LINK + "/instanceGroups/details/{location_id}/{resource_id}?project={project_id}"
)
class ComputeInstanceDetailsLink(BaseGoogleLink):
"""Helper class for constructing Compute Instance details Link."""
name = "Compute Instance details"
key = "compute_instance_details"
format_str = COMPUTE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
location_id: str,
resource_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=ComputeInstanceDetailsLink.key,
value={
"location_id": location_id,
"resource_id": resource_id,
"project_id": project_id,
},
)
class ComputeInstanceTemplateDetailsLink(BaseGoogleLink):
"""Helper class for constructing Compute Instance Template details Link."""
name = "Compute Instance Template details"
key = "compute_instance_template_details"
format_str = COMPUTE_TEMPLATE_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
resource_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=ComputeInstanceTemplateDetailsLink.key,
value={
"resource_id": resource_id,
"project_id": project_id,
},
)
class ComputeInstanceGroupManagerDetailsLink(BaseGoogleLink):
"""Helper class for constructing Compute Instance Group Manager details Link."""
name = "Compute Instance Group Manager"
key = "compute_instance_group_manager_details"
format_str = COMPUTE_GROUP_MANAGER_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
location_id: str,
resource_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=ComputeInstanceGroupManagerDetailsLink.key,
value={
"location_id": location_id,
"resource_id": resource_id,
"project_id": project_id,
},
)
| 3,660 | 31.39823 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/links/workflows.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Workflows links."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
WORKFLOWS_BASE_LINK = "workflows"
WORKFLOW_LINK = WORKFLOWS_BASE_LINK + "/workflow/{location_id}/{workflow_id}/executions?project={project_id}"
WORKFLOWS_LINK = WORKFLOWS_BASE_LINK + "?project={project_id}"
EXECUTION_LINK = (
WORKFLOWS_BASE_LINK
+ "/workflow/{location_id}/{workflow_id}/execution/{execution_id}?project={project_id}"
)
class WorkflowsWorkflowDetailsLink(BaseGoogleLink):
"""Helper class for constructing Workflow details Link."""
name = "Workflow details"
key = "workflow_details"
format_str = WORKFLOW_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
location_id: str,
workflow_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=WorkflowsWorkflowDetailsLink.key,
value={"location_id": location_id, "workflow_id": workflow_id, "project_id": project_id},
)
class WorkflowsListOfWorkflowsLink(BaseGoogleLink):
"""Helper class for constructing list of Workflows Link."""
name = "List of workflows"
key = "list_of_workflows"
format_str = WORKFLOWS_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=WorkflowsListOfWorkflowsLink.key,
value={"project_id": project_id},
)
class WorkflowsExecutionLink(BaseGoogleLink):
"""Helper class for constructing Workflows Execution Link."""
name = "Workflow Execution"
key = "workflow_execution"
format_str = EXECUTION_LINK
@staticmethod
def persist(
context: Context,
task_instance: BaseOperator,
location_id: str,
workflow_id: str,
execution_id: str,
project_id: str | None,
):
task_instance.xcom_push(
context,
key=WorkflowsExecutionLink.key,
value={
"location_id": location_id,
"workflow_id": workflow_id,
"execution_id": execution_id,
"project_id": project_id,
},
)
| 3,297 | 30.113208 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/azure_blob_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import tempfile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureBlobStorageToGCSOperator(BaseOperator):
"""
Operator that transfers data from Azure Blob Storage to a specified bucket in Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureBlobStorageToGCSOperator`
:param wasb_conn_id: Reference to the wasb connection.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param blob_name: Name of the blob
:param container_name: Name of the container
:param bucket_name: The bucket to upload to
:param object_name: The object name to set when uploading the file
:param filename: The local file path to the file to be uploaded
:param gzip: Option to compress local file or file data for upload
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
*,
wasb_conn_id="wasb_default",
gcp_conn_id: str = "google_cloud_default",
blob_name: str,
container_name: str,
bucket_name: str,
object_name: str,
filename: str,
gzip: bool,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.wasb_conn_id = wasb_conn_id
self.gcp_conn_id = gcp_conn_id
self.blob_name = blob_name
self.container_name = container_name
self.bucket_name = bucket_name
self.object_name = object_name
self.filename = filename
self.gzip = gzip
self.impersonation_chain = impersonation_chain
template_fields: Sequence[str] = (
"blob_name",
"container_name",
"bucket_name",
"object_name",
"filename",
)
def execute(self, context: Context) -> str:
azure_hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
with tempfile.NamedTemporaryFile() as temp_file:
self.log.info("Downloading data from blob: %s", self.blob_name)
azure_hook.get_file(
file_path=temp_file.name,
container_name=self.container_name,
blob_name=self.blob_name,
)
self.log.info(
"Uploading data from blob's: %s into GCP bucket: %s", self.object_name, self.bucket_name
)
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=self.object_name,
filename=temp_file.name,
gzip=self.gzip,
)
self.log.info(
"Resources have been uploaded from blob: %s to GCS bucket:%s",
self.blob_name,
self.bucket_name,
)
return f"gs://{self.bucket_name}/{self.object_name}"
| 4,641 | 38.008403 | 104 |
py
|
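A minimal usage sketch of ``AzureBlobStorageToGCSOperator`` above; the connection ids, container, bucket, and paths are hypothetical, and only constructor arguments declared above are used.

from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.transfers.azure_blob_to_gcs import AzureBlobStorageToGCSOperator

with DAG(dag_id="example_azure_blob_to_gcs", start_date=datetime(2023, 1, 1), schedule=None, catchup=False):
    transfer_blob = AzureBlobStorageToGCSOperator(
        task_id="azure_blob_to_gcs",
        wasb_conn_id="wasb_default",
        gcp_conn_id="google_cloud_default",
        blob_name="reports/2023/data.csv",
        container_name="my-container",
        bucket_name="my-gcs-bucket",
        object_name="reports/data.csv",
        filename="data.csv",
        gzip=False,
    )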
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/bigquery_to_sql.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base operator for BigQuery to SQL operators."""
from __future__ import annotations
import abc
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.utils.bigquery_get_data import bigquery_get_data
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryToSqlBaseOperator(BaseOperator):
"""
Fetch data from a BigQuery table (alternatively fetch selected columns) and insert it into an SQL table.
This is an abstract base operator; refer to the child classes, which target
specific SQL databases (MySQL, MsSQL, Postgres...).
.. note::
If you pass fields to ``selected_fields`` in an order different from the order of
the columns in the BQ table, the data will still be returned in the column order of the BQ table.
For example, if the BQ table has three columns ``[A,B,C]`` and you pass ``'B,A'`` in
``selected_fields``, the data is still returned as ``'A,B'`` and passed in that form
to the SQL database.
:param dataset_table: A dotted ``<dataset>.<table>``: the big query table of origin
:param target_table_name: target SQL table
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param gcp_conn_id: reference to a specific Google Cloud hook.
:param database: name of database which overwrite defined one in connection
:param replace: Whether to replace instead of insert
:param batch_size: The number of rows to take in each batch
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"target_table_name",
"impersonation_chain",
)
def __init__(
self,
*,
dataset_table: str,
target_table_name: str | None,
selected_fields: list[str] | str | None = None,
gcp_conn_id: str = "google_cloud_default",
database: str | None = None,
replace: bool = False,
batch_size: int = 1000,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.selected_fields = selected_fields
self.gcp_conn_id = gcp_conn_id
self.database = database
self.target_table_name = target_table_name
self.replace = replace
self.batch_size = batch_size
self.location = location
self.impersonation_chain = impersonation_chain
try:
self.dataset_id, self.table_id = dataset_table.split(".")
except ValueError:
raise ValueError(f"Could not parse {dataset_table} as <dataset>.<table>") from None
@abc.abstractmethod
def get_sql_hook(self) -> DbApiHook:
"""Return a concrete SQL Hook (a PostgresHook for instance)."""
def persist_links(self, context: Context) -> None:
"""This function persists the connection to the SQL provider."""
def execute(self, context: Context) -> None:
big_query_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
self.persist_links(context)
sql_hook = self.get_sql_hook()
for rows in bigquery_get_data(
self.log,
self.dataset_id,
self.table_id,
big_query_hook,
self.batch_size,
self.selected_fields,
):
sql_hook.insert_rows(
table=self.target_table_name,
rows=rows,
target_fields=self.selected_fields,
replace=self.replace,
)
| 5,363 | 40.261538 | 108 |
py
|
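A sketch of a concrete subclass of the abstract operator above; the class name and Postgres connection id are hypothetical. Per the ``@abc.abstractmethod`` declaration, only ``get_sql_hook()`` has to be implemented.

from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.providers.google.cloud.transfers.bigquery_to_sql import BigQueryToSqlBaseOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook


class BigQueryToPostgresExampleOperator(BigQueryToSqlBaseOperator):  # hypothetical subclass
    def __init__(self, *, postgres_conn_id: str = "postgres_default", **kwargs) -> None:
        super().__init__(**kwargs)
        self.postgres_conn_id = postgres_conn_id

    def get_sql_hook(self) -> DbApiHook:
        # self.database is set by the base operator and may override the connection's schema.
        return PostgresHook(schema=self.database, postgres_conn_id=self.postgres_conn_id)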
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/calendar_to_gcs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from datetime import datetime
from tempfile import NamedTemporaryFile
from typing import Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.suite.hooks.calendar import GoogleCalendarHook
class GoogleCalendarToGCSOperator(BaseOperator):
"""
Writes Google Calendar data into Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleCalendarToGCSOperator`
:param calendar_id: The Google Calendar ID to interact with.
:param i_cal_uid: Optional. Specifies event ID in the ``iCalendar`` format in the response.
:param max_attendees: Optional. If there are more than the specified number of attendees,
only the participant is returned.
:param max_results: Optional. Maximum number of events returned on one result page.
Incomplete pages can be detected by a non-empty ``nextPageToken`` field in the response.
By default the value is 250 events. The page size can never be larger than 2500 events.
:param order_by: Optional. Acceptable values are ``"startTime"`` or ``"updated"``.
:param private_extended_property: Optional. Extended properties constraint specified as
``propertyName=value``. Matches only private properties. This parameter might be repeated
multiple times to return events that match all given constraints.
:param text_search_query: Optional. Free text search.
:param shared_extended_property: Optional. Extended properties constraint specified as
``propertyName=value``. Matches only shared properties. This parameter might be repeated
multiple times to return events that match all given constraints.
:param show_deleted: Optional. False by default
:param show_hidden_invitation: Optional. False by default
:param single_events: Optional. False by default
:param sync_token: Optional. Token obtained from the ``nextSyncToken`` field returned
:param time_max: Optional. Upper bound (exclusive) for an event's start time to filter by.
Default is no filter
:param time_min: Optional. Lower bound (exclusive) for an event's end time to filter by.
Default is no filter
:param time_zone: Optional. Time zone used in response. Default is calendars time zone.
:param updated_min: Optional. Lower bound for an event's last modification time
:param destination_bucket: The destination Google Cloud Storage bucket where the
report should be written to. (templated)
:param destination_path: The Google Cloud Storage URI array for the object created by the operator.
For example: ``path/to/my/files``.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = [
"calendar_id",
"destination_bucket",
"destination_path",
"impersonation_chain",
]
def __init__(
self,
*,
destination_bucket: str,
api_version: str,
calendar_id: str = "primary",
i_cal_uid: str | None = None,
max_attendees: int | None = None,
max_results: int | None = None,
order_by: str | None = None,
private_extended_property: str | None = None,
text_search_query: str | None = None,
shared_extended_property: str | None = None,
show_deleted: bool | None = None,
show_hidden_invitation: bool | None = None,
single_events: bool | None = None,
sync_token: str | None = None,
time_max: datetime | None = None,
time_min: datetime | None = None,
time_zone: str | None = None,
updated_min: datetime | None = None,
destination_path: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.calendar_id = calendar_id
self.api_version = api_version
self.i_cal_uid = i_cal_uid
self.max_attendees = max_attendees
self.max_results = max_results
self.order_by = order_by
self.private_extended_property = private_extended_property
self.text_search_query = text_search_query
self.shared_extended_property = shared_extended_property
self.show_deleted = show_deleted
self.show_hidden_invitation = show_hidden_invitation
self.single_events = single_events
self.sync_token = sync_token
self.time_max = time_max
self.time_min = time_min
self.time_zone = time_zone
self.updated_min = updated_min
self.destination_bucket = destination_bucket
self.destination_path = destination_path
self.impersonation_chain = impersonation_chain
def _upload_data(
self,
events: list[Any],
) -> str:
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
# Construct destination file path
file_name = f"{self.calendar_id}.json".replace(" ", "_")
dest_file_name = (
f"{self.destination_path.strip('/')}/{file_name}" if self.destination_path else file_name
)
with NamedTemporaryFile("w+") as temp_file:
# Write data
json.dump(events, temp_file)
temp_file.flush()
# Upload to GCS
gcs_hook.upload(
bucket_name=self.destination_bucket,
object_name=dest_file_name,
filename=temp_file.name,
)
return dest_file_name
def execute(self, context):
calendar_hook = GoogleCalendarHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
events = calendar_hook.get_events(
calendar_id=self.calendar_id,
i_cal_uid=self.i_cal_uid,
max_attendees=self.max_attendees,
max_results=self.max_results,
order_by=self.order_by,
private_extended_property=self.private_extended_property,
q=self.text_search_query,
shared_extended_property=self.shared_extended_property,
show_deleted=self.show_deleted,
show_hidden_invitation=self.show_hidden_invitation,
single_events=self.single_events,
sync_token=self.sync_token,
time_max=self.time_max,
time_min=self.time_min,
time_zone=self.time_zone,
updated_min=self.updated_min,
)
gcs_path_to_file = self._upload_data(events)
return gcs_path_to_file
| 8,361 | 43.243386 | 103 |
py
|
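A minimal usage sketch of ``GoogleCalendarToGCSOperator`` above; the bucket and destination path are hypothetical, ``api_version="v3"`` refers to the current Google Calendar API version, and only constructor arguments declared above are used.

from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.transfers.calendar_to_gcs import GoogleCalendarToGCSOperator

with DAG(dag_id="example_calendar_to_gcs", start_date=datetime(2023, 1, 1), schedule=None, catchup=False):
    upload_calendar_events = GoogleCalendarToGCSOperator(
        task_id="calendar_to_gcs",
        api_version="v3",
        calendar_id="primary",
        destination_bucket="my-gcs-bucket",
        destination_path="calendar/exports",
        gcp_conn_id="google_cloud_default",
    )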
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/adls_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Azure Data Lake Storage to Google Cloud Storage operator."""
from __future__ import annotations
import os
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url
from airflow.providers.microsoft.azure.hooks.data_lake import AzureDataLakeHook
from airflow.providers.microsoft.azure.operators.adls import ADLSListOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class ADLSToGCSOperator(ADLSListOperator):
"""
Synchronizes an Azure Data Lake Storage path with a GCS bucket.
:param src_adls: The Azure Data Lake path to find the objects (templated)
:param dest_gcs: The Google Cloud Storage bucket and prefix to
store the objects. (templated)
:param replace: If true, replaces same-named files in GCS
:param gzip: Option to compress file for upload
:param azure_data_lake_conn_id: The connection ID to use when
connecting to Azure Data Lake Storage.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
**Examples**:
The following Operator would copy a single file named
``hello/world.avro`` from ADLS to the GCS bucket ``mybucket``. Its full
resulting gcs path will be ``gs://mybucket/hello/world.avro`` ::
copy_single_file = AdlsToGoogleCloudStorageOperator(
task_id='copy_single_file',
src_adls='hello/world.avro',
dest_gcs='gs://mybucket',
replace=False,
azure_data_lake_conn_id='azure_data_lake_default',
gcp_conn_id='google_cloud_default'
)
The following Operator would copy all parquet files from ADLS
to the GCS bucket ``mybucket``. ::
copy_all_files = AdlsToGoogleCloudStorageOperator(
task_id='copy_all_files',
src_adls='*.parquet',
dest_gcs='gs://mybucket',
replace=False,
azure_data_lake_conn_id='azure_data_lake_default',
gcp_conn_id='google_cloud_default'
)
The following Operator would copy all parquet files from ADLS
path ``/hello/world`` to the GCS bucket ``mybucket``. ::
copy_world_files = AdlsToGoogleCloudStorageOperator(
task_id='copy_world_files',
src_adls='hello/world/*.parquet',
dest_gcs='gs://mybucket',
replace=False,
azure_data_lake_conn_id='azure_data_lake_default',
gcp_conn_id='google_cloud_default'
)
"""
template_fields: Sequence[str] = (
"src_adls",
"dest_gcs",
"google_impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
src_adls: str,
dest_gcs: str,
azure_data_lake_conn_id: str,
gcp_conn_id: str = "google_cloud_default",
replace: bool = False,
gzip: bool = False,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(path=src_adls, azure_data_lake_conn_id=azure_data_lake_conn_id, **kwargs)
self.src_adls = src_adls
self.dest_gcs = dest_gcs
self.replace = replace
self.gcp_conn_id = gcp_conn_id
self.gzip = gzip
self.google_impersonation_chain = google_impersonation_chain
def execute(self, context: Context):
# use the super to list all files in an Azure Data Lake path
files = super().execute(context)
g_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
if not self.replace:
# if we are not replacing -> list all files in the ADLS path
# and only keep those files which are present in
# ADLS and not in Google Cloud Storage
bucket_name, prefix = _parse_gcs_url(self.dest_gcs)
existing_files = g_hook.list(bucket_name=bucket_name, prefix=prefix)
files = list(set(files) - set(existing_files))
if files:
hook = AzureDataLakeHook(azure_data_lake_conn_id=self.azure_data_lake_conn_id)
for obj in files:
with NamedTemporaryFile(mode="wb", delete=True) as f:
hook.download_file(local_path=f.name, remote_path=obj)
f.flush()
dest_gcs_bucket, dest_gcs_prefix = _parse_gcs_url(self.dest_gcs)
dest_path = os.path.join(dest_gcs_prefix, obj)
self.log.info("Saving file to %s", dest_path)
g_hook.upload(
bucket_name=dest_gcs_bucket, object_name=dest_path, filename=f.name, gzip=self.gzip
)
self.log.info("All done, uploaded %d files to GCS", len(files))
else:
self.log.info("In sync, no files needed to be uploaded to GCS")
return files
| 6,563 | 40.283019 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/gcs_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage operator."""
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
WILDCARD = "*"
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToGCSOperator(BaseOperator):
"""
Copies objects from a bucket to another, with renaming if requested.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToGCSOperator`
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:param source_objects: A list of source name of the objects to copy in the Google cloud
storage bucket. (templated)
:param destination_bucket: The destination Google Cloud Storage bucket
where the object should be. If the destination_bucket is None, it defaults
to source_bucket. (templated)
:param destination_object: The destination name of the object in the
destination Google Cloud Storage bucket. (templated)
If a wildcard is supplied in the source_object argument, this is the
prefix that will be prepended to the final destination objects' paths.
Note that the source path's part before the wildcard will be removed;
if it needs to be retained it should be appended to destination_object.
For example, with prefix ``foo/*`` and destination_object ``blah/``, the
file ``foo/baz`` will be copied to ``blah/baz``; to retain the prefix write
the destination_object as e.g. ``blah/foo``, in which case the copied file
will be named ``blah/foo/baz``.
The same thing applies to source objects inside source_objects.
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:param replace: Whether you want to replace existing destination files or not.
:param delimiter: (Deprecated) This is used to restrict the result to only the 'files' in a given
'folder'. If source_objects = ['foo/bah/'] and delimiter = '.avro', then only the 'files' in the
folder 'foo/bah/' with '.avro' delimiter will be copied to the destination object.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param last_modified_time: When specified, the objects will be copied or moved,
only if they were modified after last_modified_time.
If tzinfo has not been set, UTC will be assumed.
:param maximum_modified_time: When specified, the objects will be copied or moved,
only if they were modified before maximum_modified_time.
If tzinfo has not been set, UTC will be assumed.
:param is_older_than: When specified, the objects will be copied if they are older
than the specified time in seconds.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param source_object_required: Whether you want to raise an exception when the source object
doesn't exist. It doesn't have any effect when the source objects are folders or patterns.
:param exact_match: When specified, only exact match of the source object (filename) will be
copied.
:param match_glob: (Optional) filters objects based on the glob pattern given by the string
(e.g. ``'**/*.json'``)
:Example:
The following Operator would copy a single file named
``sales/sales-2017/january.avro`` in the ``data`` bucket to the file named
``copied_sales/2017/january-backup.avro`` in the ``data_backup`` bucket ::
copy_single_file = GCSToGCSOperator(
task_id='copy_single_file',
source_bucket='data',
source_objects=['sales/sales-2017/january.avro'],
destination_bucket='data_backup',
destination_object='copied_sales/2017/january-backup.avro',
exact_match=True,
gcp_conn_id=google_cloud_conn_id
)
The following Operator would copy all the Avro files from ``sales/sales-2017``
folder (i.e. all files with names starting with that prefix) in ``data`` bucket to the
``copied_sales/2017`` folder in the ``data_backup`` bucket. ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_objects=['sales/sales-2017'],
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
match_glob='**/*.avro',
gcp_conn_id=google_cloud_conn_id
)
Or ::
copy_files = GCSToGCSOperator(
task_id='copy_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
gcp_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2017``
folder (i.e. all files with names starting with that prefix) in ``data`` bucket to the
same folder in the ``data_backup`` bucket, deleting the original files in the
process. ::
move_files = GCSToGCSOperator(
task_id='move_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
move_object=True,
gcp_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2019``
and ``sales/sales-2020`` folder in ``data`` bucket to the same folder in the
``data_backup`` bucket, deleting the original files in the process. ::
move_files = GCSToGCSOperator(
task_id='move_files',
source_bucket='data',
source_objects=['sales/sales-2019/*.avro', 'sales/sales-2020'],
destination_bucket='data_backup',
delimiter='.avro',
move_object=True,
gcp_conn_id=google_cloud_conn_id
)
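As an additional illustrative sketch (the timestamp is hypothetical and ``datetime``/``timezone``
are assumed to be imported), the following Operator would copy only the objects under
``sales/sales-2017/`` in the ``data`` bucket that were modified after the given instant ::
    copy_recent_files = GCSToGCSOperator(
        task_id='copy_recent_files',
        source_bucket='data',
        source_object='sales/sales-2017/',
        destination_bucket='data_backup',
        destination_object='copied_sales/2017/',
        last_modified_time=datetime(2021, 1, 1, tzinfo=timezone.utc),
        gcp_conn_id=google_cloud_conn_id
    )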
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"source_objects",
"destination_bucket",
"destination_object",
"delimiter",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
source_bucket,
source_object=None,
source_objects=None,
destination_bucket=None,
destination_object=None,
delimiter=None,
move_object=False,
replace=True,
gcp_conn_id="google_cloud_default",
last_modified_time=None,
maximum_modified_time=None,
is_older_than=None,
impersonation_chain: str | Sequence[str] | None = None,
source_object_required=False,
exact_match=False,
match_glob: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.source_bucket = source_bucket
if source_object and WILDCARD in source_object:
warnings.warn(
"Usage of wildcard (*) in 'source_object' is deprecated, utilize 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.source_object = source_object
if source_objects and any([WILDCARD in obj for obj in source_objects]):
warnings.warn(
"Usage of wildcard (*) in 'source_objects' is deprecated, utilize 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.source_objects = source_objects
self.destination_bucket = destination_bucket
self.destination_object = destination_object
if delimiter:
warnings.warn(
"Usage of 'delimiter' is deprecated, please use 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.delimiter = delimiter
self.move_object = move_object
self.replace = replace
self.gcp_conn_id = gcp_conn_id
self.last_modified_time = last_modified_time
self.maximum_modified_time = maximum_modified_time
self.is_older_than = is_older_than
self.impersonation_chain = impersonation_chain
self.source_object_required = source_object_required
self.exact_match = exact_match
self.match_glob = match_glob
def execute(self, context: Context):
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.source_objects and self.source_object:
error_msg = (
f"You can either set source_object parameter or source_objects parameter but not both. "
f"Found source_object={self.source_object} and source_objects={self.source_objects}"
)
raise AirflowException(error_msg)
if not self.source_object and not self.source_objects:
error_msg = "You must set source_object parameter or source_objects parameter. None set"
raise AirflowException(error_msg)
if self.source_objects and not all(isinstance(item, str) for item in self.source_objects):
raise AirflowException("At least, one of the `objects` in the `source_objects` is not a string")
# If source_object is set, default it to source_objects
if self.source_object:
self.source_objects = [self.source_object]
if self.destination_bucket is None:
self.log.warning(
"destination_bucket is None. Defaulting it to source_bucket (%s)", self.source_bucket
)
self.destination_bucket = self.source_bucket
# An empty source_object means to copy all files
if len(self.source_objects) == 0:
self.source_objects = [""]
# Raise exception if empty string `''` is used twice in source_objects, this is to avoid double copy
if self.source_objects.count("") > 1:
raise AirflowException("You can't have two empty strings inside source_objects")
# Iterate over the source_objects and do the copy
for prefix in self.source_objects:
# Check if prefix contains wildcard
if WILDCARD in prefix:
self._copy_source_with_wildcard(hook=hook, prefix=prefix)
# Now search with prefix using provided delimiter if any
else:
self._copy_source_without_wildcard(hook=hook, prefix=prefix)
def _ignore_existing_files(self, hook, prefix, **kwargs):
# list all files in the Destination GCS bucket
# and only keep those files which are present in
# Source GCS bucket and not in Destination GCS bucket
delimiter = kwargs.get("delimiter")
match_glob = kwargs.get("match_glob")
objects = kwargs.get("objects")
if self.destination_object is None:
existing_objects = hook.list(
self.destination_bucket, prefix=prefix, delimiter=delimiter, match_glob=match_glob
)
else:
self.log.info("Replaced destination_object with source_object prefix.")
destination_objects = hook.list(
self.destination_bucket,
prefix=self.destination_object,
delimiter=delimiter,
match_glob=match_glob,
)
existing_objects = [
dest_object.replace(self.destination_object, prefix, 1) for dest_object in destination_objects
]
objects = set(objects) - set(existing_objects)
if len(objects) > 0:
self.log.info("%s files are going to be synced: %s.", len(objects), objects)
else:
self.log.info("There are no new files to sync. Have a nice day!")
return objects
def _copy_source_without_wildcard(self, hook, prefix):
"""
List all files in source_objects, copy files to destination_object, and rename each source file.
For source_objects with no wildcard, this operator would first list
all files in source_objects, using provided delimiter if any. Then copy
files from source_objects to destination_object and rename each source
file. Note that if the flag exact_match=False, then each item in the source_objects
(or source_object itself) will be considered as a prefix for the source objects search.
Example 1:
The following Operator would copy all the files from ``a/`` folder
(i.e a/a.csv, a/b.csv, a/c.csv) in ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.csv, b/b.csv, b/c.csv) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_objects=['a/'],
destination_bucket='data_backup',
destination_object='b/',
gcp_conn_id=google_cloud_conn_id
)
Example 2:
The following Operator would copy all avro files from ``a/`` folder
(i.e a/a.avro, a/b.avro, a/c.avro) in ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/a.avro, b/b.avro, b/c.avro) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_objects=['a/'],
destination_bucket='data_backup',
destination_object='b/',
delimiter='.avro',
gcp_conn_id=google_cloud_conn_id
)
Example 3:
The following Operator would copy files (a/file_1.txt, a/file_2.csv, a/file_3.avro)
in ``data`` bucket to the ``b/`` folder in
the ``data_backup`` bucket (b/file_1.txt, b/file_2.csv, b/file_3.avro) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_objects=['a/file_1.txt', 'a/file_2.csv', 'a/file_3.avro'],
destination_bucket='data_backup',
destination_object='b/',
gcp_conn_id=google_cloud_conn_id
)
Example 4:
The following Operator would copy files corresponding to the prefix 'a/foo.txt'
(a/foo.txt, a/foo.txt.abc, a/foo.txt/subfolder/file.txt) in ``data`` bucket to
the ``b/`` folder in the ``data_backup`` bucket
(b/foo.txt, b/foo.txt.abc, b/foo.txt/subfolder/file.txt) ::
copy_files = GCSToGCSOperator(
task_id='copy_files_without_wildcard',
source_bucket='data',
source_object='a/foo.txt',
destination_bucket='data_backup',
destination_object='b/',
gcp_conn_id=google_cloud_conn_id
)
"""
objects = hook.list(
self.source_bucket, prefix=prefix, delimiter=self.delimiter, match_glob=self.match_glob
)
objects = [obj for obj in objects if self._check_exact_match(obj, prefix)]
if not self.replace:
# If we are not replacing, ignore files already existing in source buckets
objects = self._ignore_existing_files(
hook, prefix, objects=objects, delimiter=self.delimiter, match_glob=self.match_glob
)
# If objects is empty, and we have prefix, let's check if prefix is a blob
# and copy directly
if len(objects) == 0 and prefix:
if hook.exists(self.source_bucket, prefix):
self._copy_single_object(
hook=hook, source_object=prefix, destination_object=self.destination_object
)
elif self.source_object_required:
msg = f"{prefix} does not exist in bucket {self.source_bucket}"
self.log.warning(msg)
raise AirflowException(msg)
if len(objects) == 1 and objects[0][-1] != "/":
self._copy_file(hook=hook, source_object=objects[0])
elif len(objects):
self._copy_multiple_objects(hook=hook, source_objects=objects, prefix=prefix)
def _copy_file(self, hook, source_object):
destination_object = self.destination_object or source_object
if self.destination_object and self.destination_object[-1] == "/":
file_name = source_object.split("/")[-1]
destination_object += file_name
self._copy_single_object(
hook=hook, source_object=source_object, destination_object=destination_object
)
def _copy_multiple_objects(self, hook, source_objects, prefix):
# Check whether the prefix is a root directory for all the rest of objects.
_pref = prefix.rstrip("/")
is_directory = prefix.endswith("/") or all(
[obj.replace(_pref, "", 1).startswith("/") for obj in source_objects]
)
if is_directory:
base_path = prefix.rstrip("/") + "/"
else:
base_path = prefix[0 : prefix.rfind("/") + 1] if "/" in prefix else ""
for source_obj in source_objects:
if not self._check_exact_match(source_obj, prefix):
continue
if self.destination_object is None:
destination_object = source_obj
else:
file_name_postfix = source_obj.replace(base_path, "", 1)
destination_object = self.destination_object.rstrip("/") + "/" + file_name_postfix
self._copy_single_object(
hook=hook, source_object=source_obj, destination_object=destination_object
)
def _check_exact_match(self, source_object: str, prefix: str) -> bool:
"""Checks whether source_object's name matches the prefix according to the exact_match flag."""
if self.exact_match and (source_object != prefix or not source_object.endswith(prefix)):
return False
return True
def _copy_source_with_wildcard(self, hook, prefix):
total_wildcards = prefix.count(WILDCARD)
if total_wildcards > 1:
error_msg = (
"Only one wildcard '*' is allowed in source_object parameter. "
f"Found {total_wildcards} in {prefix}."
)
raise AirflowException(error_msg)
self.log.info("Delimiter ignored because wildcard is in prefix")
prefix_, delimiter = prefix.split(WILDCARD, 1)
objects = hook.list(self.source_bucket, prefix=prefix_, delimiter=delimiter)
# TODO: After deprecating delimiter and wildcards in source objects,
# remove previous line and uncomment the following:
# match_glob = f"**/*{delimiter}" if delimiter else None
# objects = hook.list(self.source_bucket, prefix=prefix_, match_glob=match_glob)
if not self.replace:
# If we are not replacing, list all files in the Destination GCS bucket
# and only keep those files which are present in
# Source GCS bucket and not in Destination GCS bucket
objects = self._ignore_existing_files(hook, prefix_, delimiter=delimiter, objects=objects)
# TODO: After deprecating delimiter and wildcards in source objects,
# remove previous line and uncomment the following:
# objects = self._ignore_existing_files(hook, prefix_, match_glob=match_glob, objects=objects)
for source_object in objects:
if self.destination_object is None:
destination_object = source_object
else:
destination_object = source_object.replace(prefix_, self.destination_object, 1)
self._copy_single_object(
hook=hook, source_object=source_object, destination_object=destination_object
)
def _copy_single_object(self, hook, source_object, destination_object):
if self.is_older_than:
# Here we check if the given object is older than the given time
# If given, last_modified_time and maximum_modified_time is ignored
if hook.is_older_than(self.source_bucket, source_object, self.is_older_than):
self.log.info("Object is older than %s seconds ago", self.is_older_than)
else:
self.log.debug("Object is not older than %s seconds ago", self.is_older_than)
return
elif self.last_modified_time and self.maximum_modified_time:
# check to see if object was modified between last_modified_time and
# maximum_modified_time
if hook.is_updated_between(
self.source_bucket, source_object, self.last_modified_time, self.maximum_modified_time
):
self.log.info(
"Object has been modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
else:
self.log.debug(
"Object was not modified between %s and %s",
self.last_modified_time,
self.maximum_modified_time,
)
return
elif self.last_modified_time is not None:
# Check to see if object was modified after last_modified_time
if hook.is_updated_after(self.source_bucket, source_object, self.last_modified_time):
self.log.info("Object has been modified after %s ", self.last_modified_time)
else:
self.log.debug("Object was not modified after %s ", self.last_modified_time)
return
elif self.maximum_modified_time is not None:
# Check to see if object was modified before maximum_modified_time
if hook.is_updated_before(self.source_bucket, source_object, self.maximum_modified_time):
self.log.info("Object has been modified before %s ", self.maximum_modified_time)
else:
self.log.debug("Object was not modified before %s ", self.maximum_modified_time)
return
self.log.info(
"Executing copy of gs://%s/%s to gs://%s/%s",
self.source_bucket,
source_object,
self.destination_bucket,
destination_object,
)
hook.rewrite(self.source_bucket, source_object, self.destination_bucket, destination_object)
if self.move_object:
hook.delete(self.source_bucket, source_object)
| 24,770 | 44.285192 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/gcs_to_sftp.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to SFTP operator."""
from __future__ import annotations
import os
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.sftp.hooks.sftp import SFTPHook
WILDCARD = "*"
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToSFTPOperator(BaseOperator):
"""
Transfer files from a Google Cloud Storage bucket to SFTP server.
.. code-block:: python
with models.DAG(
"example_gcs_to_sftp",
start_date=datetime(2020, 6, 19),
schedule=None,
) as dag:
# downloads file to /tmp/sftp/folder/subfolder/file.txt
copy_file_from_gcs_to_sftp = GCSToSFTPOperator(
task_id="file-copy-gsc-to-sftp",
source_bucket="test-gcs-sftp-bucket-name",
source_object="folder/subfolder/file.txt",
destination_path="/tmp/sftp",
)
# moves file to /tmp/data.txt
move_file_from_gcs_to_sftp = GCSToSFTPOperator(
task_id="file-move-gsc-to-sftp",
source_bucket="test-gcs-sftp-bucket-name",
source_object="folder/subfolder/data.txt",
destination_path="/tmp",
move_object=True,
keep_directory_structure=False,
)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToSFTPOperator`
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:param destination_path: The sftp remote path. This is the specified directory path for
uploading to the SFTP server.
:param keep_directory_structure: (Optional) When set to True (default), the full path of the object
in the bucket is recreated under destination_path; when set to False, only the object name
relative to the wildcard prefix (or just its base name) is appended to destination_path.
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
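As an illustrative sketch (bucket name and object pattern are hypothetical), a single wildcard
in ``source_object`` copies every matching object, recreating the directory structure under
``destination_path`` by default ::
    copy_dir_from_gcs_to_sftp = GCSToSFTPOperator(
        task_id="dir-copy-gcs-to-sftp",
        source_bucket="test-gcs-sftp-bucket-name",
        source_object="folder/subfolder/*.txt",
        destination_path="/tmp/sftp",
    )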
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"destination_path",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
source_bucket: str,
source_object: str,
destination_path: str,
keep_directory_structure: bool = True,
move_object: bool = False,
gcp_conn_id: str = "google_cloud_default",
sftp_conn_id: str = "ssh_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_path = destination_path
self.keep_directory_structure = keep_directory_structure
self.move_object = move_object
self.gcp_conn_id = gcp_conn_id
self.sftp_conn_id = sftp_conn_id
self.impersonation_chain = impersonation_chain
self.sftp_dirs = None
def execute(self, context: Context):
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
sftp_hook = SFTPHook(self.sftp_conn_id)
if WILDCARD in self.source_object:
total_wildcards = self.source_object.count(WILDCARD)
if total_wildcards > 1:
raise AirflowException(
"Only one wildcard '*' is allowed in source_object parameter. "
f"Found {total_wildcards} in {self.source_object}."
)
prefix, delimiter = self.source_object.split(WILDCARD, 1)
prefix_dirname = os.path.dirname(prefix)
objects = gcs_hook.list(self.source_bucket, prefix=prefix, delimiter=delimiter)
# TODO: After deprecating delimiter and wildcards in source objects,
# remove the previous line and uncomment the following:
# match_glob = f"**/*{delimiter}" if delimiter else None
# objects = gcs_hook.list(self.source_bucket, prefix=prefix, match_glob=match_glob)
for source_object in objects:
destination_path = self._resolve_destination_path(source_object, prefix=prefix_dirname)
self._copy_single_object(gcs_hook, sftp_hook, source_object, destination_path)
self.log.info("Done. Uploaded '%d' files to %s", len(objects), self.destination_path)
else:
destination_path = self._resolve_destination_path(self.source_object)
self._copy_single_object(gcs_hook, sftp_hook, self.source_object, destination_path)
self.log.info("Done. Uploaded '%s' file to %s", self.source_object, destination_path)
def _resolve_destination_path(self, source_object: str, prefix: str | None = None) -> str:
if not self.keep_directory_structure:
if prefix:
source_object = os.path.relpath(source_object, start=prefix)
else:
source_object = os.path.basename(source_object)
return os.path.join(self.destination_path, source_object)
def _copy_single_object(
self,
gcs_hook: GCSHook,
sftp_hook: SFTPHook,
source_object: str,
destination_path: str,
) -> None:
"""Helper function to copy single object."""
self.log.info(
"Executing copy of gs://%s/%s to %s",
self.source_bucket,
source_object,
destination_path,
)
dir_path = os.path.dirname(destination_path)
sftp_hook.create_directory(dir_path)
with NamedTemporaryFile("w") as tmp:
gcs_hook.download(
bucket_name=self.source_bucket,
object_name=source_object,
filename=tmp.name,
)
sftp_hook.store_file(destination_path, tmp.name)
if self.move_object:
self.log.info("Executing delete of gs://%s/%s", self.source_bucket, source_object)
gcs_hook.delete(self.source_bucket, source_object)
| 8,427 | 40.930348 | 103 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/bigquery_to_bigquery.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery to BigQuery operator."""
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.links.bigquery import BigQueryTableLink
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryToBigQueryOperator(BaseOperator):
"""
Copies data from one BigQuery table to another.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryToBigQueryOperator`
.. seealso::
For more details about these parameters:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
:param source_project_dataset_tables: One or more
dotted ``(project:|project.)<dataset>.<table>`` BigQuery tables to use as the
source data. If ``<project>`` is not included, project will be the
project defined in the connection json. Use a list if there are multiple
source tables. (templated)
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>`` (templated)
:param write_disposition: The write disposition if the table already exists.
:param create_disposition: The create disposition if the table doesn't exist.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
:param location: The geographic location of the job. You must specify the location explicitly if
the job does not run in the US or EU multi-regional locations, or if it runs in a
single region (for example, us-central1).
For more details check:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
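A minimal usage sketch (project, dataset and table names are illustrative) ::
    copy_table = BigQueryToBigQueryOperator(
        task_id="copy_table",
        source_project_dataset_tables="my_dataset.my_table",
        destination_project_dataset_table="my_project.my_backup_dataset.my_table",
        write_disposition="WRITE_TRUNCATE",
    )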
"""
template_fields: Sequence[str] = (
"source_project_dataset_tables",
"destination_project_dataset_table",
"labels",
"impersonation_chain",
)
template_ext: Sequence[str] = (".sql",)
ui_color = "#e6f0e4"
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
source_project_dataset_tables: list[str] | str,
destination_project_dataset_table: str,
write_disposition: str = "WRITE_EMPTY",
create_disposition: str = "CREATE_IF_NEEDED",
gcp_conn_id: str = "google_cloud_default",
labels: dict | None = None,
encryption_configuration: dict | None = None,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_project_dataset_tables = source_project_dataset_tables
self.destination_project_dataset_table = destination_project_dataset_table
self.write_disposition = write_disposition
self.create_disposition = create_disposition
self.gcp_conn_id = gcp_conn_id
self.labels = labels
self.encryption_configuration = encryption_configuration
self.location = location
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
self.log.info(
"Executing copy of %s into: %s",
self.source_project_dataset_tables,
self.destination_project_dataset_table,
)
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", AirflowProviderDeprecationWarning)
job_id = hook.run_copy(
source_project_dataset_tables=self.source_project_dataset_tables,
destination_project_dataset_table=self.destination_project_dataset_table,
write_disposition=self.write_disposition,
create_disposition=self.create_disposition,
labels=self.labels,
encryption_configuration=self.encryption_configuration,
)
job = hook.get_job(job_id=job_id, location=self.location).to_api_repr()
conf = job["configuration"]["copy"]["destinationTable"]
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=conf["datasetId"],
project_id=conf["projectId"],
table_id=conf["tableId"],
)
| 6,546 | 43.842466 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/local_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator for uploading local file(s) to GCS."""
from __future__ import annotations
import os
from glob import glob
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class LocalFilesystemToGCSOperator(BaseOperator):
"""
Uploads a file or list of files to Google Cloud Storage; optionally can compress the file for upload.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LocalFilesystemToGCSOperator`
:param src: Path to the local file, or list of local files. Path can be either absolute
(e.g. /path/to/file.ext) or relative (e.g. ../../foo/*/*.csv). (templated)
:param dst: Destination path within the specified bucket on GCS (e.g. /path/to/file.ext).
If multiple files are being uploaded, specify an object prefix with a trailing slash
(e.g. /path/to/directory/) (templated)
:param bucket: The bucket to upload to. (templated)
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param mime_type: The mime-type string
:param gzip: Allows for file to be compressed and uploaded as gzip
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
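A minimal usage sketch (paths and bucket name are illustrative); note the trailing slash on
``dst`` because the glob may match several files ::
    upload_reports = LocalFilesystemToGCSOperator(
        task_id="upload_reports",
        src="/tmp/reports/*.csv",
        dst="reports/",
        bucket="my-bucket",
    )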
"""
template_fields: Sequence[str] = (
"src",
"dst",
"bucket",
"impersonation_chain",
)
def __init__(
self,
*,
src,
dst,
bucket,
gcp_conn_id="google_cloud_default",
mime_type="application/octet-stream",
gzip=False,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.src = src
self.dst = dst
self.bucket = bucket
self.gcp_conn_id = gcp_conn_id
self.mime_type = mime_type
self.gzip = gzip
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
"""Uploads a file or list of files to Google Cloud Storage."""
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
filepaths = self.src if isinstance(self.src, list) else glob(self.src)
if not filepaths:
raise FileNotFoundError(self.src)
if os.path.basename(self.dst): # path to a file
if len(filepaths) > 1: # multiple file upload
raise ValueError(
"'dst' parameter references filepath. Please specify "
"directory (with trailing backslash) to upload multiple "
"files. e.g. /path/to/directory/"
)
object_paths = [self.dst]
else: # directory is provided
object_paths = [os.path.join(self.dst, os.path.basename(filepath)) for filepath in filepaths]
for filepath, object_path in zip(filepaths, object_paths):
hook.upload(
bucket_name=self.bucket,
object_name=object_path,
mime_type=self.mime_type,
filename=filepath,
gzip=self.gzip,
)
| 4,702 | 39.196581 | 105 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/sql_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base operator for SQL to GCS operators."""
from __future__ import annotations
import abc
import csv
import json
import os
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Sequence
import pyarrow as pa
import pyarrow.parquet as pq
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class BaseSQLToGCSOperator(BaseOperator):
"""
Copy data from SQL to Google Cloud Storage in JSON, CSV, or Parquet format.
:param sql: The SQL to execute.
:param bucket: The bucket to upload to.
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A ``{}`` should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from the database.
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:param export_format: Desired format of files to be exported. (json, csv or parquet)
:param stringify_dict: Whether to dump Dictionary type objects
(such as JSON columns) as a string. Applies only to CSV/JSON export format.
:param field_delimiter: The delimiter to be used for CSV files.
:param null_marker: The null marker to be used for CSV files.
:param gzip: Option to compress file for upload (does not apply to schemas).
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using a Jinja template; otherwise, pass a list of
dicts. Examples can be found at:
https://cloud.google.com/bigquery/docs/schemas#specifying_a_json_schema_file
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param parameters: a parameters dict that is substituted at query runtime.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param upload_metadata: whether to upload the row count metadata as blob metadata
:param exclude_columns: set of columns to exclude from transmission
:param partition_columns: list of columns to use for file partitioning. In order to use
this parameter, you must sort your dataset by partition_columns. Do this by
passing an ORDER BY clause to the sql query. Files are uploaded to GCS as objects
with a hive style partitioning directory structure (templated).
:param write_on_empty: Optional parameter to specify whether to write a file if the
export does not return any rows. Default is False so we will not write a file
if the export returns no rows.
:param parquet_row_group_size: The approximate number of rows in each row group
when using parquet format. Using a large row group size can reduce the file size
and improve the performance of reading the data, but it needs more memory to
execute the operator. (default: 1)
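As an illustration (column names, dates and paths are hypothetical), a ``schema`` passed as a
list of dicts follows the BigQuery field format, and ``partition_columns`` combined with a
``filename`` template such as ``data/export_{}.csv`` produces hive-style object names ::
    schema = [
        {"name": "id", "type": "INTEGER", "mode": "REQUIRED"},
        {"name": "ds", "type": "DATE", "mode": "NULLABLE"},
    ]
    # With partition_columns=["ds"], uploaded objects are laid out as:
    #   data/ds=2023-01-01/export_0.csv
    #   data/ds=2023-01-02/export_1.csv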
"""
template_fields: Sequence[str] = (
"sql",
"bucket",
"filename",
"schema_filename",
"schema",
"parameters",
"impersonation_chain",
"partition_columns",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#a0e08c"
def __init__(
self,
*,
sql: str,
bucket: str,
filename: str,
schema_filename: str | None = None,
approx_max_file_size_bytes: int = 1900000000,
export_format: str = "json",
stringify_dict: bool = False,
field_delimiter: str = ",",
null_marker: str | None = None,
gzip: bool = False,
schema: str | list | None = None,
parameters: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
upload_metadata: bool = False,
exclude_columns: set | None = None,
partition_columns: list | None = None,
write_on_empty: bool = False,
parquet_row_group_size: int = 1,
**kwargs,
) -> None:
super().__init__(**kwargs)
if exclude_columns is None:
exclude_columns = set()
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.export_format = export_format.lower()
self.stringify_dict = stringify_dict
self.field_delimiter = field_delimiter
self.null_marker = null_marker
self.gzip = gzip
self.schema = schema
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.upload_metadata = upload_metadata
self.exclude_columns = exclude_columns
self.partition_columns = partition_columns
self.write_on_empty = write_on_empty
self.parquet_row_group_size = parquet_row_group_size
def execute(self, context: Context):
if self.partition_columns:
self.log.info(
f"Found partition columns: {','.join(self.partition_columns)}. "
"Assuming the SQL statement is properly sorted by these columns in "
"ascending or descending order."
)
self.log.info("Executing query")
cursor = self.query()
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
self.log.info("Writing local schema file")
schema_file = self._write_local_schema_file(cursor)
# Flush file before uploading
schema_file["file_handle"].flush()
self.log.info("Uploading schema file to GCS.")
self._upload_to_gcs(schema_file)
schema_file["file_handle"].close()
counter = 0
files = []
total_row_count = 0
total_files = 0
self.log.info("Writing local data files")
for file_to_upload in self._write_local_data_files(cursor):
# Flush file before uploading
file_to_upload["file_handle"].flush()
self.log.info("Uploading chunk file #%d to GCS.", counter)
self._upload_to_gcs(file_to_upload)
self.log.info("Removing local file")
file_to_upload["file_handle"].close()
# Metadata to be output to XCom
total_row_count += file_to_upload["file_row_count"]
total_files += 1
files.append(
{
"file_name": file_to_upload["file_name"],
"file_mime_type": file_to_upload["file_mime_type"],
"file_row_count": file_to_upload["file_row_count"],
}
)
counter += 1
file_meta = {
"bucket": self.bucket,
"total_row_count": total_row_count,
"total_files": total_files,
"files": files,
}
return file_meta
def convert_types(self, schema, col_type_dict, row) -> list:
"""Convert values from DBAPI to output-friendly formats."""
return [
self.convert_type(value, col_type_dict.get(name), stringify_dict=self.stringify_dict)
for name, value in zip(schema, row)
]
@staticmethod
def _write_rows_to_parquet(parquet_writer: pq.ParquetWriter, rows):
rows_pydic: dict[str, list[Any]] = {col: [] for col in parquet_writer.schema.names}
for row in rows:
for ind, col in enumerate(parquet_writer.schema.names):
rows_pydic[col].append(row[ind])
tbl = pa.Table.from_pydict(rows_pydic, parquet_writer.schema)
parquet_writer.write_table(tbl)
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
org_schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
schema = [column for column in org_schema if column not in self.exclude_columns]
col_type_dict = self._get_col_type_dict()
file_no = 0
file_mime_type = self._get_file_mime_type()
file_to_upload, tmp_file_handle = self._get_file_to_upload(file_mime_type, file_no)
if self.export_format == "csv":
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
if self.export_format == "parquet":
parquet_schema = self._convert_parquet_schema(cursor)
parquet_writer = self._configure_parquet_file(tmp_file_handle, parquet_schema)
rows_buffer = []
prev_partition_values = None
curr_partition_values = None
for row in cursor:
if self.partition_columns:
row_dict = dict(zip(schema, row))
curr_partition_values = tuple(
[row_dict.get(partition_column, "") for partition_column in self.partition_columns]
)
if prev_partition_values is None:
# We haven't set prev_partition_values before. Set to current
prev_partition_values = curr_partition_values
elif prev_partition_values != curr_partition_values:
# If the partition values differ, write the current local file out
# Yield first before we write the current record
file_no += 1
if self.export_format == "parquet":
# Write out the remaining rows in the buffer
if rows_buffer:
self._write_rows_to_parquet(parquet_writer, rows_buffer)
rows_buffer = []
parquet_writer.close()
file_to_upload["partition_values"] = prev_partition_values
yield file_to_upload
file_to_upload, tmp_file_handle = self._get_file_to_upload(file_mime_type, file_no)
if self.export_format == "csv":
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
if self.export_format == "parquet":
parquet_writer = self._configure_parquet_file(tmp_file_handle, parquet_schema)
# Reset previous to current after writing out the file
prev_partition_values = curr_partition_values
# Incrementing file_row_count after partition yield ensures all rows are written
file_to_upload["file_row_count"] += 1
# Proceed to write the row to the localfile
if self.export_format == "csv":
row = self.convert_types(schema, col_type_dict, row)
if self.null_marker is not None:
row = [value if value is not None else self.null_marker for value in row]
csv_writer.writerow(row)
elif self.export_format == "parquet":
row = self.convert_types(schema, col_type_dict, row)
if self.null_marker is not None:
row = [value if value is not None else self.null_marker for value in row]
rows_buffer.append(row)
if len(rows_buffer) >= self.parquet_row_group_size:
self._write_rows_to_parquet(parquet_writer, rows_buffer)
rows_buffer = []
else:
row = self.convert_types(schema, col_type_dict, row)
row_dict = dict(zip(schema, row))
json.dump(row_dict, tmp_file_handle, sort_keys=True, ensure_ascii=False)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write("\n")
# Stop if the file exceeds the file size limit.
fppos = tmp_file_handle.tell()
tmp_file_handle.seek(0, os.SEEK_END)
file_size = tmp_file_handle.tell()
tmp_file_handle.seek(fppos, os.SEEK_SET)
if file_size >= self.approx_max_file_size_bytes:
file_no += 1
if self.export_format == "parquet":
# Write out the remaining rows in the buffer
if rows_buffer:
self._write_rows_to_parquet(parquet_writer, rows_buffer)
rows_buffer = []
parquet_writer.close()
file_to_upload["partition_values"] = curr_partition_values
yield file_to_upload
file_to_upload, tmp_file_handle = self._get_file_to_upload(file_mime_type, file_no)
if self.export_format == "csv":
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
if self.export_format == "parquet":
parquet_writer = self._configure_parquet_file(tmp_file_handle, parquet_schema)
if self.export_format == "parquet":
# Write out the remaining rows in the buffer
if rows_buffer:
self._write_rows_to_parquet(parquet_writer, rows_buffer)
rows_buffer = []
parquet_writer.close()
# Last file may have 0 rows, don't yield if empty
# However, if it is the first file and self.write_on_empty is True, then yield to write an empty file
if file_to_upload["file_row_count"] > 0 or (file_no == 0 and self.write_on_empty):
file_to_upload["partition_values"] = curr_partition_values
yield file_to_upload
def _get_file_to_upload(self, file_mime_type, file_no):
"""Returns a dictionary that represents the file to upload."""
tmp_file_handle = NamedTemporaryFile(mode="w", encoding="utf-8", delete=True)
return (
{
"file_name": self.filename.format(file_no),
"file_handle": tmp_file_handle,
"file_mime_type": file_mime_type,
"file_row_count": 0,
},
tmp_file_handle,
)
def _get_file_mime_type(self):
if self.export_format == "csv":
file_mime_type = "text/csv"
elif self.export_format == "parquet":
file_mime_type = "application/octet-stream"
else:
file_mime_type = "application/json"
return file_mime_type
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema as headers for the new file."""
csv_writer = csv.writer(file_handle, delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
def _configure_parquet_file(self, file_handle, parquet_schema) -> pq.ParquetWriter:
parquet_writer = pq.ParquetWriter(file_handle.name, parquet_schema)
return parquet_writer
def _convert_parquet_schema(self, cursor):
type_map = {
"INTEGER": pa.int64(),
"FLOAT": pa.float64(),
"NUMERIC": pa.float64(),
"BIGNUMERIC": pa.float64(),
"BOOL": pa.bool_(),
"STRING": pa.string(),
"BYTES": pa.binary(),
"DATE": pa.date32(),
"DATETIME": pa.date64(),
"TIMESTAMP": pa.timestamp("s"),
}
columns = [field[0] for field in cursor.description]
bq_fields = [self.field_to_bigquery(field) for field in cursor.description]
bq_types = [bq_field.get("type") if bq_field is not None else None for bq_field in bq_fields]
pq_types = [type_map.get(bq_type, pa.string()) for bq_type in bq_types]
parquet_schema = pa.schema(zip(columns, pq_types))
return parquet_schema
@abc.abstractmethod
def query(self):
"""Execute DBAPI query."""
@abc.abstractmethod
def field_to_bigquery(self, field) -> dict[str, str]:
"""Convert a DBAPI field to BigQuery schema format."""
@abc.abstractmethod
def convert_type(self, value, schema_type, **kwargs):
"""Convert a value from DBAPI to output-friendly formats."""
def _get_col_type_dict(self):
"""Return a dict of column name and column type based on self.schema if not None."""
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
self.log.warning("Using default schema due to unexpected type. Should be a string or list.")
col_type_dict = {}
try:
col_type_dict = {col["name"]: col["type"] for col in schema}
except KeyError:
self.log.warning(
"Using default schema due to missing name or type. Please "
"refer to: https://cloud.google.com/bigquery/docs/schemas"
"#specifying_a_json_schema_file"
)
return col_type_dict
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a local file system.
Schema for database will be read from cursor if not specified.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
if self.schema:
self.log.info("Using user schema")
schema = self.schema
else:
self.log.info("Starts generating schema")
schema = [
self.field_to_bigquery(field)
for field in cursor.description
if field[0] not in self.exclude_columns
]
if isinstance(schema, list):
schema = json.dumps(schema, sort_keys=True)
self.log.info("Using schema for %s", self.schema_filename)
self.log.debug("Current schema: %s", schema)
tmp_schema_file_handle = NamedTemporaryFile(mode="w", encoding="utf-8", delete=True)
tmp_schema_file_handle.write(schema)
schema_file_to_upload = {
"file_name": self.schema_filename,
"file_handle": tmp_schema_file_handle,
"file_mime_type": "application/json",
}
return schema_file_to_upload
def _upload_to_gcs(self, file_to_upload):
"""Upload a file (data split or schema .json file) to Google Cloud Storage."""
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
is_data_file = file_to_upload.get("file_name") != self.schema_filename
metadata = None
if is_data_file and self.upload_metadata:
metadata = {"row_count": file_to_upload["file_row_count"]}
object_name = file_to_upload.get("file_name")
if is_data_file and self.partition_columns:
# Add partition column values to object_name
partition_values = file_to_upload.get("partition_values")
head_path, tail_path = os.path.split(object_name)
partition_subprefix = [
f"{col}={val}" for col, val in zip(self.partition_columns, partition_values)
]
object_name = os.path.join(head_path, *partition_subprefix, tail_path)
hook.upload(
self.bucket,
object_name,
file_to_upload.get("file_handle").name,
mime_type=file_to_upload.get("file_mime_type"),
gzip=self.gzip if is_data_file else False,
metadata=metadata,
)
| 21,807 | 42.442231 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/azure_fileshare_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url, gcs_object_is_directory
from airflow.providers.microsoft.azure.hooks.fileshare import AzureFileShareHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureFileShareToGCSOperator(BaseOperator):
"""
Sync an Azure FileShare directory with a Google Cloud Storage destination path.
Does not include subdirectories. May be filtered by prefix.
:param share_name: The Azure FileShare share where to find the objects. (templated)
:param directory_name: (Optional) Path to Azure FileShare directory which content is to be transferred.
Defaults to root directory (templated)
:param prefix: Prefix string which filters objects whose name begin with
such prefix. (templated)
    :param azure_fileshare_conn_id: The source Azure FileShare connection ID.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param dest_gcs: The destination Google Cloud Storage bucket and prefix
where you want to store the files. (templated)
:param replace: Whether you want to replace existing destination files
or not.
:param gzip: Option to compress file for upload
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    Note that ``share_name``, ``directory_name``, ``prefix`` and ``dest_gcs`` are
templated, so you can use variables in them if you wish.
"""
template_fields: Sequence[str] = (
"share_name",
"directory_name",
"prefix",
"dest_gcs",
)
def __init__(
self,
*,
share_name: str,
dest_gcs: str,
directory_name: str | None = None,
prefix: str = "",
azure_fileshare_conn_id: str = "azure_fileshare_default",
gcp_conn_id: str = "google_cloud_default",
replace: bool = False,
gzip: bool = False,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.share_name = share_name
self.directory_name = directory_name
self.prefix = prefix
self.azure_fileshare_conn_id = azure_fileshare_conn_id
self.gcp_conn_id = gcp_conn_id
self.dest_gcs = dest_gcs
self.replace = replace
self.gzip = gzip
self.google_impersonation_chain = google_impersonation_chain
def _check_inputs(self) -> None:
if self.dest_gcs and not gcs_object_is_directory(self.dest_gcs):
self.log.info(
"Destination Google Cloud Storage path is not a valid "
'"directory", define a path that ends with a slash "/" or '
"leave it empty for the root of the bucket."
)
raise AirflowException(
'The destination Google Cloud Storage path must end with a slash "/" or be empty.'
)
def execute(self, context: Context):
self._check_inputs()
azure_fileshare_hook = AzureFileShareHook(self.azure_fileshare_conn_id)
files = azure_fileshare_hook.list_files(
share_name=self.share_name, directory_name=self.directory_name
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
dest_gcs_bucket, dest_gcs_object_prefix = _parse_gcs_url(self.dest_gcs)
if not self.replace:
# if we are not replacing -> list all files in the GCS bucket
# and only keep those files which are present in
            # Azure FileShare and not in Google Cloud Storage
existing_files_prefixed = gcs_hook.list(dest_gcs_bucket, prefix=dest_gcs_object_prefix)
existing_files = []
            # Remove the object prefix itself; if present it is an empty directory placeholder
if dest_gcs_object_prefix in existing_files_prefixed:
existing_files_prefixed.remove(dest_gcs_object_prefix)
# Remove the object prefix from all object string paths
for file in existing_files_prefixed:
if file.startswith(dest_gcs_object_prefix):
existing_files.append(file[len(dest_gcs_object_prefix) :])
else:
existing_files.append(file)
files = list(set(files) - set(existing_files))
if files:
self.log.info("%s files are going to be synced.", len(files))
if self.directory_name is None:
                raise RuntimeError("The directory_name must be set!")
for file in files:
with NamedTemporaryFile() as temp_file:
azure_fileshare_hook.get_file_to_stream(
stream=temp_file,
share_name=self.share_name,
directory_name=self.directory_name,
file_name=file,
)
temp_file.flush()
                    # dest_gcs_object_prefix always ends with a '/' because it is
                    # enforced in _check_inputs() before any upload happens
dest_gcs_object = dest_gcs_object_prefix + file
gcs_hook.upload(dest_gcs_bucket, dest_gcs_object, temp_file.name, gzip=self.gzip)
self.log.info("All done, uploaded %d files to Google Cloud Storage.", len(files))
else:
self.log.info("There are no new files to sync. Have a nice day!")
self.log.info("In sync, no files needed to be uploaded to Google Cloud Storage")
return files
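# Usage sketch (illustrative only): a minimal, hedged example of wiring this operator
# into a DAG. The DAG id, share, directory and bucket below are assumptions; note that
# ``dest_gcs`` must end with a slash (see ``_check_inputs``).
def _example_azure_fileshare_to_gcs_dag():
    import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_azure_fileshare_to_gcs",  # hypothetical DAG id
        start_date=datetime.datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        AzureFileShareToGCSOperator(
            task_id="sync_share_to_gcs",
            share_name="my-share",                # assumed Azure FileShare share
            directory_name="exports",             # assumed source directory
            dest_gcs="gs://my-bucket/exports/",   # destination prefix, must end with "/"
            replace=False,
            gzip=False,
        )
    return dag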
| 7,186 | 42.295181 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/sheets_to_gcs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import csv
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GoogleSheetsToGCSOperator(BaseOperator):
"""
Writes Google Sheet data into Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSheetsToGCSOperator`
:param spreadsheet_id: The Google Sheet ID to interact with.
    :param sheet_filter: Defaults to None. If provided, should be a list of the sheet
        titles to pull from.
:param destination_bucket: The destination Google cloud storage bucket where the
report should be written to. (templated)
    :param destination_path: The Google Cloud Storage path prefix for the created objects.
For example: ``path/to/my/files``.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"spreadsheet_id",
"destination_bucket",
"destination_path",
"sheet_filter",
"impersonation_chain",
)
def __init__(
self,
*,
spreadsheet_id: str,
destination_bucket: str,
sheet_filter: list[str] | None = None,
destination_path: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.spreadsheet_id = spreadsheet_id
self.sheet_filter = sheet_filter
self.destination_bucket = destination_bucket
self.destination_path = destination_path
self.impersonation_chain = impersonation_chain
def _upload_data(
self,
gcs_hook: GCSHook,
hook: GSheetsHook,
sheet_range: str,
sheet_values: list[Any],
) -> str:
# Construct destination file path
sheet = hook.get_spreadsheet(self.spreadsheet_id)
file_name = f"{sheet['properties']['title']}_{sheet_range}.csv".replace(" ", "_")
dest_file_name = (
f"{self.destination_path.strip('/')}/{file_name}" if self.destination_path else file_name
)
with NamedTemporaryFile("w+") as temp_file:
# Write data
writer = csv.writer(temp_file)
writer.writerows(sheet_values)
temp_file.flush()
# Upload to GCS
gcs_hook.upload(
bucket_name=self.destination_bucket,
object_name=dest_file_name,
filename=temp_file.name,
)
return dest_file_name
def execute(self, context: Context):
sheet_hook = GSheetsHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
# Pull data and upload
destination_array: list[str] = []
sheet_titles = sheet_hook.get_sheet_titles(
spreadsheet_id=self.spreadsheet_id, sheet_filter=self.sheet_filter
)
for sheet_range in sheet_titles:
data = sheet_hook.get_values(spreadsheet_id=self.spreadsheet_id, range_=sheet_range)
gcs_path_to_file = self._upload_data(gcs_hook, sheet_hook, sheet_range, data)
destination_array.append(gcs_path_to_file)
self.xcom_push(context, "destination_objects", destination_array)
return destination_array
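# Usage sketch (illustrative only): a hedged example of constructing this operator.
# The spreadsheet id, bucket and path are assumptions. At runtime the operator pushes
# the list of created objects to XCom under the key "destination_objects".
def _example_sheets_to_gcs_task() -> GoogleSheetsToGCSOperator:
    return GoogleSheetsToGCSOperator(
        task_id="upload_sheets_to_gcs",
        spreadsheet_id="1A2b3C4d5E6f7G8h9I0j",   # hypothetical Google Sheet id
        destination_bucket="my-reports-bucket",  # assumed GCS bucket
        destination_path="sheets/exports",       # optional object prefix
        sheet_filter=["Sheet1", "Summary"],      # only export these sheet titles
    )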
| 5,317 | 38.686567 | 103 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/oracle_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import base64
import calendar
from datetime import date, datetime, timedelta
from decimal import Decimal
import oracledb
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.oracle.hooks.oracle import OracleHook
class OracleToGCSOperator(BaseSQLToGCSOperator):
"""Copy data from Oracle to Google Cloud Storage in JSON, CSV or Parquet format.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OracleToGCSOperator`
:param oracle_conn_id: Reference to a specific
:ref:`Oracle hook <howto/connection:oracle>`.
:param ensure_utc: Ensure TIMESTAMP columns exported as UTC. If set to
`False`, TIMESTAMP columns will be exported using the Oracle server's
default timezone.
"""
ui_color = "#a0e08c"
type_map = {
oracledb.DB_TYPE_BINARY_DOUBLE: "DECIMAL", # type: ignore
oracledb.DB_TYPE_BINARY_FLOAT: "DECIMAL", # type: ignore
oracledb.DB_TYPE_BINARY_INTEGER: "INTEGER", # type: ignore
oracledb.DB_TYPE_BOOLEAN: "BOOLEAN", # type: ignore
oracledb.DB_TYPE_DATE: "TIMESTAMP", # type: ignore
oracledb.DB_TYPE_NUMBER: "NUMERIC", # type: ignore
oracledb.DB_TYPE_TIMESTAMP: "TIMESTAMP", # type: ignore
oracledb.DB_TYPE_TIMESTAMP_LTZ: "TIMESTAMP", # type: ignore
oracledb.DB_TYPE_TIMESTAMP_TZ: "TIMESTAMP", # type: ignore
}
def __init__(self, *, oracle_conn_id="oracle_default", ensure_utc=False, **kwargs):
super().__init__(**kwargs)
self.ensure_utc = ensure_utc
self.oracle_conn_id = oracle_conn_id
def query(self):
"""Queries Oracle and returns a cursor to the results."""
oracle = OracleHook(oracle_conn_id=self.oracle_conn_id)
conn = oracle.get_conn()
cursor = conn.cursor()
if self.ensure_utc:
            # Ensure TIMESTAMP results are in UTC; Oracle uses ALTER SESSION for this
            tz_query = "ALTER SESSION SET TIME_ZONE = '+00:00'"
self.log.info("Executing: %s", tz_query)
cursor.execute(tz_query)
self.log.info("Executing: %s", self.sql)
cursor.execute(self.sql)
return cursor
def field_to_bigquery(self, field) -> dict[str, str]:
field_type = self.type_map.get(field[1], "STRING")
field_mode = "NULLABLE" if not field[6] or field_type == "TIMESTAMP" else "REQUIRED"
return {
"name": field[0],
"type": field_type,
"mode": field_mode,
}
def convert_type(self, value, schema_type, **kwargs):
"""
Take a value from Oracle db and convert it to a value safe for JSON/Google Cloud Storage/BigQuery.
* Datetimes are converted to UTC seconds.
* Decimals are converted to floats.
* Dates are converted to ISO formatted string if given schema_type is
DATE, or UTC seconds otherwise.
* Binary type fields are converted to integer if given schema_type is
INTEGER, or encoded with base64 otherwise. Imported BYTES data must
be base64-encoded according to BigQuery documentation:
https://cloud.google.com/bigquery/data-types
:param value: Oracle db column value
:param schema_type: BigQuery data type
"""
if value is None:
return value
if isinstance(value, datetime):
value = calendar.timegm(value.timetuple())
elif isinstance(value, timedelta):
value = value.total_seconds()
elif isinstance(value, Decimal):
value = float(value)
elif isinstance(value, date):
if schema_type == "DATE":
value = value.isoformat()
else:
value = calendar.timegm(value.timetuple())
elif isinstance(value, bytes):
if schema_type == "INTEGER":
value = int.from_bytes(value, "big")
else:
value = base64.standard_b64encode(value).decode("ascii")
return value
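# Illustration only: a small, hedged demonstration of ``convert_type`` based on the
# branches above. The constructor arguments (``sql``, ``bucket``, ``filename``) are
# assumed to be the required ``BaseSQLToGCSOperator`` parameters; no database access
# happens here and all values are made up.
def _example_oracle_convert_type() -> None:
    op = OracleToGCSOperator(
        task_id="convert_type_demo",
        sql="SELECT * FROM my_table",          # hypothetical query, never executed here
        bucket="my-bucket",
        filename="oracle_export/part_{}.json",
    )
    assert op.convert_type(Decimal("1.50"), "NUMERIC") == 1.5
    assert op.convert_type(b"\x00\x2a", "INTEGER") == 42
    assert op.convert_type(datetime(2023, 1, 1), "TIMESTAMP") == 1672531200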
| 4,903 | 38.869919 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/presto_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from prestodb.client import PrestoResult
from prestodb.dbapi import Cursor as PrestoCursor
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.presto.hooks.presto import PrestoHook
class _PrestoToGCSPrestoCursorAdapter:
"""
    An adapter that adds additional features to the Presto cursor.
    The implementation of the cursor in the prestodb library is not sufficient.
    The following changes have been made:
    * A peek mechanism for rows: you can look at the next row without consuming it.
    * The ``description`` attribute is available before reading the first row, thanks to the peek mechanism.
    * The iterator interface has been implemented.
A detailed description of the class methods is available in
`PEP-249 <https://www.python.org/dev/peps/pep-0249/>`__.
"""
def __init__(self, cursor: PrestoCursor):
self.cursor: PrestoCursor = cursor
self.rows: list[Any] = []
self.initialized: bool = False
@property
def description(self) -> list[tuple]:
"""
This read-only attribute is a sequence of 7-item sequences.
Each of these sequences contains information describing one result column:
* ``name``
* ``type_code``
* ``display_size``
* ``internal_size``
* ``precision``
* ``scale``
* ``null_ok``
The first two items (``name`` and ``type_code``) are mandatory, the other
five are optional and are set to None if no meaningful values can be provided.
"""
if not self.initialized:
# Peek for first row to load description.
self.peekone()
return self.cursor.description
@property
def rowcount(self) -> int:
"""The read-only attribute specifies the number of rows."""
return self.cursor.rowcount
def close(self) -> None:
"""Close the cursor now."""
self.cursor.close()
def execute(self, *args, **kwargs) -> PrestoResult:
"""Prepare and execute a database operation (query or command)."""
self.initialized = False
self.rows = []
return self.cursor.execute(*args, **kwargs)
def executemany(self, *args, **kwargs):
"""
Prepare and execute a database operation.
Prepare a database operation (query or command) and then execute it against
all parameter sequences or mappings found in the sequence seq_of_parameters.
"""
self.initialized = False
self.rows = []
return self.cursor.executemany(*args, **kwargs)
def peekone(self) -> Any:
"""Return the next row without consuming it."""
self.initialized = True
element = self.cursor.fetchone()
self.rows.insert(0, element)
return element
def fetchone(self) -> Any:
"""Fetch the next row of a query result set, returning a single sequence, or ``None``."""
if self.rows:
return self.rows.pop(0)
return self.cursor.fetchone()
def fetchmany(self, size=None) -> list:
"""
Fetch the next set of rows of a query result, returning a sequence of sequences.
An empty sequence is returned when no more rows are available.
"""
if size is None:
size = self.cursor.arraysize
result = []
for _ in range(size):
row = self.fetchone()
if row is None:
break
result.append(row)
return result
def __next__(self) -> Any:
"""
Return the next row from the current SQL statement using the same semantics as ``.fetchone()``.
A ``StopIteration`` exception is raised when the result set is exhausted.
"""
result = self.fetchone()
if result is None:
raise StopIteration()
return result
def __iter__(self) -> _PrestoToGCSPrestoCursorAdapter:
"""Return self to make cursors compatible to the iteration protocol."""
return self
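# Illustration only: why the adapter exists. ``description`` is usable before any row
# is consumed, and the peeked row is not lost. ``_FakePrestoCursor`` is a made-up,
# duck-typed stand-in for prestodb's cursor used purely for this sketch.
def _example_peek_mechanism() -> None:
    class _FakePrestoCursor:
        arraysize = 1
        def __init__(self, rows):
            self._rows = list(rows)
            self.description = None  # prestodb populates this lazily, after the first fetch
        def fetchone(self):
            self.description = [("value", "INTEGER", None, None, None, None, None)]
            return self._rows.pop(0) if self._rows else None
    adapter = _PrestoToGCSPrestoCursorAdapter(cursor=_FakePrestoCursor([[1], [2], [3]]))
    assert adapter.description[0][0] == "value"  # available thanks to peekone()
    assert list(adapter) == [[1], [2], [3]]  # the peeked row is still returned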
class PrestoToGCSOperator(BaseSQLToGCSOperator):
"""Copy data from PrestoDB to Google Cloud Storage in JSON, CSV or Parquet format.
:param presto_conn_id: Reference to a specific Presto hook.
"""
ui_color = "#a0e08c"
type_map = {
"BOOLEAN": "BOOL",
"TINYINT": "INT64",
"SMALLINT": "INT64",
"INTEGER": "INT64",
"BIGINT": "INT64",
"REAL": "FLOAT64",
"DOUBLE": "FLOAT64",
"DECIMAL": "NUMERIC",
"VARCHAR": "STRING",
"CHAR": "STRING",
"VARBINARY": "BYTES",
"JSON": "STRING",
"DATE": "DATE",
"TIME": "TIME",
        # BigQuery does not support TIME with time zone natively.
"TIME WITH TIME ZONE": "STRING",
"TIMESTAMP": "TIMESTAMP",
# BigQuery supports a narrow range of time zones during import.
        # Use the TIMESTAMP function if you want a TIMESTAMP type.
"TIMESTAMP WITH TIME ZONE": "STRING",
"IPADDRESS": "STRING",
"UUID": "STRING",
}
def __init__(self, *, presto_conn_id: str = "presto_default", **kwargs):
super().__init__(**kwargs)
self.presto_conn_id = presto_conn_id
def query(self):
"""Queries presto and returns a cursor to the results."""
presto = PrestoHook(presto_conn_id=self.presto_conn_id)
conn = presto.get_conn()
cursor = conn.cursor()
self.log.info("Executing: %s", self.sql)
cursor.execute(self.sql)
return _PrestoToGCSPrestoCursorAdapter(cursor)
def field_to_bigquery(self, field) -> dict[str, str]:
"""Convert presto field type to BigQuery field type."""
clear_field_type = field[1].upper()
# remove type argument e.g. DECIMAL(2, 10) => DECIMAL
clear_field_type, _, _ = clear_field_type.partition("(")
new_field_type = self.type_map.get(clear_field_type, "STRING")
return {"name": field[0], "type": new_field_type}
def convert_type(self, value, schema_type, **kwargs):
"""
Do nothing. Presto uses JSON on the transport layer, so types are simple.
:param value: Presto column value
:param schema_type: BigQuery data type
"""
return value
| 7,149 | 33.375 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/bigquery_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery to Google Cloud Storage operator."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from google.api_core.exceptions import Conflict
from google.api_core.retry import Retry
from google.cloud.bigquery import DEFAULT_RETRY, UnknownJob
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook, BigQueryJob
from airflow.providers.google.cloud.links.bigquery import BigQueryTableLink
from airflow.providers.google.cloud.triggers.bigquery import BigQueryInsertJobTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryToGCSOperator(BaseOperator):
"""
Transfers a BigQuery table to a Google Cloud Storage bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryToGCSOperator`
.. seealso::
For more details about these parameters:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param source_project_dataset_table: The dotted
``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to use as the
source data. If ``<project>`` is not included, project will be the project
defined in the connection json. (templated)
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). (templated) Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:param project_id: Google Cloud Project where the job is running
:param compression: Type of compression to use.
:param export_format: File format to export.
:param field_delimiter: The delimiter to use when extracting to a CSV.
:param print_header: Whether to print a header for a CSV file extract.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param result_retry: How to retry the `result` call that retrieves rows
:param result_timeout: The number of seconds to wait for `result` method before using `result_retry`
:param job_id: The ID of the job. It will be suffixed with hash of job configuration
unless ``force_rerun`` is True.
The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
dashes (-). The maximum length is 1,024 characters. If not provided then uuid will
be generated.
:param force_rerun: If True then operator will use hash of uuid as job id suffix
:param reattach_states: Set of BigQuery job's states in case of which we should reattach
to the job. Should be other than final states.
:param deferrable: Run operator in the deferrable mode
"""
template_fields: Sequence[str] = (
"source_project_dataset_table",
"destination_cloud_storage_uris",
"export_format",
"labels",
"impersonation_chain",
"job_id",
)
template_ext: Sequence[str] = ()
ui_color = "#e4e6f0"
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
source_project_dataset_table: str,
destination_cloud_storage_uris: list[str],
project_id: str | None = None,
compression: str = "NONE",
export_format: str = "CSV",
field_delimiter: str = ",",
print_header: bool = True,
gcp_conn_id: str = "google_cloud_default",
labels: dict | None = None,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
result_retry: Retry = DEFAULT_RETRY,
result_timeout: float | None = None,
job_id: str | None = None,
force_rerun: bool = False,
reattach_states: set[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.source_project_dataset_table = source_project_dataset_table
self.destination_cloud_storage_uris = destination_cloud_storage_uris
self.compression = compression
self.export_format = export_format
self.field_delimiter = field_delimiter
self.print_header = print_header
self.gcp_conn_id = gcp_conn_id
self.labels = labels
self.location = location
self.impersonation_chain = impersonation_chain
self.result_retry = result_retry
self.result_timeout = result_timeout
self.job_id = job_id
self.force_rerun = force_rerun
self.reattach_states: set[str] = reattach_states or set()
self.hook: BigQueryHook | None = None
self.deferrable = deferrable
@staticmethod
def _handle_job_error(job: BigQueryJob | UnknownJob) -> None:
if job.error_result:
raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
def _prepare_configuration(self):
source_project, source_dataset, source_table = self.hook.split_tablename(
table_input=self.source_project_dataset_table,
default_project_id=self.hook.project_id,
var_name="source_project_dataset_table",
)
configuration: dict[str, Any] = {
"extract": {
"sourceTable": {
"projectId": source_project,
"datasetId": source_dataset,
"tableId": source_table,
},
"compression": self.compression,
"destinationUris": self.destination_cloud_storage_uris,
"destinationFormat": self.export_format,
}
}
if self.labels:
configuration["labels"] = self.labels
if self.export_format == "CSV":
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration["extract"]["fieldDelimiter"] = self.field_delimiter
configuration["extract"]["printHeader"] = self.print_header
return configuration
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
configuration: dict,
) -> BigQueryJob:
# Submit a new job without waiting for it to complete.
return hook.insert_job(
configuration=configuration,
project_id=self.project_id or hook.project_id,
location=self.location,
job_id=job_id,
timeout=self.result_timeout,
retry=self.result_retry,
nowait=self.deferrable,
)
def execute(self, context: Context):
self.log.info(
"Executing extract of %s into: %s",
self.source_project_dataset_table,
self.destination_cloud_storage_uris,
)
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
self.hook = hook
configuration = self._prepare_configuration()
job_id = hook.generate_job_id(
job_id=self.job_id,
dag_id=self.dag_id,
task_id=self.task_id,
logical_date=context["logical_date"],
configuration=configuration,
force_rerun=self.force_rerun,
)
try:
self.log.info("Executing: %s", configuration)
job: BigQueryJob | UnknownJob = self._submit_job(
hook=hook, job_id=job_id, configuration=configuration
)
except Conflict:
# If the job already exists retrieve it
job = hook.get_job(
project_id=self.project_id,
location=self.location,
job_id=job_id,
)
if job.state in self.reattach_states:
# We are reattaching to a job
job.result(timeout=self.result_timeout, retry=self.result_retry)
self._handle_job_error(job)
else:
# Same job configuration so we need force_rerun
raise AirflowException(
f"Job with id: {job_id} already exists and is in {job.state} state. If you "
f"want to force rerun it consider setting `force_rerun=True`."
f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
)
        table_ref = job.to_api_repr()["configuration"]["extract"]["sourceTable"]
        dataset_id = table_ref["datasetId"]
        project_id = table_ref["projectId"]
        table_id = table_ref["tableId"]
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=dataset_id,
project_id=project_id,
table_id=table_id,
)
if self.deferrable:
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryInsertJobTrigger(
conn_id=self.gcp_conn_id,
job_id=job_id,
project_id=self.project_id or self.hook.project_id,
),
method_name="execute_complete",
)
else:
job.result(timeout=self.result_timeout, retry=self.result_retry)
def execute_complete(self, context: Context, event: dict[str, Any]):
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
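# Usage sketch (illustrative only): a hedged example of constructing this operator in
# deferrable mode. The table, bucket and URI below are assumptions.
def _example_bigquery_to_gcs_task() -> BigQueryToGCSOperator:
    return BigQueryToGCSOperator(
        task_id="export_table_to_gcs",
        source_project_dataset_table="my-project.my_dataset.my_table",  # assumed table
        destination_cloud_storage_uris=["gs://my-bucket/exports/my_table_*.csv"],
        export_format="CSV",
        compression="GZIP",
        print_header=True,
        deferrable=True,  # hand the wait off to the triggerer via BigQueryInsertJobTrigger
    )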
| 11,664 | 40.960432 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/gcs_to_local.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.models.xcom import MAX_XCOM_SIZE
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToLocalFilesystemOperator(BaseOperator):
"""
Downloads a file from Google Cloud Storage.
    If a filename is supplied, it writes the file to the specified location; alternatively one can
    set the ``store_to_xcom_key`` parameter to push the file content into XCom under that key. When
    the file size exceeds the maximum size for XCom it is recommended to write to a file instead.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToLocalFilesystemOperator`
:param bucket: The Google Cloud Storage bucket where the object is.
Must not contain 'gs://' prefix. (templated)
:param object_name: The name of the object to download in the Google cloud
storage bucket. (templated)
:param filename: The file path, including filename, on the local file system (where the
operator is being executed) that the file should be downloaded to. (templated)
If no filename passed, the downloaded data will not be stored on the local file
system.
:param store_to_xcom_key: If this param is set, the operator will push
the contents of the downloaded file to XCom with the key set in this
parameter. If not set, the downloaded data will not be pushed to XCom. (templated)
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param file_encoding: Optional encoding used to decode file_bytes into a serializable
string that is suitable for storing to XCom. (templated).
"""
template_fields: Sequence[str] = (
"bucket",
"object_name",
"filename",
"store_to_xcom_key",
"impersonation_chain",
"file_encoding",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
bucket: str,
object_name: str | None = None,
filename: str | None = None,
store_to_xcom_key: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
file_encoding: str = "utf-8",
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: Remove one day
        if object_name is None:
            object_name = kwargs.pop("object", None)
            if object_name is not None:
                warnings.warn(
                    "Use 'object_name' instead of 'object'.",
                    AirflowProviderDeprecationWarning,
                )
            else:
                raise TypeError("__init__() missing 1 required positional argument: 'object_name'")
if filename is not None and store_to_xcom_key is not None:
raise ValueError("Either filename or store_to_xcom_key can be set")
super().__init__(**kwargs)
self.bucket = bucket
self.filename = filename
self.object_name = object_name
self.store_to_xcom_key = store_to_xcom_key
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.file_encoding = file_encoding
def execute(self, context: Context):
self.log.info("Executing download: %s, %s, %s", self.bucket, self.object_name, self.filename)
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.store_to_xcom_key:
file_size = hook.get_size(bucket_name=self.bucket, object_name=self.object_name)
if file_size < MAX_XCOM_SIZE:
file_bytes = hook.download(bucket_name=self.bucket, object_name=self.object_name)
context["ti"].xcom_push(key=self.store_to_xcom_key, value=str(file_bytes, self.file_encoding))
else:
raise AirflowException("The size of the downloaded file is too large to push to XCom!")
else:
hook.download(bucket_name=self.bucket, object_name=self.object_name, filename=self.filename)
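# Usage sketch (illustrative only): a hedged example where the downloaded content is
# pushed to XCom instead of being written to disk. Bucket and object names are
# assumptions; ``filename`` and ``store_to_xcom_key`` are mutually exclusive.
def _example_gcs_to_xcom_task() -> GCSToLocalFilesystemOperator:
    return GCSToLocalFilesystemOperator(
        task_id="download_config_to_xcom",
        bucket="my-config-bucket",            # assumed bucket (no "gs://" prefix)
        object_name="configs/settings.json",  # assumed object
        store_to_xcom_key="settings_json",    # downstream tasks read it from XCom
        file_encoding="utf-8",
    )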
| 5,771 | 44.809524 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/bigquery_to_mssql.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery to MSSQL operator."""
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.links.bigquery import BigQueryTableLink
from airflow.providers.google.cloud.transfers.bigquery_to_sql import BigQueryToSqlBaseOperator
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryToMsSqlOperator(BigQueryToSqlBaseOperator):
"""
Fetch data from a BigQuery table (alternatively fetch selected columns) and insert it into a MSSQL table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryToMsSqlOperator`
:param source_project_dataset_table: A dotted ``<project>.<dataset>.<table>``:
the big query table of origin
:param mssql_table: target MsSQL table. It is deprecated: use target_table_name instead. (templated)
:param target_table_name: target MsSQL table. It takes precedence over mssql_table. (templated)
:param mssql_conn_id: reference to a specific mssql hook
"""
template_fields: Sequence[str] = tuple(BigQueryToSqlBaseOperator.template_fields) + (
"source_project_dataset_table",
)
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
source_project_dataset_table: str,
mssql_table: str | None = None,
target_table_name: str | None = None,
mssql_conn_id: str = "mssql_default",
**kwargs,
) -> None:
if mssql_table is not None:
warnings.warn(
# fmt: off
"The `mssql_table` parameter has been deprecated. "
"Use `target_table_name` instead.",
# fmt: on
AirflowProviderDeprecationWarning,
)
if target_table_name is not None:
raise ValueError(
f"Cannot set both arguments: mssql_table={mssql_table!r} and "
f"target_table_name={target_table_name!r}."
)
target_table_name = mssql_table
try:
_, dataset_id, table_id = source_project_dataset_table.split(".")
except ValueError:
raise ValueError(
f"Could not parse {source_project_dataset_table} as <project>.<dataset>.<table>"
) from None
super().__init__(
target_table_name=target_table_name,
dataset_table=f"{dataset_id}.{table_id}",
**kwargs,
)
self.mssql_conn_id = mssql_conn_id
self.source_project_dataset_table = source_project_dataset_table
def get_sql_hook(self) -> MsSqlHook:
        return MsSqlHook(schema=self.database, mssql_conn_id=self.mssql_conn_id)
def persist_links(self, context: Context) -> None:
project_id, dataset_id, table_id = self.source_project_dataset_table.split(".")
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=dataset_id,
project_id=project_id,
table_id=table_id,
)
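# Usage sketch (illustrative only): a hedged example of constructing this operator.
# The BigQuery and MSSQL table names are assumptions; ``source_project_dataset_table``
# must be a dotted <project>.<dataset>.<table>.
def _example_bigquery_to_mssql_task() -> BigQueryToMsSqlOperator:
    return BigQueryToMsSqlOperator(
        task_id="bq_to_mssql",
        source_project_dataset_table="my-project.my_dataset.my_table",
        target_table_name="dbo.my_table",  # assumed MSSQL target table
        mssql_conn_id="mssql_default",
    )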
| 4,096 | 38.019048 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/bigquery_to_postgres.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery to PostgreSQL operator."""
from __future__ import annotations
from typing import Sequence
from airflow.providers.google.cloud.transfers.bigquery_to_sql import BigQueryToSqlBaseOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook
class BigQueryToPostgresOperator(BigQueryToSqlBaseOperator):
"""
Fetch data from a BigQuery table (alternatively fetch selected columns) and insert into PostgreSQL table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryToPostgresOperator`
:param target_table_name: target Postgres table (templated)
:param postgres_conn_id: Reference to :ref:`postgres connection id <howto/connection:postgres>`.
"""
template_fields: Sequence[str] = tuple(BigQueryToSqlBaseOperator.template_fields) + (
"dataset_id",
"table_id",
)
def __init__(
self,
*,
target_table_name: str,
postgres_conn_id: str = "postgres_default",
**kwargs,
) -> None:
super().__init__(target_table_name=target_table_name, **kwargs)
self.postgres_conn_id = postgres_conn_id
def get_sql_hook(self) -> PostgresHook:
return PostgresHook(schema=self.database, postgres_conn_id=self.postgres_conn_id)
| 2,151 | 37.428571 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/sftp_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains SFTP to Google Cloud Storage operator."""
from __future__ import annotations
import os
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.sftp.hooks.sftp import SFTPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
WILDCARD = "*"
class SFTPToGCSOperator(BaseOperator):
"""
Transfer files to Google Cloud Storage from SFTP server.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SFTPToGCSOperator`
:param source_path: The sftp remote path. This is the specified file path
for downloading the single file or multiple files from the SFTP server.
You can use only one wildcard within your path. The wildcard can appear
inside the path or at the end of the path.
:param destination_bucket: The bucket to upload to.
:param destination_path: The destination name of the object in the
destination Google Cloud Storage bucket.
If destination_path is not provided file/files will be placed in the
main bucket path.
If a wildcard is supplied in the destination_path argument, this is the
prefix that will be prepended to the final destination objects' paths.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param mime_type: The mime-type string
:param gzip: Allows for file to be compressed and uploaded as gzip
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"source_path",
"destination_path",
"destination_bucket",
"impersonation_chain",
)
def __init__(
self,
*,
source_path: str,
destination_bucket: str,
destination_path: str | None = None,
gcp_conn_id: str = "google_cloud_default",
sftp_conn_id: str = "ssh_default",
mime_type: str = "application/octet-stream",
gzip: bool = False,
move_object: bool = False,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_path = source_path
self.destination_path = self._set_destination_path(destination_path)
self.destination_bucket = self._set_bucket_name(destination_bucket)
self.gcp_conn_id = gcp_conn_id
self.mime_type = mime_type
self.gzip = gzip
self.sftp_conn_id = sftp_conn_id
self.move_object = move_object
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
sftp_hook = SFTPHook(self.sftp_conn_id)
if WILDCARD in self.source_path:
total_wildcards = self.source_path.count(WILDCARD)
if total_wildcards > 1:
raise AirflowException(
"Only one wildcard '*' is allowed in source_path parameter. "
f"Found {total_wildcards} in {self.source_path}."
)
prefix, delimiter = self.source_path.split(WILDCARD, 1)
base_path = os.path.dirname(prefix)
files, _, _ = sftp_hook.get_tree_map(base_path, prefix=prefix, delimiter=delimiter)
for file in files:
destination_path = file.replace(base_path, self.destination_path, 1)
self._copy_single_object(gcs_hook, sftp_hook, file, destination_path)
else:
destination_object = (
self.destination_path if self.destination_path else self.source_path.rsplit("/", 1)[1]
)
self._copy_single_object(gcs_hook, sftp_hook, self.source_path, destination_object)
def _copy_single_object(
self,
gcs_hook: GCSHook,
sftp_hook: SFTPHook,
source_path: str,
destination_object: str,
) -> None:
"""Helper function to copy single object."""
self.log.info(
"Executing copy of %s to gs://%s/%s",
source_path,
self.destination_bucket,
destination_object,
)
with NamedTemporaryFile("w") as tmp:
sftp_hook.retrieve_file(source_path, tmp.name)
gcs_hook.upload(
bucket_name=self.destination_bucket,
object_name=destination_object,
filename=tmp.name,
mime_type=self.mime_type,
gzip=self.gzip,
)
if self.move_object:
self.log.info("Executing delete of %s", source_path)
sftp_hook.delete_file(source_path)
@staticmethod
def _set_destination_path(path: str | None) -> str:
if path is not None:
return path.lstrip("/") if path.startswith("/") else path
return ""
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
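# Usage sketch (illustrative only): a hedged example with a single wildcard in
# ``source_path``; every matching file is uploaded under ``destination_path`` and
# ``move_object=True`` deletes the source after upload. All values are assumptions.
def _example_sftp_to_gcs_task() -> SFTPToGCSOperator:
    return SFTPToGCSOperator(
        task_id="upload_sftp_reports",
        source_path="/var/reports/*.csv",  # only one "*" wildcard is allowed
        destination_bucket="my-bucket",
        destination_path="reports/",       # prefix prepended to matched files
        sftp_conn_id="ssh_default",
        move_object=True,
        gzip=False,
    )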
| 7,018 | 38.432584 | 102 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/mysql_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MySQL to GCS operator."""
from __future__ import annotations
import base64
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from MySQLdb.constants import FIELD_TYPE
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.mysql.hooks.mysql import MySqlHook
class MySQLToGCSOperator(BaseSQLToGCSOperator):
"""Copy data from MySQL to Google Cloud Storage in JSON, CSV or Parquet format.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MySQLToGCSOperator`
:param mysql_conn_id: Reference to :ref:`mysql connection id <howto/connection:mysql>`.
:param ensure_utc: Ensure TIMESTAMP columns exported as UTC. If set to
`False`, TIMESTAMP columns will be exported using the MySQL server's
default timezone.
"""
ui_color = "#a0e08c"
type_map = {
FIELD_TYPE.BIT: "INTEGER",
FIELD_TYPE.DATETIME: "TIMESTAMP",
FIELD_TYPE.DATE: "TIMESTAMP",
FIELD_TYPE.DECIMAL: "FLOAT",
FIELD_TYPE.NEWDECIMAL: "FLOAT",
FIELD_TYPE.DOUBLE: "FLOAT",
FIELD_TYPE.FLOAT: "FLOAT",
FIELD_TYPE.INT24: "INTEGER",
FIELD_TYPE.LONG: "INTEGER",
FIELD_TYPE.LONGLONG: "INTEGER",
FIELD_TYPE.SHORT: "INTEGER",
FIELD_TYPE.TIME: "TIME",
FIELD_TYPE.TIMESTAMP: "TIMESTAMP",
FIELD_TYPE.TINY: "INTEGER",
FIELD_TYPE.YEAR: "INTEGER",
}
def __init__(self, *, mysql_conn_id="mysql_default", ensure_utc=False, **kwargs):
super().__init__(**kwargs)
self.mysql_conn_id = mysql_conn_id
self.ensure_utc = ensure_utc
def query(self):
"""Queries mysql and returns a cursor to the results."""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
if self.ensure_utc:
# Ensure TIMESTAMP results are in UTC
tz_query = "SET time_zone = '+00:00'"
self.log.info("Executing: %s", tz_query)
cursor.execute(tz_query)
self.log.info("Executing: %s", self.sql)
cursor.execute(self.sql)
return cursor
def field_to_bigquery(self, field) -> dict[str, str]:
field_type = self.type_map.get(field[1], "STRING")
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
field_mode = "NULLABLE" if field[6] or field_type == "TIMESTAMP" else "REQUIRED"
return {
"name": field[0],
"type": field_type,
"mode": field_mode,
}
def convert_type(self, value, schema_type: str, **kwargs):
"""
Take a value from MySQLdb and convert it to a value safe for JSON/Google Cloud Storage/BigQuery.
* Datetimes are converted to `str(value)` (`datetime.isoformat(' ')`)
strings.
* Times are converted to `str((datetime.min + value).time())` strings.
* Decimals are converted to floats.
* Dates are converted to ISO formatted strings if given schema_type is
DATE, or `datetime.isoformat(' ')` strings otherwise.
* Binary type fields are converted to integer if given schema_type is
INTEGER, or encoded with base64 otherwise. Imported BYTES data must
be base64-encoded according to BigQuery documentation:
https://cloud.google.com/bigquery/data-types
:param value: MySQLdb column value
:param schema_type: BigQuery data type
"""
if value is None:
return value
if isinstance(value, datetime):
value = str(value)
elif isinstance(value, timedelta):
value = str((datetime.min + value).time())
elif isinstance(value, Decimal):
value = float(value)
elif isinstance(value, date):
if schema_type == "DATE":
value = value.isoformat()
else:
value = str(datetime.combine(value, time.min))
elif isinstance(value, bytes):
if schema_type == "INTEGER":
value = int.from_bytes(value, "big")
else:
value = base64.standard_b64encode(value).decode("ascii")
return value
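# Usage sketch (illustrative only): a hedged example of constructing this operator.
# ``sql``, ``bucket``, ``filename`` and ``schema_filename`` are assumed to be the
# ``BaseSQLToGCSOperator`` parameters; query, bucket and paths are made up.
def _example_mysql_to_gcs_task() -> MySQLToGCSOperator:
    return MySQLToGCSOperator(
        task_id="export_orders",
        mysql_conn_id="mysql_default",
        sql="SELECT * FROM orders WHERE ds = '2023-01-01'",  # hypothetical query
        bucket="my-bucket",
        filename="mysql/orders/part_{}.json",  # "{}" is replaced with the file-split number
        schema_filename="mysql/orders/schema.json",
        ensure_utc=True,  # export TIMESTAMP columns as UTC
        export_format="json",
    )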
| 5,248 | 38.765152 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/bigquery_to_mysql.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery to MySQL operator."""
from __future__ import annotations
import warnings
from typing import Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.transfers.bigquery_to_sql import BigQueryToSqlBaseOperator
from airflow.providers.mysql.hooks.mysql import MySqlHook
class BigQueryToMySqlOperator(BigQueryToSqlBaseOperator):
"""
Fetch data from a BigQuery table (alternatively fetch selected columns) and insert it into a MySQL table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryToMySqlOperator`
:param mysql_table: target MySQL table, use dot notation to target a
specific database. It is deprecated: use target_table_name instead. (templated)
:param target_table_name: target MySQL table. It takes precedence over mysql_table. (templated)
:param mysql_conn_id: Reference to :ref:`mysql connection id <howto/connection:mysql>`.
"""
template_fields: Sequence[str] = tuple(BigQueryToSqlBaseOperator.template_fields) + (
"dataset_id",
"table_id",
)
def __init__(
self,
*,
mysql_table: str | None = None,
target_table_name: str | None = None,
mysql_conn_id: str = "mysql_default",
**kwargs,
) -> None:
if mysql_table is not None:
warnings.warn(
"The `mysql_table` parameter has been deprecated. Use `target_table_name` instead.",
AirflowProviderDeprecationWarning,
)
if target_table_name is not None:
raise ValueError(
f"Cannot set both arguments: mysql_table={mysql_table!r} and "
f"target_table_name={target_table_name!r}."
)
target_table_name = mysql_table
super().__init__(target_table_name=target_table_name, **kwargs)
self.mysql_conn_id = mysql_conn_id
def get_sql_hook(self) -> MySqlHook:
return MySqlHook(schema=self.database, mysql_conn_id=self.mysql_conn_id)
| 2,951 | 38.36 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/gcs_to_bigquery.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage to BigQuery operator."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Sequence
from google.api_core.exceptions import BadRequest, Conflict
from google.api_core.retry import Retry
from google.cloud.bigquery import (
DEFAULT_RETRY,
CopyJob,
ExternalConfig,
ExtractJob,
LoadJob,
QueryJob,
SchemaField,
UnknownJob,
)
from google.cloud.bigquery.table import EncryptionConfiguration, Table, TableReference
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook, BigQueryJob
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.links.bigquery import BigQueryTableLink
from airflow.providers.google.cloud.triggers.bigquery import BigQueryInsertJobTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
ALLOWED_FORMATS = [
"CSV",
"NEWLINE_DELIMITED_JSON",
"AVRO",
"GOOGLE_SHEETS",
"DATASTORE_BACKUP",
"PARQUET",
]
class GCSToBigQueryOperator(BaseOperator):
"""
Loads files from Google Cloud Storage into BigQuery.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google Cloud Storage object name. The object in
Google Cloud Storage must be a JSON file with the schema fields in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToBigQueryOperator`
:param bucket: The bucket to load from. (templated)
:param source_objects: String or List of Google Cloud Storage URIs to load from. (templated)
If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
:param destination_project_dataset_table: The dotted
``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to load data into.
If ``<project>`` is not included, project will be the project defined in
the connection json. (templated)
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Should not be set when source_format is 'DATASTORE_BACKUP'.
Parameter must be defined if 'schema_object' is null and autodetect is False.
:param schema_object: If set, a GCS object path pointing to a .json file that
contains the schema for the table. (templated)
Parameter must be defined if 'schema_fields' is null and autodetect is False.
:param schema_object_bucket: [Optional] If set, the GCS bucket where the schema object
template is stored. (templated) (Default: the value of ``bucket``)
:param source_format: File format to export.
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:param create_disposition: The create disposition if the table doesn't exist.
:param skip_leading_rows: The number of rows at the top of a CSV file that BigQuery
will skip when loading the data.
When autodetect is on, the behavior is the following:
skip_leading_rows unspecified - Autodetect tries to detect headers in the first row.
If they are not detected, the row is read as data. Otherwise, data is read starting
from the second row.
skip_leading_rows is 0 - Instructs autodetect that there are no headers and data
should be read starting from the first row.
skip_leading_rows = N > 0 - Autodetect skips N-1 rows and tries to detect headers
in row N. If headers are not detected, row N is just skipped. Otherwise, row N is
used to extract column names for the detected schema.
Default value set to None so that autodetect option can detect schema fields.
:param write_disposition: The write disposition if the table already exists.
:param field_delimiter: The delimiter to use when loading from a CSV.
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:param quote_character: The value that is used to quote data sections in a CSV file.
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result. Only applicable to CSV, ignored
for other formats.
:param encoding: The character encoding of the data. See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).csvOptions.encoding
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:param max_id_key: If set, the name of a column in the BigQuery table
that's to be loaded. This will be used to select the MAX value from
BigQuery after the load occurs. The results will be returned by the
execute() command, which in turn gets stored in XCom for future
operators to use. This can be helpful with incremental loads--during
future executions, you can pick up from the max ID.
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:param src_fmt_configs: configure optional fields specific to the source format
:param external_table: Flag to specify if the destination table should be
a BigQuery external table. Default Value is False.
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
Note that 'field' is not available in concurrency with
dataset.table$partition.
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
Not applicable for external tables.
:param autodetect: [Optional] Indicates if we should automatically infer the
options and schema for CSV and JSON sources. (Default: ``True``).
Parameter must be set to True if 'schema_fields' and 'schema_object' are undefined.
        It is suggested to set this to True if the table is created outside of Airflow.
If autodetect is None and no schema is provided (neither via schema_fields
nor a schema_object), assume the table already exists.
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
:param location: [Optional] The geographic location of the job. Required except for US and EU.
See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param labels: [Optional] Labels for the BigQuery table.
:param description: [Optional] Description for the BigQuery table. This will only be used if the
destination table is newly created. If the table already exists and a value different than the
current description is provided, the job will fail.
:param deferrable: Run operator in the deferrable mode
"""
template_fields: Sequence[str] = (
"bucket",
"source_objects",
"schema_object",
"schema_object_bucket",
"destination_project_dataset_table",
"impersonation_chain",
"src_fmt_configs",
)
template_ext: Sequence[str] = (".sql",)
ui_color = "#f0eee4"
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
bucket,
source_objects,
destination_project_dataset_table,
schema_fields=None,
schema_object=None,
schema_object_bucket=None,
source_format="CSV",
compression="NONE",
create_disposition="CREATE_IF_NEEDED",
skip_leading_rows=None,
write_disposition="WRITE_EMPTY",
field_delimiter=",",
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
encoding="UTF-8",
max_id_key=None,
gcp_conn_id="google_cloud_default",
schema_update_options=(),
src_fmt_configs=None,
external_table=False,
time_partitioning=None,
cluster_fields=None,
autodetect=True,
encryption_configuration=None,
location=None,
impersonation_chain: str | Sequence[str] | None = None,
labels=None,
description=None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
result_retry: Retry = DEFAULT_RETRY,
result_timeout: float | None = None,
cancel_on_kill: bool = True,
job_id: str | None = None,
force_rerun: bool = True,
reattach_states: set[str] | None = None,
project_id: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.hook: BigQueryHook | None = None
self.configuration: dict[str, Any] = {}
# GCS config
if src_fmt_configs is None:
src_fmt_configs = {}
if time_partitioning is None:
time_partitioning = {}
self.bucket = bucket
self.source_objects = source_objects
self.schema_object = schema_object
if schema_object_bucket is None:
schema_object_bucket = bucket
self.schema_object_bucket = schema_object_bucket
# BQ config
self.destination_project_dataset_table = destination_project_dataset_table
self.project_id = project_id
self.schema_fields = schema_fields
if source_format.upper() not in ALLOWED_FORMATS:
raise ValueError(
f"{source_format} is not a valid source format. "
f"Please use one of the following types: {ALLOWED_FORMATS}."
)
else:
self.source_format = source_format.upper()
self.compression = compression
self.create_disposition = create_disposition
self.skip_leading_rows = skip_leading_rows
self.write_disposition = write_disposition
self.field_delimiter = field_delimiter
self.max_bad_records = max_bad_records
self.quote_character = quote_character
self.ignore_unknown_values = ignore_unknown_values
self.allow_quoted_newlines = allow_quoted_newlines
self.allow_jagged_rows = allow_jagged_rows
self.external_table = external_table
self.encoding = encoding
self.max_id_key = max_id_key
self.gcp_conn_id = gcp_conn_id
self.schema_update_options = schema_update_options
self.src_fmt_configs = src_fmt_configs
self.time_partitioning = time_partitioning
self.cluster_fields = cluster_fields
self.autodetect = autodetect
self.encryption_configuration = encryption_configuration
self.location = location
self.impersonation_chain = impersonation_chain
self.labels = labels
self.description = description
self.job_id = job_id
self.deferrable = deferrable
self.result_retry = result_retry
self.result_timeout = result_timeout
self.force_rerun = force_rerun
self.reattach_states: set[str] = reattach_states or set()
self.cancel_on_kill = cancel_on_kill
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
# Submit a new job without waiting for it to complete.
return hook.insert_job(
configuration=self.configuration,
project_id=self.project_id or hook.project_id,
location=self.location,
job_id=job_id,
timeout=self.result_timeout,
retry=self.result_retry,
nowait=True,
)
@staticmethod
def _handle_job_error(job: BigQueryJob | UnknownJob) -> None:
if job.error_result:
raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
def execute(self, context: Context):
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
self.hook = hook
self.source_format = self.source_format.upper()
job_id = self.hook.generate_job_id(
job_id=self.job_id,
dag_id=self.dag_id,
task_id=self.task_id,
logical_date=context["logical_date"],
configuration=self.configuration,
force_rerun=self.force_rerun,
)
self.source_objects = (
self.source_objects if isinstance(self.source_objects, list) else [self.source_objects]
)
self.source_uris = [f"gs://{self.bucket}/{source_object}" for source_object in self.source_objects]
if not self.schema_fields:
            # Check whether self.autodetect is explicitly False. self.autodetect equal to None
            # means we do not want to detect the schema from files; instead, we rely on
            # the schema of an already existing table.
if not self.schema_object and self.autodetect is False:
raise AirflowException(
"Table schema was not found. Neither schema object nor schema fields were specified"
)
if self.schema_object and self.source_format != "DATASTORE_BACKUP":
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.schema_fields = json.loads(
gcs_hook.download(self.schema_object_bucket, self.schema_object).decode("utf-8")
)
self.log.info("Loaded fields from schema object: %s", self.schema_fields)
else:
self.schema_fields = None
if self.external_table:
self.log.info("Creating a new BigQuery table for storing data...")
table_obj_api_repr = self._create_external_table()
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=table_obj_api_repr["tableReference"]["datasetId"],
project_id=table_obj_api_repr["tableReference"]["projectId"],
table_id=table_obj_api_repr["tableReference"]["tableId"],
)
if self.max_id_key:
max_id = self._find_max_value_in_column()
return max_id
else:
self.log.info("Using existing BigQuery table for storing data...")
self.configuration = self._use_existing_table()
try:
self.log.info("Executing: %s", self.configuration)
job: BigQueryJob | UnknownJob = self._submit_job(self.hook, job_id)
except Conflict:
# If the job already exists retrieve it
job = self.hook.get_job(
project_id=self.project_id or self.hook.project_id,
location=self.location,
job_id=job_id,
)
if job.state in self.reattach_states:
# We are reattaching to a job
job._begin()
self._handle_job_error(job)
else:
# Same job configuration so we need force_rerun
raise AirflowException(
f"Job with id: {job_id} already exists and is in {job.state} state. If you "
f"want to force rerun it consider setting `force_rerun=True`."
f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
)
job_types = {
LoadJob._JOB_TYPE: ["sourceTable", "destinationTable"],
CopyJob._JOB_TYPE: ["sourceTable", "destinationTable"],
ExtractJob._JOB_TYPE: ["sourceTable"],
QueryJob._JOB_TYPE: ["destinationTable"],
}
if self.hook.project_id:
for job_type, tables_prop in job_types.items():
job_configuration = job.to_api_repr()["configuration"]
if job_type in job_configuration:
for table_prop in tables_prop:
if table_prop in job_configuration[job_type]:
table = job_configuration[job_type][table_prop]
persist_kwargs = {
"context": context,
"task_instance": self,
"table_id": table,
}
if not isinstance(table, str):
persist_kwargs["table_id"] = table["tableId"]
persist_kwargs["dataset_id"] = table["datasetId"]
persist_kwargs["project_id"] = table["projectId"]
BigQueryTableLink.persist(**persist_kwargs)
self.job_id = job.job_id
context["ti"].xcom_push(key="job_id", value=self.job_id)
if self.deferrable:
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryInsertJobTrigger(
conn_id=self.gcp_conn_id,
job_id=self.job_id,
project_id=self.project_id or self.hook.project_id,
),
method_name="execute_complete",
)
else:
job.result(timeout=self.result_timeout, retry=self.result_retry)
self._handle_job_error(job)
if self.max_id_key:
return self._find_max_value_in_column()
def execute_complete(self, context: Context, event: dict[str, Any]):
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
return self._find_max_value_in_column()
def _find_max_value_in_column(self):
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if self.max_id_key:
self.log.info(f"Selecting the MAX value from BigQuery column '{self.max_id_key}'...")
select_command = (
f"SELECT MAX({self.max_id_key}) AS max_value "
f"FROM {self.destination_project_dataset_table}"
)
self.configuration = {
"query": {
"query": select_command,
"useLegacySql": False,
"schemaUpdateOptions": [],
}
}
try:
job_id = hook.insert_job(
configuration=self.configuration, project_id=self.project_id or hook.project_id
)
rows = list(hook.get_job(job_id=job_id, location=self.location).result())
except BadRequest as e:
if "Unrecognized name:" in e.message:
raise AirflowException(
f"Could not determine MAX value in column {self.max_id_key} "
f"since the default value of 'string_field_n' was set by BQ"
)
else:
raise AirflowException(e.message)
if rows:
for row in rows:
max_id = row[0] if row[0] else 0
self.log.info(
"Loaded BQ data with MAX value of column %s.%s: %s",
self.destination_project_dataset_table,
self.max_id_key,
max_id,
)
return str(max_id)
else:
raise RuntimeError(f"The {select_command} returned no rows!")
def _create_external_table(self):
external_config_api_repr = {
"autodetect": self.autodetect,
"sourceFormat": self.source_format,
"sourceUris": self.source_uris,
"compression": self.compression.upper(),
"ignoreUnknownValues": self.ignore_unknown_values,
}
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
"skipLeadingRows": self.skip_leading_rows,
"fieldDelimiter": self.field_delimiter,
"quote": self.quote_character,
"allowQuotedNewlines": self.allow_quoted_newlines,
"allowJaggedRows": self.allow_jagged_rows,
"encoding": self.encoding,
}
src_fmt_to_param_mapping = {"CSV": "csvOptions", "GOOGLE_SHEETS": "googleSheetsOptions"}
src_fmt_to_configs_mapping = {
"csvOptions": [
"allowJaggedRows",
"allowQuotedNewlines",
"fieldDelimiter",
"skipLeadingRows",
"quote",
"encoding",
"preserveAsciiControlCharacters",
],
"googleSheetsOptions": ["skipLeadingRows"],
}
if self.source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[src_fmt_to_param_mapping[self.source_format]]
self.src_fmt_configs = self._validate_src_fmt_configs(
self.source_format, self.src_fmt_configs, valid_configs, backward_compatibility_configs
)
external_config_api_repr[src_fmt_to_param_mapping[self.source_format]] = self.src_fmt_configs
external_config = ExternalConfig.from_api_repr(external_config_api_repr)
if self.schema_fields:
external_config.schema = [SchemaField.from_api_repr(f) for f in self.schema_fields]
if self.max_bad_records:
external_config.max_bad_records = self.max_bad_records
# build table definition
table = Table(
table_ref=TableReference.from_string(self.destination_project_dataset_table, self.hook.project_id)
)
table.external_data_configuration = external_config
if self.labels:
table.labels = self.labels
if self.description:
table.description = self.description
if self.encryption_configuration:
table.encryption_configuration = EncryptionConfiguration.from_api_repr(
self.encryption_configuration
)
table_obj_api_repr = table.to_api_repr()
self.log.info("Creating external table: %s", self.destination_project_dataset_table)
self.hook.create_empty_table(
table_resource=table_obj_api_repr,
project_id=self.project_id or self.hook.project_id,
location=self.location,
exists_ok=True,
)
self.log.info("External table created successfully: %s", self.destination_project_dataset_table)
return table_obj_api_repr
def _use_existing_table(self):
destination_project_id, destination_dataset, destination_table = self.hook.split_tablename(
table_input=self.destination_project_dataset_table,
default_project_id=self.hook.project_id,
var_name="destination_project_dataset_table",
)
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = ["ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"]
if not set(allowed_schema_update_options).issuperset(set(self.schema_update_options)):
raise ValueError(
f"{self.schema_update_options} contains invalid schema update options. "
f"Please only use one or more of the following options: {allowed_schema_update_options}"
)
self.configuration = {
"load": {
"autodetect": self.autodetect,
"createDisposition": self.create_disposition,
"destinationTable": {
"projectId": destination_project_id,
"datasetId": destination_dataset,
"tableId": destination_table,
},
"sourceFormat": self.source_format,
"sourceUris": self.source_uris,
"writeDisposition": self.write_disposition,
"ignoreUnknownValues": self.ignore_unknown_values,
},
}
self.time_partitioning = self._cleanse_time_partitioning(
self.destination_project_dataset_table, self.time_partitioning
)
if self.time_partitioning:
self.configuration["load"].update({"timePartitioning": self.time_partitioning})
if self.cluster_fields:
self.configuration["load"].update({"clustering": {"fields": self.cluster_fields}})
if self.schema_fields:
self.configuration["load"]["schema"] = {"fields": self.schema_fields}
if self.schema_update_options:
if self.write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
else:
# To provide backward compatibility
self.schema_update_options = list(self.schema_update_options or [])
self.log.info("Adding experimental 'schemaUpdateOptions': %s", self.schema_update_options)
self.configuration["load"]["schemaUpdateOptions"] = self.schema_update_options
if self.max_bad_records:
self.configuration["load"]["maxBadRecords"] = self.max_bad_records
if self.encryption_configuration:
self.configuration["load"]["destinationEncryptionConfiguration"] = self.encryption_configuration
if self.labels or self.description:
self.configuration["load"].update({"destinationTableProperties": {}})
if self.labels:
self.configuration["load"]["destinationTableProperties"]["labels"] = self.labels
if self.description:
self.configuration["load"]["destinationTableProperties"]["description"] = self.description
src_fmt_to_configs_mapping = {
"CSV": [
"allowJaggedRows",
"allowQuotedNewlines",
"autodetect",
"fieldDelimiter",
"skipLeadingRows",
"ignoreUnknownValues",
"nullMarker",
"quote",
"encoding",
],
"DATASTORE_BACKUP": ["projectionFields"],
"NEWLINE_DELIMITED_JSON": ["autodetect", "ignoreUnknownValues"],
"PARQUET": ["autodetect", "ignoreUnknownValues"],
"AVRO": ["useAvroLogicalTypes"],
}
valid_configs = src_fmt_to_configs_mapping[self.source_format]
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
"skipLeadingRows": self.skip_leading_rows,
"fieldDelimiter": self.field_delimiter,
"ignoreUnknownValues": self.ignore_unknown_values,
"quote": self.quote_character,
"allowQuotedNewlines": self.allow_quoted_newlines,
"encoding": self.encoding,
}
self.src_fmt_configs = self._validate_src_fmt_configs(
self.source_format, self.src_fmt_configs, valid_configs, backward_compatibility_configs
)
self.configuration["load"].update(self.src_fmt_configs)
if self.allow_jagged_rows:
self.configuration["load"]["allowJaggedRows"] = self.allow_jagged_rows
return self.configuration
def _validate_src_fmt_configs(
self,
source_format: str,
src_fmt_configs: dict,
valid_configs: list[str],
backward_compatibility_configs: dict | None = None,
) -> dict:
"""
Validates the given src_fmt_configs against a valid configuration for the source format.
Adds the backward compatibility config to the src_fmt_configs.
:param source_format: File format to export.
:param src_fmt_configs: Configure optional fields specific to the source format.
:param valid_configs: Valid configuration specific to the source format
:param backward_compatibility_configs: The top-level params for backward-compatibility
"""
if backward_compatibility_configs is None:
backward_compatibility_configs = {}
for k, v in backward_compatibility_configs.items():
if k not in src_fmt_configs and k in valid_configs:
src_fmt_configs[k] = v
for k, v in src_fmt_configs.items():
if k not in valid_configs:
raise ValueError(f"{k} is not a valid src_fmt_configs for type {source_format}.")
return src_fmt_configs
def _cleanse_time_partitioning(
self, destination_dataset_table: str | None, time_partitioning_in: dict | None
) -> dict: # if it is a partitioned table ($ is in the table name) add partition load option
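        # For example (hypothetical table): "example_dataset.sales$20180101" yields
        # {"type": "DAY"}, merged with any explicitly passed time_partitioning settings.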
if time_partitioning_in is None:
time_partitioning_in = {}
time_partitioning_out = {}
if destination_dataset_table and "$" in destination_dataset_table:
time_partitioning_out["type"] = "DAY"
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
def on_kill(self) -> None:
if self.job_id and self.cancel_on_kill:
self.hook.cancel_job(job_id=self.job_id, location=self.location) # type: ignore[union-attr]
else:
self.log.info("Skipping to cancel job: %s.%s", self.location, self.job_id)
| 33,531 | 44.746248 | 132 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/gdrive_to_gcs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.suite.hooks.drive import GoogleDriveHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GoogleDriveToGCSOperator(BaseOperator):
"""
Writes a Google Drive file into Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDriveToGCSOperator`
    :param bucket_name: The destination Google Cloud Storage bucket to which the
        file should be written
:param object_name: The Google Cloud Storage object name for the object created by the operator.
For example: ``path/to/my/file/file.txt``.
:param folder_id: The folder id of the folder in which the Google Drive file resides
:param file_name: The name of the file residing in Google Drive
:param drive_id: Optional. The id of the shared Google Drive in which the file resides.
:param gcp_conn_id: The GCP connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket_name",
"object_name",
"folder_id",
"file_name",
"drive_id",
"impersonation_chain",
)
def __init__(
self,
*,
bucket_name: str,
object_name: str | None = None,
file_name: str,
folder_id: str,
drive_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.object_name = object_name
self.folder_id = folder_id
self.drive_id = drive_id
self.file_name = file_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
gdrive_hook = GoogleDriveHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
file_metadata = gdrive_hook.get_file_id(
folder_id=self.folder_id, file_name=self.file_name, drive_id=self.drive_id
)
with gcs_hook.provide_file_and_upload(
bucket_name=self.bucket_name, object_name=self.object_name
) as file:
gdrive_hook.download_file(file_id=file_metadata["id"], file_handle=file)
| 4,149 | 40.089109 | 100 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/s3_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import datetime
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import (
ACCESS_KEY_ID,
AWS_ACCESS_KEY,
AWS_S3_DATA_SOURCE,
AWS_SECRET_ACCESS_KEY,
BUCKET_NAME,
GCS_DATA_SINK,
INCLUDE_PREFIXES,
OBJECT_CONDITIONS,
OVERWRITE_OBJECTS_ALREADY_EXISTING_IN_SINK,
PATH,
PROJECT_ID,
SCHEDULE,
SCHEDULE_END_DATE,
SCHEDULE_START_DATE,
STATUS,
TRANSFER_OPTIONS,
TRANSFER_SPEC,
CloudDataTransferServiceHook,
GcpTransferJobsStatus,
)
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url, gcs_object_is_directory
from airflow.providers.google.cloud.triggers.cloud_storage_transfer_service import (
CloudStorageTransferServiceCreateJobsTrigger,
)
try:
from airflow.providers.amazon.aws.operators.s3 import S3ListOperator
except ImportError:
from airflow.providers.amazon.aws.operators.s3_list import S3ListOperator # type: ignore[no-redef]
if TYPE_CHECKING:
from airflow.utils.context import Context
class S3ToGCSOperator(S3ListOperator):
"""
Synchronizes an S3 key, possibly a prefix, with a Google Cloud Storage destination path.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ToGCSOperator`
:param bucket: The S3 bucket where to find the objects. (templated)
    :param prefix: Prefix string which filters objects whose names begin with
        this prefix. (templated)
    :param apply_gcs_prefix: (Optional) Whether to replace the source objects' path with the given GCS destination path.
        If apply_gcs_prefix is False (default), objects from S3 will be copied to the GCS bucket under the given
        GCS path, with the source path kept inside it. For example:
        <s3_bucket><s3_prefix><content> => <gcs_prefix><s3_prefix><content>
        If apply_gcs_prefix is True, objects from S3 will be copied to the GCS bucket under the given
        GCS path, with the source path omitted. For example:
        <s3_bucket><s3_prefix><content> => <gcs_prefix><content>
:param delimiter: the delimiter marks key hierarchy. (templated)
:param aws_conn_id: The source S3 connection
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param dest_gcs: The destination Google Cloud Storage bucket and prefix
where you want to store the files. (templated)
:param replace: Whether you want to replace existing destination files
or not.
:param gzip: Option to compress file for upload. Parameter ignored in deferrable mode.
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode
:param poll_interval: time in seconds between polling for job completion.
The value is considered only when running in deferrable mode. Must be greater than 0.
**Example**:
.. code-block:: python
s3_to_gcs_op = S3ToGCSOperator(
task_id="s3_to_gcs_example",
bucket="my-s3-bucket",
prefix="data/customers-201804",
gcp_conn_id="google_cloud_default",
dest_gcs="gs://my.gcs.bucket/some/customers/",
replace=False,
gzip=True,
dag=my_dag,
)
Note that ``bucket``, ``prefix``, ``delimiter`` and ``dest_gcs`` are
templated, so you can use variables in them if you wish.
"""
template_fields: Sequence[str] = (
"bucket",
"prefix",
"delimiter",
"dest_gcs",
"google_impersonation_chain",
)
ui_color = "#e09411"
transfer_job_max_files_number = 1000
def __init__(
self,
*,
bucket,
prefix="",
apply_gcs_prefix=False,
delimiter="",
aws_conn_id="aws_default",
verify=None,
gcp_conn_id="google_cloud_default",
dest_gcs=None,
replace=False,
gzip=False,
google_impersonation_chain: str | Sequence[str] | None = None,
deferrable=conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 10,
**kwargs,
):
super().__init__(bucket=bucket, prefix=prefix, delimiter=delimiter, aws_conn_id=aws_conn_id, **kwargs)
self.apply_gcs_prefix = apply_gcs_prefix
self.gcp_conn_id = gcp_conn_id
self.dest_gcs = dest_gcs
self.replace = replace
self.verify = verify
self.gzip = gzip
self.google_impersonation_chain = google_impersonation_chain
self.deferrable = deferrable
if poll_interval <= 0:
raise ValueError("Invalid value for poll_interval. Expected value greater than 0")
self.poll_interval = poll_interval
def _check_inputs(self) -> None:
if self.dest_gcs and not gcs_object_is_directory(self.dest_gcs):
self.log.info(
"Destination Google Cloud Storage path is not a valid "
'"directory", define a path that ends with a slash "/" or '
"leave it empty for the root of the bucket."
)
raise AirflowException(
'The destination Google Cloud Storage path must end with a slash "/" or be empty.'
)
def execute(self, context: Context):
self._check_inputs()
# use the super method to list all the files in an S3 bucket/key
s3_objects = super().execute(context)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
if not self.replace:
s3_objects = self.exclude_existing_objects(s3_objects=s3_objects, gcs_hook=gcs_hook)
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
if not s3_objects:
self.log.info("In sync, no files needed to be uploaded to Google Cloud Storage")
elif self.deferrable:
self.transfer_files_async(s3_objects, gcs_hook, s3_hook)
else:
self.transfer_files(s3_objects, gcs_hook, s3_hook)
return s3_objects
def exclude_existing_objects(self, s3_objects: list[str], gcs_hook: GCSHook) -> list[str]:
"""Excludes from the list objects that already exist in GCS bucket."""
bucket_name, object_prefix = _parse_gcs_url(self.dest_gcs)
existing_gcs_objects = set(gcs_hook.list(bucket_name, prefix=object_prefix))
s3_paths = set(self.gcs_to_s3_object(gcs_object=gcs_object) for gcs_object in existing_gcs_objects)
s3_objects_reduced = list(set(s3_objects) - s3_paths)
if s3_objects_reduced:
self.log.info("%s files are going to be synced: %s.", len(s3_objects_reduced), s3_objects_reduced)
else:
self.log.info("There are no new files to sync. Have a nice day!")
return s3_objects_reduced
def s3_to_gcs_object(self, s3_object: str) -> str:
"""
Transforms S3 path to GCS path according to the operator's logic.
If apply_gcs_prefix == True then <s3_prefix><content> => <gcs_prefix><content>
If apply_gcs_prefix == False then <s3_prefix><content> => <gcs_prefix><s3_prefix><content>
"""
gcs_bucket, gcs_prefix = _parse_gcs_url(self.dest_gcs)
if self.apply_gcs_prefix:
gcs_object = s3_object.replace(self.prefix, gcs_prefix, 1)
return gcs_object
return gcs_prefix + s3_object
def gcs_to_s3_object(self, gcs_object: str) -> str:
"""
Transforms GCS path to S3 path according to the operator's logic.
If apply_gcs_prefix == True then <gcs_prefix><content> => <s3_prefix><content>
If apply_gcs_prefix == False then <gcs_prefix><s3_prefix><content> => <s3_prefix><content>
"""
gcs_bucket, gcs_prefix = _parse_gcs_url(self.dest_gcs)
s3_object = gcs_object.replace(gcs_prefix, "", 1)
if self.apply_gcs_prefix:
return self.prefix + s3_object
return s3_object
def transfer_files(self, s3_objects: list[str], gcs_hook: GCSHook, s3_hook: S3Hook) -> None:
if s3_objects:
dest_gcs_bucket, dest_gcs_object_prefix = _parse_gcs_url(self.dest_gcs)
for obj in s3_objects:
# GCS hook builds its own in-memory file, so we have to create
# and pass the path
file_object = s3_hook.get_key(obj, self.bucket)
with NamedTemporaryFile(mode="wb", delete=True) as file:
file_object.download_fileobj(file)
file.flush()
gcs_file = self.s3_to_gcs_object(s3_object=obj)
gcs_hook.upload(dest_gcs_bucket, gcs_file, file.name, gzip=self.gzip)
self.log.info("All done, uploaded %d files to Google Cloud Storage", len(s3_objects))
def transfer_files_async(self, files: list[str], gcs_hook: GCSHook, s3_hook: S3Hook) -> None:
"""Submits Google Cloud Storage Transfer Service job to copy files from AWS S3 to GCS."""
if not len(files):
raise ValueError("List of transferring files cannot be empty")
job_names = self.submit_transfer_jobs(files=files, gcs_hook=gcs_hook, s3_hook=s3_hook)
self.defer(
trigger=CloudStorageTransferServiceCreateJobsTrigger(
project_id=gcs_hook.project_id,
job_names=job_names,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
def submit_transfer_jobs(self, files: list[str], gcs_hook: GCSHook, s3_hook: S3Hook) -> list[str]:
now = datetime.utcnow()
one_time_schedule = {"day": now.day, "month": now.month, "year": now.year}
gcs_bucket, gcs_prefix = _parse_gcs_url(self.dest_gcs)
config = s3_hook.conn_config
body: dict[str, Any] = {
PROJECT_ID: gcs_hook.project_id,
STATUS: GcpTransferJobsStatus.ENABLED,
SCHEDULE: {
SCHEDULE_START_DATE: one_time_schedule,
SCHEDULE_END_DATE: one_time_schedule,
},
TRANSFER_SPEC: {
AWS_S3_DATA_SOURCE: {
BUCKET_NAME: self.bucket,
AWS_ACCESS_KEY: {
ACCESS_KEY_ID: config.aws_access_key_id,
AWS_SECRET_ACCESS_KEY: config.aws_secret_access_key,
},
},
OBJECT_CONDITIONS: {
INCLUDE_PREFIXES: [],
},
GCS_DATA_SINK: {BUCKET_NAME: gcs_bucket, PATH: gcs_prefix},
TRANSFER_OPTIONS: {
OVERWRITE_OBJECTS_ALREADY_EXISTING_IN_SINK: self.replace,
},
},
}
# max size of the field 'transfer_job.transfer_spec.object_conditions.include_prefixes' is 1000,
# that's why we submit multiple jobs transferring 1000 files each.
# See documentation below
# https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#ObjectConditions
chunk_size = self.transfer_job_max_files_number
job_names = []
transfer_hook = self.get_transfer_hook()
for i in range(0, len(files), chunk_size):
files_chunk = files[i : i + chunk_size]
body[TRANSFER_SPEC][OBJECT_CONDITIONS][INCLUDE_PREFIXES] = files_chunk
job = transfer_hook.create_transfer_job(body=body)
s = "s" if len(files_chunk) > 1 else ""
self.log.info(f"Submitted job {job['name']} to transfer {len(files_chunk)} file{s}")
job_names.append(job["name"])
if len(files) > chunk_size:
js = "s" if len(job_names) > 1 else ""
fs = "s" if len(files) > 1 else ""
self.log.info(f"Overall submitted {len(job_names)} job{js} to transfer {len(files)} file{fs}")
return job_names
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info("%s completed with response %s ", self.task_id, event["message"])
def get_transfer_hook(self):
return CloudDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
| 14,900 | 41.696275 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/gdrive_to_local.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.suite.hooks.drive import GoogleDriveHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GoogleDriveToLocalOperator(BaseOperator):
"""
Writes a Google Drive file into local Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDriveToLocalOperator`
:param output_file: Path to downloaded file
:param folder_id: The folder id of the folder in which the Google Drive file resides
:param file_name: The name of the file residing in Google Drive
:param gcp_conn_id: The GCP connection ID to use when fetching connection info.
:param drive_id: Optional. The id of the shared Google Drive in which the file resides.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"output_file",
"folder_id",
"file_name",
"drive_id",
"impersonation_chain",
)
def __init__(
self,
*,
output_file: str,
file_name: str,
folder_id: str,
drive_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.output_file = output_file
self.folder_id = folder_id
self.drive_id = drive_id
self.file_name = file_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
self.log.info("Executing download: %s into %s", self.file_name, self.output_file)
gdrive_hook = GoogleDriveHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
file_metadata = gdrive_hook.get_file_id(
folder_id=self.folder_id, file_name=self.file_name, drive_id=self.drive_id
)
with open(self.output_file, "wb") as file:
gdrive_hook.download_file(file_id=file_metadata["id"], file_handle=file)
| 3,634 | 39.388889 | 93 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/facebook_ads_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Facebook Ad Reporting to GCS operators."""
from __future__ import annotations
import csv
import tempfile
from enum import Enum
from typing import TYPE_CHECKING, Any, Sequence
from facebook_business.adobjects.adsinsights import AdsInsights
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.facebook.ads.hooks.ads import FacebookAdsReportingHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class FlushAction(Enum):
"""Facebook Ads Export Options."""
EXPORT_ONCE = "ExportAtOnce"
EXPORT_EVERY_ACCOUNT = "ExportEveryAccount"
class FacebookAdsReportToGcsOperator(BaseOperator):
"""Fetch from Facebook Ads API.
This converts and saves the data as a temporary JSON file, and uploads the
JSON to Google Cloud Storage.
.. seealso::
For more information on the Facebook Ads API, take a look at the API docs:
https://developers.facebook.com/docs/marketing-apis/
.. seealso::
For more information on the Facebook Ads Python SDK, take a look at the docs:
https://github.com/facebook/facebook-python-business-sdk
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FacebookAdsReportToGcsOperator`
:param bucket_name: The GCS bucket to upload to
:param object_name: GCS path to save the object. Must be the full file path (ex. `path/to/file.txt`)
:param gcp_conn_id: Airflow Google Cloud connection ID
:param facebook_conn_id: Airflow Facebook Ads connection ID
    :param api_version: The version of the Facebook API. Defaults to None. If it is None,
        the Facebook Business SDK default version is used.
    :param fields: List of fields to obtain from Facebook. Found in the AdsInsights.Field class.
https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0
:param parameters: Parameters that determine the query for Facebook
https://developers.facebook.com/docs/marketing-api/insights/parameters/v6.0
:param gzip: Option to compress local file or file data for upload
    :param upload_as_account: Option to export the file with account_id.
        This parameter only works if Account Id is set as an array in the Facebook connection.
        If set to True, each account is exported to a separate file prefixed with its account_id.
        If set to False, a single file is exported for all account_ids.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"facebook_conn_id",
"bucket_name",
"object_name",
"impersonation_chain",
"parameters",
)
def __init__(
self,
*,
bucket_name: str,
object_name: str,
fields: list[str],
parameters: dict[str, Any] | None = None,
gzip: bool = False,
upload_as_account: bool = False,
api_version: str | None = None,
gcp_conn_id: str = "google_cloud_default",
facebook_conn_id: str = "facebook_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.object_name = object_name
self.gcp_conn_id = gcp_conn_id
self.facebook_conn_id = facebook_conn_id
self.api_version = api_version
self.fields = fields
self.parameters = parameters
self.gzip = gzip
self.upload_as_account = upload_as_account
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
service = FacebookAdsReportingHook(
facebook_conn_id=self.facebook_conn_id, api_version=self.api_version
)
bulk_report = service.bulk_facebook_report(params=self.parameters, fields=self.fields)
if isinstance(bulk_report, list):
converted_rows_with_action = self._generate_rows_with_action(False)
converted_rows_with_action = self._prepare_rows_for_upload(
rows=bulk_report, converted_rows_with_action=converted_rows_with_action, account_id=None
)
elif isinstance(bulk_report, dict):
converted_rows_with_action = self._generate_rows_with_action(True)
for account_id in bulk_report.keys():
rows = bulk_report.get(account_id, [])
if rows:
converted_rows_with_action = self._prepare_rows_for_upload(
rows=rows,
converted_rows_with_action=converted_rows_with_action,
account_id=account_id,
)
else:
self.log.warning("account_id: %s returned empty report", str(account_id))
else:
message = (
"Facebook Ads Hook returned different type than expected. Expected return types should be "
"List or Dict. Actual return type of the Hook: " + str(type(bulk_report))
)
raise AirflowException(message)
total_row_count = self._decide_and_flush(converted_rows_with_action=converted_rows_with_action)
self.log.info("Facebook Returned %s data points in total: ", total_row_count)
def _generate_rows_with_action(self, type_check: bool):
if type_check and self.upload_as_account:
return {FlushAction.EXPORT_EVERY_ACCOUNT: []}
else:
return {FlushAction.EXPORT_ONCE: []}
def _prepare_rows_for_upload(
self,
rows: list[AdsInsights],
converted_rows_with_action: dict[FlushAction, list],
account_id: str | None,
):
converted_rows = [dict(row) for row in rows]
if account_id is not None and self.upload_as_account:
converted_rows_with_action[FlushAction.EXPORT_EVERY_ACCOUNT].append(
{"account_id": account_id, "converted_rows": converted_rows}
)
self.log.info(
"Facebook Returned %s data points for account_id: %s", len(converted_rows), account_id
)
else:
converted_rows_with_action[FlushAction.EXPORT_ONCE].extend(converted_rows)
self.log.info("Facebook Returned %s data points ", len(converted_rows))
return converted_rows_with_action
def _decide_and_flush(self, converted_rows_with_action: dict[FlushAction, list]):
total_data_count = 0
once_action = converted_rows_with_action.get(FlushAction.EXPORT_ONCE)
if once_action is not None:
self._flush_rows(
converted_rows=once_action,
object_name=self.object_name,
)
total_data_count += len(once_action)
else:
every_account_action = converted_rows_with_action.get(FlushAction.EXPORT_EVERY_ACCOUNT)
if every_account_action:
for converted_rows in every_account_action:
self._flush_rows(
converted_rows=converted_rows.get("converted_rows"),
object_name=self._transform_object_name_with_account_id(
account_id=converted_rows.get("account_id")
),
)
total_data_count += len(converted_rows.get("converted_rows"))
else:
message = (
"FlushAction not found in the data. Please check the FlushAction in "
"the operator. Converted Rows with Action: " + str(converted_rows_with_action)
)
raise AirflowException(message)
return total_data_count
def _flush_rows(self, converted_rows: list[Any] | None, object_name: str):
if converted_rows:
headers = converted_rows[0].keys()
with tempfile.NamedTemporaryFile("w", suffix=".csv") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
writer.writeheader()
writer.writerows(converted_rows)
csvfile.flush()
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.upload(
bucket_name=self.bucket_name,
object_name=object_name,
filename=csvfile.name,
gzip=self.gzip,
)
self.log.info("%s uploaded to GCS", csvfile.name)
def _transform_object_name_with_account_id(self, account_id: str):
directory_parts = self.object_name.split("/")
directory_parts[len(directory_parts) - 1] = (
account_id + "_" + directory_parts[len(directory_parts) - 1]
)
return "/".join(directory_parts)
| 10,401 | 43.643777 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/salesforce_to_gcs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import tempfile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.salesforce.hooks.salesforce import SalesforceHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SalesforceToGcsOperator(BaseOperator):
"""
Submits Salesforce query and uploads results to Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SalesforceToGcsOperator`
:param query: The query to make to Salesforce.
:param bucket_name: The bucket to upload to.
:param object_name: The object name to set when uploading the file.
:param salesforce_conn_id: the name of the connection that has the parameters
we need to connect to Salesforce.
:param include_deleted: True if the query should include deleted records.
:param query_params: Additional optional arguments
:param export_format: Desired format of files to be exported.
:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
False if you want them to be left in the same format as they were in Salesforce.
Leaving the value as False will result in datetimes being strings. Default: False
:param record_time_added: True if you want to add a Unix timestamp field
to the resulting data that marks when the data was fetched from Salesforce. Default: False
:param gzip: Option to compress local file or file data for upload
:param gcp_conn_id: the name of the connection that has the parameters we need to connect to GCS.
"""
template_fields: Sequence[str] = (
"query",
"bucket_name",
"object_name",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
def __init__(
self,
*,
query: str,
bucket_name: str,
object_name: str,
salesforce_conn_id: str,
include_deleted: bool = False,
query_params: dict | None = None,
export_format: str = "csv",
coerce_to_timestamp: bool = False,
record_time_added: bool = False,
gzip: bool = False,
gcp_conn_id: str = "google_cloud_default",
**kwargs,
):
super().__init__(**kwargs)
self.query = query
self.bucket_name = bucket_name
self.object_name = object_name
self.salesforce_conn_id = salesforce_conn_id
self.export_format = export_format
self.coerce_to_timestamp = coerce_to_timestamp
self.record_time_added = record_time_added
self.gzip = gzip
self.gcp_conn_id = gcp_conn_id
self.include_deleted = include_deleted
self.query_params = query_params
def execute(self, context: Context):
salesforce = SalesforceHook(salesforce_conn_id=self.salesforce_conn_id)
response = salesforce.make_query(
query=self.query, include_deleted=self.include_deleted, query_params=self.query_params
)
with tempfile.TemporaryDirectory() as tmp:
path = os.path.join(tmp, "salesforce_temp_file")
salesforce.write_object_to_file(
query_results=response["records"],
filename=path,
fmt=self.export_format,
coerce_to_timestamp=self.coerce_to_timestamp,
record_time_added=self.record_time_added,
)
hook = GCSHook(gcp_conn_id=self.gcp_conn_id)
hook.upload(
bucket_name=self.bucket_name,
object_name=self.object_name,
filename=path,
gzip=self.gzip,
)
gcs_uri = f"gs://{self.bucket_name}/{self.object_name}"
self.log.info("%s uploaded to GCS", gcs_uri)
return gcs_uri
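# Editor's note: a minimal usage sketch, not part of the upstream module. The task id,
# query, bucket, object name and connection ids below are illustrative assumptions; in a
# real deployment the operator is instantiated inside a DAG definition.
def _example_salesforce_to_gcs_task() -> SalesforceToGcsOperator:
    """Build a sample task that exports Salesforce Leads to a CSV object in GCS."""
    return SalesforceToGcsOperator(
        task_id="upload_leads_to_gcs",
        query="SELECT Id, Name, Company FROM Lead",
        bucket_name="example-salesforce-export",
        object_name="exports/leads.csv",
        salesforce_conn_id="salesforce_default",
        export_format="csv",
        coerce_to_timestamp=True,
        gcp_conn_id="google_cloud_default",
    )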
| 4,794 | 38.958333 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/postgres_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""PostgreSQL to GCS operator."""
from __future__ import annotations
import datetime
import json
import time
import uuid
from decimal import Decimal
import pendulum
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook
class _PostgresServerSideCursorDecorator:
"""
    Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep the implementations consistent.
    Decorator that makes the cursor ``description`` available when a server-side Postgres
    cursor is used. It exposes only the methods needed by ``BaseSQLToGCSOperator``,
    which is more of a safety feature.
"""
def __init__(self, cursor):
self.cursor = cursor
self.rows = []
self.initialized = False
def __iter__(self):
return self
def __next__(self):
if self.rows:
return self.rows.pop()
else:
self.initialized = True
return next(self.cursor)
@property
def description(self):
"""Fetch first row to initialize cursor description when using server side cursor."""
if not self.initialized:
element = self.cursor.fetchone()
if element is not None:
self.rows.append(element)
self.initialized = True
return self.cursor.description
class PostgresToGCSOperator(BaseSQLToGCSOperator):
"""
Copy data from Postgres to Google Cloud Storage in JSON, CSV or Parquet format.
:param postgres_conn_id: Reference to a specific Postgres hook.
    :param use_server_side_cursor: Whether a server-side cursor should be used when querying Postgres.
        For detailed info, check https://www.psycopg.org/docs/usage.html#server-side-cursors
    :param cursor_itersize: How many records are fetched at a time when a server-side cursor is used.
"""
ui_color = "#a0e08c"
type_map = {
1114: "DATETIME",
1184: "TIMESTAMP",
1082: "DATE",
1083: "TIME",
1005: "INTEGER",
1007: "INTEGER",
1016: "INTEGER",
20: "INTEGER",
21: "INTEGER",
23: "INTEGER",
16: "BOOL",
700: "FLOAT",
701: "FLOAT",
1700: "FLOAT",
}
def __init__(
self,
*,
postgres_conn_id="postgres_default",
use_server_side_cursor=False,
cursor_itersize=2000,
**kwargs,
):
super().__init__(**kwargs)
self.postgres_conn_id = postgres_conn_id
self.use_server_side_cursor = use_server_side_cursor
self.cursor_itersize = cursor_itersize
def _unique_name(self):
return f"{self.dag_id}__{self.task_id}__{uuid.uuid4()}" if self.use_server_side_cursor else None
def query(self):
"""Queries Postgres and returns a cursor to the results."""
hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
conn = hook.get_conn()
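        # Passing a non-None name to cursor() makes psycopg2 create a server-side (named)
        # cursor; with None a regular client-side cursor is returned.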
cursor = conn.cursor(name=self._unique_name())
cursor.execute(self.sql, self.parameters)
if self.use_server_side_cursor:
cursor.itersize = self.cursor_itersize
return _PostgresServerSideCursorDecorator(cursor)
return cursor
def field_to_bigquery(self, field) -> dict[str, str]:
return {
"name": field[0],
"type": self.type_map.get(field[1], "STRING"),
"mode": "REPEATED" if field[1] in (1009, 1005, 1007, 1016) else "NULLABLE",
}
def convert_type(self, value, schema_type, stringify_dict=True):
"""
Take a value from Postgres and convert it to a value safe for JSON/Google Cloud Storage/BigQuery.
        Timezone-aware datetimes are converted to UTC seconds.
        Naive datetimes, dates and times are converted to ISO-formatted strings.
Decimals are converted to floats.
:param value: Postgres column value.
:param schema_type: BigQuery data type.
:param stringify_dict: Specify whether to convert dict to string.
"""
if isinstance(value, datetime.datetime):
iso_format_value = value.isoformat()
if value.tzinfo is None:
return iso_format_value
return pendulum.parse(iso_format_value).float_timestamp
if isinstance(value, datetime.date):
return value.isoformat()
if isinstance(value, datetime.time):
formatted_time = time.strptime(str(value), "%H:%M:%S")
time_delta = datetime.timedelta(
hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec
)
return str(time_delta)
if stringify_dict and isinstance(value, dict):
return json.dumps(value)
if isinstance(value, Decimal):
return float(value)
return value
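# Editor's note: an illustrative configuration sketch, not part of the upstream module. The
# SQL, bucket, filename and connection id are assumptions; ``sql``, ``bucket``, ``filename``
# and ``export_format`` are parameters inherited from ``BaseSQLToGCSOperator``.
def _example_postgres_to_gcs_task() -> PostgresToGCSOperator:
    """Build a sample task that streams a large table to newline-delimited JSON in GCS."""
    return PostgresToGCSOperator(
        task_id="postgres_to_gcs",
        postgres_conn_id="postgres_default",
        sql="SELECT * FROM big_table",
        bucket="example-bucket",
        filename="exports/big_table_{}.json",
        export_format="json",
        use_server_side_cursor=True,  # stream rows instead of materializing the full result set
        cursor_itersize=5000,
    )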
| 5,675 | 34.254658 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/mssql_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MsSQL to GCS operator."""
from __future__ import annotations
import datetime
import decimal
from typing import Sequence
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
class MSSQLToGCSOperator(BaseSQLToGCSOperator):
"""
Copy data from Microsoft SQL Server to Google Cloud Storage in JSON, CSV or Parquet format.
    :param bit_fields: Sequence of field names of the MSSQL "BIT" data type,
        to be interpreted in the schema as "BOOLEAN". "BIT" fields that are not
        included in this sequence will be interpreted as "INTEGER" by
        default.
:param mssql_conn_id: Reference to a specific MSSQL hook.
**Example**:
The following operator will export data from the Customers table
within the given MSSQL Database and then upload it to the
'mssql-export' GCS bucket (along with a schema file). ::
            export_customers = MSSQLToGCSOperator(
task_id='export_customers',
sql='SELECT * FROM dbo.Customers;',
bit_fields=['some_bit_field', 'another_bit_field'],
bucket='mssql-export',
filename='data/customers/export.json',
schema_filename='schemas/export.json',
mssql_conn_id='mssql_default',
gcp_conn_id='google_cloud_default',
dag=dag
)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MSSQLToGCSOperator`
"""
ui_color = "#e0a98c"
type_map = {2: "BOOLEAN", 3: "INTEGER", 4: "TIMESTAMP", 5: "NUMERIC"}
def __init__(
self,
*,
bit_fields: Sequence[str] | None = None,
mssql_conn_id="mssql_default",
**kwargs,
):
super().__init__(**kwargs)
self.mssql_conn_id = mssql_conn_id
self.bit_fields = bit_fields if bit_fields else []
def query(self):
"""
Queries MSSQL and returns a cursor of results.
:return: mssql cursor
"""
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
conn = mssql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
def field_to_bigquery(self, field) -> dict[str, str]:
if field[0] in self.bit_fields:
field = (field[0], 2)
return {
"name": field[0].replace(" ", "_"),
"type": self.type_map.get(field[1], "STRING"),
"mode": "NULLABLE",
}
@classmethod
def convert_type(cls, value, schema_type, **kwargs):
"""
Take a value from MSSQL and convert it to a value safe for JSON/Google Cloud Storage/BigQuery.
Datetime, Date and Time are converted to ISO formatted strings.
"""
if isinstance(value, decimal.Decimal):
return float(value)
if isinstance(value, (datetime.date, datetime.time)):
return value.isoformat()
return value
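# Editor's note: a small illustrative sketch, not part of the upstream module, showing how
# ``bit_fields`` changes schema detection. The operator arguments are placeholder
# assumptions; the type code 3 follows the operator's own ``type_map`` and docstring, where
# unlisted "BIT" columns fall back to "INTEGER".
def _example_bit_field_schema() -> list[dict[str, str]]:
    """Show the BigQuery field produced for a BIT column listed in ``bit_fields``."""
    op = MSSQLToGCSOperator(
        task_id="example",
        sql="SELECT is_active FROM dbo.Customers;",
        bucket="example-bucket",
        filename="exports/customers.json",
        bit_fields=["is_active"],
    )
    # Listed fields are remapped to type code 2 and therefore to "BOOLEAN".
    return [op.field_to_bigquery(("is_active", 3))]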
| 3,903 | 34.171171 | 102 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/cassandra_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator for copying data from Cassandra to Google Cloud Storage in JSON format."""
from __future__ import annotations
import json
from base64 import b64encode
from datetime import datetime
from decimal import Decimal
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Iterable, NewType, Sequence
from uuid import UUID
from cassandra.util import Date, OrderedMapSerializedKey, SortedSet, Time
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
NotSetType = NewType("NotSetType", object)
NOT_SET = NotSetType(object())
class CassandraToGCSOperator(BaseOperator):
"""
Copy data from Cassandra to Google Cloud Storage in JSON format.
Note: Arrays of arrays are not supported.
:param cql: The CQL to execute on the Cassandra table.
:param bucket: The bucket to upload to.
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
        for the table that was dumped from Cassandra.
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:param cassandra_conn_id: Reference to a specific Cassandra hook.
:param gzip: Option to compress file for upload
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param query_timeout: (Optional) The amount of time, in seconds, used to execute the Cassandra query.
If not set, the timeout value will be set in Session.execute() by Cassandra driver.
If set to None, there is no timeout.
:param encode_uuid: (Optional) Option to encode UUID or not when upload from Cassandra to GCS.
Default is to encode UUID.
"""
template_fields: Sequence[str] = (
"cql",
"bucket",
"filename",
"schema_filename",
"impersonation_chain",
)
template_ext: Sequence[str] = (".cql",)
ui_color = "#a0e08c"
def __init__(
self,
*,
cql: str,
bucket: str,
filename: str,
schema_filename: str | None = None,
approx_max_file_size_bytes: int = 1900000000,
gzip: bool = False,
cassandra_conn_id: str = "cassandra_default",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
query_timeout: float | None | NotSetType = NOT_SET,
encode_uuid: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.cql = cql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.cassandra_conn_id = cassandra_conn_id
self.gcp_conn_id = gcp_conn_id
self.gzip = gzip
self.impersonation_chain = impersonation_chain
self.query_timeout = query_timeout
self.encode_uuid = encode_uuid
# Default Cassandra to BigQuery type mapping
CQL_TYPE_MAP = {
"BytesType": "STRING",
"DecimalType": "FLOAT",
"UUIDType": "STRING",
"BooleanType": "BOOL",
"ByteType": "INTEGER",
"AsciiType": "STRING",
"FloatType": "FLOAT",
"DoubleType": "FLOAT",
"LongType": "INTEGER",
"Int32Type": "INTEGER",
"IntegerType": "INTEGER",
"InetAddressType": "STRING",
"CounterColumnType": "INTEGER",
"DateType": "TIMESTAMP",
"SimpleDateType": "DATE",
"TimestampType": "TIMESTAMP",
"TimeUUIDType": "STRING",
"ShortType": "INTEGER",
"TimeType": "TIME",
"DurationType": "INTEGER",
"UTF8Type": "STRING",
"VarcharType": "STRING",
}
def execute(self, context: Context):
hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id)
query_extra = {}
if self.query_timeout is not NOT_SET:
query_extra["timeout"] = self.query_timeout
cursor = hook.get_conn().execute(self.cql, **query_extra)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
self.log.info("Writing local schema file")
schema_file = self._write_local_schema_file(cursor)
# Flush file before uploading
schema_file["file_handle"].flush()
self.log.info("Uploading schema file to GCS.")
self._upload_to_gcs(schema_file)
schema_file["file_handle"].close()
counter = 0
self.log.info("Writing local data files")
for file_to_upload in self._write_local_data_files(cursor):
# Flush file before uploading
file_to_upload["file_handle"].flush()
self.log.info("Uploading chunk file #%d to GCS.", counter)
self._upload_to_gcs(file_to_upload)
self.log.info("Removing local file")
file_to_upload["file_handle"].close()
counter += 1
# Close all sessions and connection associated with this Cassandra cluster
hook.shutdown_cluster()
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
file_to_upload = {
"file_name": self.filename.format(file_no),
"file_handle": tmp_file_handle,
}
for row in cursor:
row_dict = self.generate_data_dict(row._fields, row)
content = json.dumps(row_dict).encode("utf-8")
tmp_file_handle.write(content)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b"\n")
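            # Start a new chunk file once the current one reaches the approximate size limit.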
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
yield file_to_upload
tmp_file_handle = NamedTemporaryFile(delete=True)
file_to_upload = {
"file_name": self.filename.format(file_no),
"file_handle": tmp_file_handle,
}
yield file_to_upload
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a local file system.
        :return: A dictionary where the key is a filename to be used as an object
            name in GCS, and the value is a file handle to a local file that
            contains the BigQuery schema fields in .json format.
"""
schema = []
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
for name, type_ in zip(cursor.column_names, cursor.column_types):
schema.append(self.generate_schema_dict(name, type_))
json_serialized_schema = json.dumps(schema).encode("utf-8")
tmp_schema_file_handle.write(json_serialized_schema)
schema_file_to_upload = {
"file_name": self.schema_filename,
"file_handle": tmp_schema_file_handle,
}
return schema_file_to_upload
def _upload_to_gcs(self, file_to_upload):
"""Upload a file (data split or schema .json file) to Google Cloud Storage."""
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.upload(
bucket_name=self.bucket,
object_name=file_to_upload.get("file_name"),
filename=file_to_upload.get("file_handle").name,
mime_type="application/json",
gzip=self.gzip,
)
def generate_data_dict(self, names: Iterable[str], values: Any) -> dict[str, Any]:
"""Generates data structure that will be stored as file in GCS."""
return {n: self.convert_value(v) for n, v in zip(names, values)}
def convert_value(self, value: Any | None) -> Any | None:
"""Convert value to BQ type."""
if not value:
return value
elif isinstance(value, (str, int, float, bool, dict)):
return value
elif isinstance(value, bytes):
return b64encode(value).decode("ascii")
elif isinstance(value, UUID):
if self.encode_uuid:
return b64encode(value.bytes).decode("ascii")
else:
return str(value)
elif isinstance(value, (datetime, Date)):
return str(value)
elif isinstance(value, Decimal):
return float(value)
elif isinstance(value, Time):
return str(value).split(".")[0]
elif isinstance(value, (list, SortedSet)):
return self.convert_array_types(value)
elif hasattr(value, "_fields"):
return self.convert_user_type(value)
elif isinstance(value, tuple):
return self.convert_tuple_type(value)
elif isinstance(value, OrderedMapSerializedKey):
return self.convert_map_type(value)
else:
raise AirflowException("Unexpected value: " + str(value))
def convert_array_types(self, value: list[Any] | SortedSet) -> list[Any]:
"""Maps convert_value over array."""
return [self.convert_value(nested_value) for nested_value in value]
def convert_user_type(self, value: Any) -> dict[str, Any]:
"""
Converts a user type to RECORD that contains n fields, where n is the number of attributes.
Each element in the user type class will be converted to its corresponding data type in BQ.
"""
names = value._fields
values = [self.convert_value(getattr(value, name)) for name in names]
return self.generate_data_dict(names, values)
def convert_tuple_type(self, values: tuple[Any]) -> dict[str, Any]:
"""
Converts a tuple to RECORD that contains n fields.
Each field will be converted to its corresponding data type in bq and
will be named 'field_<index>', where index is determined by the order
of the tuple elements defined in cassandra.
"""
names = ["field_" + str(i) for i in range(len(values))]
return self.generate_data_dict(names, values)
def convert_map_type(self, value: OrderedMapSerializedKey) -> list[dict[str, Any]]:
"""
Converts a map to a repeated RECORD that contains two fields: 'key' and 'value'.
Each will be converted to its corresponding data type in BQ.
"""
converted_map = []
for k, v in zip(value.keys(), value.values()):
converted_map.append({"key": self.convert_value(k), "value": self.convert_value(v)})
return converted_map
@classmethod
def generate_schema_dict(cls, name: str, type_: Any) -> dict[str, Any]:
"""Generates BQ schema."""
field_schema: dict[str, Any] = {}
field_schema.update({"name": name})
field_schema.update({"type_": cls.get_bq_type(type_)})
field_schema.update({"mode": cls.get_bq_mode(type_)})
fields = cls.get_bq_fields(type_)
if fields:
field_schema.update({"fields": fields})
return field_schema
@classmethod
def get_bq_fields(cls, type_: Any) -> list[dict[str, Any]]:
"""Converts non simple type value to BQ representation."""
if cls.is_simple_type(type_):
return []
# In case of not simple type
names: list[str] = []
types: list[Any] = []
if cls.is_array_type(type_) and cls.is_record_type(type_.subtypes[0]):
names = type_.subtypes[0].fieldnames
types = type_.subtypes[0].subtypes
elif cls.is_record_type(type_):
names = type_.fieldnames
types = type_.subtypes
if types and not names and type_.cassname == "TupleType":
names = ["field_" + str(i) for i in range(len(types))]
elif types and not names and type_.cassname == "MapType":
names = ["key", "value"]
return [cls.generate_schema_dict(n, t) for n, t in zip(names, types)]
@staticmethod
def is_simple_type(type_: Any) -> bool:
"""Check if type is a simple type."""
return type_.cassname in CassandraToGCSOperator.CQL_TYPE_MAP
@staticmethod
def is_array_type(type_: Any) -> bool:
"""Check if type is an array type."""
return type_.cassname in ["ListType", "SetType"]
@staticmethod
def is_record_type(type_: Any) -> bool:
"""Checks the record type."""
return type_.cassname in ["UserType", "TupleType", "MapType"]
@classmethod
def get_bq_type(cls, type_: Any) -> str:
"""Converts type to equivalent BQ type."""
if cls.is_simple_type(type_):
return CassandraToGCSOperator.CQL_TYPE_MAP[type_.cassname]
elif cls.is_record_type(type_):
return "RECORD"
elif cls.is_array_type(type_):
return cls.get_bq_type(type_.subtypes[0])
else:
raise AirflowException("Not a supported type_: " + type_.cassname)
@classmethod
def get_bq_mode(cls, type_: Any) -> str:
"""Converts type to equivalent BQ mode."""
if cls.is_array_type(type_) or type_.cassname == "MapType":
return "REPEATED"
elif cls.is_record_type(type_) or cls.is_simple_type(type_):
return "NULLABLE"
else:
raise AirflowException("Not a supported type_: " + type_.cassname)
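# Editor's note: a brief illustrative sketch, not part of the upstream module, of the value
# conversion rules implemented above. The operator arguments are placeholder assumptions.
def _example_cassandra_value_conversion() -> dict[str, Any]:
    """Show how a few Cassandra values are converted before being written to GCS."""
    op = CassandraToGCSOperator(
        task_id="example",
        cql="SELECT * FROM example_keyspace.example_table",
        bucket="example-bucket",
        filename="exports/example_table_{}.json",
        encode_uuid=False,  # keep UUIDs as plain strings instead of base64-encoded bytes
    )
    return {
        "decimal": op.convert_value(Decimal("1.5")),  # -> 1.5 (float)
        "uuid": op.convert_value(UUID(int=0)),  # -> "00000000-0000-0000-0000-000000000000"
        "bytes": op.convert_value(b"abc"),  # -> "YWJj" (base64)
        "dict": op.convert_value({"k": 1}),  # plain dicts pass through unchanged
    }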
| 15,877 | 39.09596 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/transfers/trino_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from trino.client import TrinoResult
from trino.dbapi import Cursor as TrinoCursor
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.trino.hooks.trino import TrinoHook
class _TrinoToGCSTrinoCursorAdapter:
"""
    An adapter that adds additional features to the Trino cursor.
    The implementation of the cursor in the trino library is not sufficient.
    The following changes have been made:
    * A peek mechanism for rows: you can look at the next row without consuming it.
    * The description attribute is available before reading the first row, thanks to the peek mechanism.
    * The iterator interface has been implemented.
A detailed description of the class methods is available in
`PEP-249 <https://www.python.org/dev/peps/pep-0249/>`__.
"""
def __init__(self, cursor: TrinoCursor):
self.cursor: TrinoCursor = cursor
self.rows: list[Any] = []
self.initialized: bool = False
@property
def description(self) -> list[tuple]:
"""
This read-only attribute is a sequence of 7-item sequences.
Each of these sequences contains information describing one result column:
* ``name``
* ``type_code``
* ``display_size``
* ``internal_size``
* ``precision``
* ``scale``
* ``null_ok``
The first two items (``name`` and ``type_code``) are mandatory, the other
five are optional and are set to None if no meaningful values can be provided.
"""
if not self.initialized:
# Peek for first row to load description.
self.peekone()
return self.cursor.description
@property
def rowcount(self) -> int:
"""The read-only attribute specifies the number of rows."""
return self.cursor.rowcount
def close(self) -> None:
"""Close the cursor now."""
self.cursor.close()
def execute(self, *args, **kwargs) -> TrinoResult:
"""Prepare and execute a database operation (query or command)."""
self.initialized = False
self.rows = []
return self.cursor.execute(*args, **kwargs)
def executemany(self, *args, **kwargs):
"""
Prepare and execute a database query.
Prepare a database operation (query or command) and then execute it against
all parameter sequences or mappings found in the sequence seq_of_parameters.
"""
self.initialized = False
self.rows = []
return self.cursor.executemany(*args, **kwargs)
def peekone(self) -> Any:
"""Return the next row without consuming it."""
self.initialized = True
element = self.cursor.fetchone()
self.rows.insert(0, element)
return element
def fetchone(self) -> Any:
"""Fetch the next row of a query result set, returning a single sequence, or ``None``."""
if self.rows:
return self.rows.pop(0)
return self.cursor.fetchone()
def fetchmany(self, size=None) -> list:
"""
Fetch the next set of rows of a query result, returning a sequence of sequences.
An empty sequence is returned when no more rows are available.
"""
if size is None:
size = self.cursor.arraysize
result = []
for _ in range(size):
row = self.fetchone()
if row is None:
break
result.append(row)
return result
def __next__(self) -> Any:
"""
Return the next row from the current SQL statement using the same semantics as ``.fetchone()``.
A ``StopIteration`` exception is raised when the result set is exhausted.
"""
result = self.fetchone()
if result is None:
raise StopIteration()
return result
def __iter__(self) -> _TrinoToGCSTrinoCursorAdapter:
"""Return self to make cursors compatible to the iteration protocol."""
return self
class TrinoToGCSOperator(BaseSQLToGCSOperator):
"""Copy data from TrinoDB to Google Cloud Storage in JSON, CSV or Parquet format.
:param trino_conn_id: Reference to a specific Trino hook.
"""
ui_color = "#a0e08c"
type_map = {
"BOOLEAN": "BOOL",
"TINYINT": "INT64",
"SMALLINT": "INT64",
"INTEGER": "INT64",
"BIGINT": "INT64",
"REAL": "FLOAT64",
"DOUBLE": "FLOAT64",
"DECIMAL": "NUMERIC",
"VARCHAR": "STRING",
"CHAR": "STRING",
"VARBINARY": "BYTES",
"JSON": "STRING",
"DATE": "DATE",
"TIME": "TIME",
        # BigQuery doesn't support TIME with a time zone natively.
"TIME WITH TIME ZONE": "STRING",
"TIMESTAMP": "TIMESTAMP",
# BigQuery supports a narrow range of time zones during import.
        # Use the TIMESTAMP function if you want a TIMESTAMP type.
"TIMESTAMP WITH TIME ZONE": "STRING",
"IPADDRESS": "STRING",
"UUID": "STRING",
}
def __init__(self, *, trino_conn_id: str = "trino_default", **kwargs):
super().__init__(**kwargs)
self.trino_conn_id = trino_conn_id
def query(self):
"""Queries trino and returns a cursor to the results."""
trino = TrinoHook(trino_conn_id=self.trino_conn_id)
conn = trino.get_conn()
cursor = conn.cursor()
self.log.info("Executing: %s", self.sql)
cursor.execute(self.sql)
return _TrinoToGCSTrinoCursorAdapter(cursor)
def field_to_bigquery(self, field) -> dict[str, str]:
"""Convert trino field type to BigQuery field type."""
clear_field_type = field[1].upper()
# remove type argument e.g. DECIMAL(2, 10) => DECIMAL
clear_field_type, _, _ = clear_field_type.partition("(")
new_field_type = self.type_map.get(clear_field_type, "STRING")
return {"name": field[0], "type": new_field_type}
def convert_type(self, value, schema_type, **kwargs):
"""
Do nothing. Trino uses JSON on the transport layer, so types are simple.
:param value: Trino column value
:param schema_type: BigQuery data type
"""
return value
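# Editor's note: an illustrative configuration sketch, not part of the upstream module. The
# connection id, query and GCS locations are assumptions; ``sql``, ``bucket``, ``filename``
# and ``schema_filename`` are parameters inherited from ``BaseSQLToGCSOperator``.
def _example_trino_to_gcs_task() -> TrinoToGCSOperator:
    """Build a sample task that exports a Trino query result and its BigQuery schema."""
    return TrinoToGCSOperator(
        task_id="trino_to_gcs",
        trino_conn_id="trino_default",
        sql="SELECT * FROM tpch.sf1.customer",
        bucket="example-bucket",
        filename="exports/customer_{}.json",
        schema_filename="schemas/customer.json",
    )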
| 7,104 | 33.158654 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/_internal_client/secret_manager_client.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import re
from functools import cached_property
import google
from google.api_core.exceptions import InvalidArgument, NotFound, PermissionDenied
from google.cloud.secretmanager_v1 import SecretManagerServiceClient
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.utils.log.logging_mixin import LoggingMixin
SECRET_ID_PATTERN = r"^[a-zA-Z0-9-_]*$"
class _SecretManagerClient(LoggingMixin):
"""Retrieve Secrets object from Google Cloud Secrets Manager.
This is a common class reused between SecretsManager and Secrets Hook that
provides the shared authentication and verification mechanisms. This class
should not be used directly; use SecretsManager or SecretsHook instead.
:param credentials: Credentials used to authenticate to GCP
"""
def __init__(
self,
credentials: google.auth.credentials.Credentials,
) -> None:
super().__init__()
self.credentials = credentials
@staticmethod
def is_valid_secret_name(secret_name: str) -> bool:
"""Whether the secret name is valid.
:param secret_name: name of the secret
"""
return bool(re.match(SECRET_ID_PATTERN, secret_name))
@cached_property
def client(self) -> SecretManagerServiceClient:
"""Create an authenticated KMS client."""
_client = SecretManagerServiceClient(credentials=self.credentials, client_info=CLIENT_INFO)
return _client
def get_secret(self, secret_id: str, project_id: str, secret_version: str = "latest") -> str | None:
"""Get secret value from the Secret Manager.
:param secret_id: Secret Key
:param project_id: Project id to use
:param secret_version: version of the secret (default is 'latest')
"""
name = self.client.secret_version_path(project_id, secret_id, secret_version)
try:
response = self.client.access_secret_version(request={"name": name})
value = response.payload.data.decode("UTF-8")
return value
except NotFound:
self.log.debug("Google Cloud API Call Error (NotFound): Secret ID %s not found.", secret_id)
return None
except PermissionDenied:
self.log.error(
"""Google Cloud API Call Error (PermissionDenied): No access for Secret ID %s.
Did you add 'secretmanager.versions.access' permission?""",
secret_id,
)
return None
except InvalidArgument:
self.log.error(
"""Google Cloud API Call Error (InvalidArgument): Invalid secret ID %s.
                Only ASCII letters (a-z, A-Z), numbers (0-9), dashes (-), and underscores (_)
are allowed in the secret ID.
""",
secret_id,
)
return None
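# Editor's note: a hedged sketch, not part of the upstream module, of how this internal
# client is typically driven by the Google secrets backend and hooks. It assumes Application
# Default Credentials that carry a project id; the secret id is a placeholder.
def _example_read_secret(secret_id: str = "airflow-connections-my_conn") -> str | None:
    """Fetch the latest version of a secret using Application Default Credentials."""
    import google.auth
    credentials, project_id = google.auth.default()
    client = _SecretManagerClient(credentials=credentials)
    if not project_id or not client.is_valid_secret_name(secret_id):
        return None
    return client.get_secret(secret_id=secret_id, project_id=project_id)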
| 3,691 | 38.276596 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/_internal_client/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/life_sciences.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Operators that interact with Google Cloud Life Sciences service."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.life_sciences import LifeSciencesHook
from airflow.providers.google.cloud.links.life_sciences import LifeSciencesLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class LifeSciencesRunPipelineOperator(GoogleCloudBaseOperator):
"""
Runs a Life Sciences Pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LifeSciencesRunPipelineOperator`
:param body: The request body
:param location: The location of the project
    :param project_id: ID of the Google Cloud project. If None, the
        default project_id is used.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param api_version: API version used (for example v2beta).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
operator_extra_links = (LifeSciencesLink(),)
def __init__(
self,
*,
body: dict,
location: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v2beta",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = body
self.location = location
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self._validate_inputs()
self.impersonation_chain = impersonation_chain
def _validate_inputs(self) -> None:
if not self.body:
raise AirflowException("The required parameter 'body' is missing")
if not self.location:
raise AirflowException("The required parameter 'location' is missing")
def execute(self, context: Context) -> dict:
hook = LifeSciencesHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
project_id = self.project_id or hook.project_id
if project_id:
LifeSciencesLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
return hook.run_pipeline(body=self.body, location=self.location, project_id=self.project_id)
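# Editor's note: a minimal configuration sketch, not part of the upstream module. The body
# below is a simplified placeholder resembling the v2beta ``pipelines.run`` request schema;
# the image, command, region and machine type are assumptions for the example.
def _example_life_sciences_task() -> LifeSciencesRunPipelineOperator:
    """Build a sample task that runs a single-action pipeline."""
    body = {
        "pipeline": {
            "actions": [{"imageUri": "bash", "commands": ["-c", "echo Hello, world"]}],
            "resources": {
                "regions": ["us-central1"],
                "virtualMachine": {"machineType": "n1-standard-1"},
            },
        }
    }
    return LifeSciencesRunPipelineOperator(
        task_id="run_pipeline",
        body=body,
        location="us-central1",
    )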
| 4,170 | 39.105769 | 100 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/text_to_speech.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Text to Speech operator."""
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.texttospeech_v1.types import AudioConfig, SynthesisInput, VoiceSelectionParams
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.hooks.text_to_speech import CloudTextToSpeechHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.common.links.storage import FileDetailsLink
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudTextToSpeechSynthesizeOperator(GoogleCloudBaseOperator):
"""
Synthesizes text to speech and stores it in Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTextToSpeechSynthesizeOperator`
:param input_data: text input to be synthesized. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput
:param voice: configuration of voice to be used in synthesis. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams
:param audio_config: configuration of the synthesized audio. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig
:param target_bucket_name: name of the GCS bucket in which output file should be stored
:param target_filename: filename of the output file.
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_text_to_speech_synthesize_template_fields]
template_fields: Sequence[str] = (
"input_data",
"voice",
"audio_config",
"project_id",
"gcp_conn_id",
"target_bucket_name",
"target_filename",
"impersonation_chain",
)
# [END gcp_text_to_speech_synthesize_template_fields]
operator_extra_links = (FileDetailsLink(),)
def __init__(
self,
*,
input_data: dict | SynthesisInput,
voice: dict | VoiceSelectionParams,
audio_config: dict | AudioConfig,
target_bucket_name: str,
target_filename: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.input_data = input_data
self.voice = voice
self.audio_config = audio_config
self.target_bucket_name = target_bucket_name
self.target_filename = target_filename
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.retry = retry
self.timeout = timeout
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
for parameter in [
"input_data",
"voice",
"audio_config",
"target_bucket_name",
"target_filename",
]:
if getattr(self, parameter) == "":
raise AirflowException(f"The required parameter '{parameter}' is empty")
def execute(self, context: Context) -> None:
hook = CloudTextToSpeechHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.synthesize_speech(
input_data=self.input_data,
voice=self.voice,
audio_config=self.audio_config,
retry=self.retry,
timeout=self.timeout,
)
with NamedTemporaryFile() as temp_file:
temp_file.write(result.audio_content)
cloud_storage_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
cloud_storage_hook.upload(
bucket_name=self.target_bucket_name, object_name=self.target_filename, filename=temp_file.name
)
FileDetailsLink.persist(
context=context,
task_instance=self,
uri=f"{self.target_bucket_name}/{self.target_filename}",
project_id=cloud_storage_hook.project_id,
)
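# Editor's note: an illustrative sketch, not part of the upstream module. The text, voice,
# audio configuration and bucket values are assumptions; plain dicts are accepted in place
# of the corresponding protobuf messages.
def _example_text_to_speech_task() -> CloudTextToSpeechSynthesizeOperator:
    """Build a sample task that synthesizes a short phrase into an MP3 object in GCS."""
    return CloudTextToSpeechSynthesizeOperator(
        task_id="synthesize_speech",
        input_data={"text": "Hello from Airflow"},
        voice={"language_code": "en-US", "ssml_gender": "NEUTRAL"},
        audio_config={"audio_encoding": "MP3"},
        target_bucket_name="example-bucket",
        target_filename="speech/hello.mp3",
    )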
| 6,858 | 43.830065 | 152 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/cloud_memorystore.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Operators for Google Cloud Memorystore service.
.. spelling:word-list::
FieldMask
memcache
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.cloud.redis_v1 import FailoverInstanceRequest, InputConfig, Instance, OutputConfig
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks.cloud_memorystore import (
CloudMemorystoreHook,
CloudMemorystoreMemcachedHook,
)
from airflow.providers.google.cloud.links.cloud_memorystore import (
MemcachedInstanceDetailsLink,
MemcachedInstanceListLink,
RedisInstanceDetailsLink,
RedisInstanceListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudMemorystoreCreateInstanceOperator(GoogleCloudBaseOperator):
"""
Creates a Redis instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreCreateInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Redis instance in the customer project with the
following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Redis [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance_id",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance_id: str,
instance: dict | Instance,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance_id = instance_id
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.create_instance(
location=self.location,
instance_id=self.instance_id,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(result)
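# Editor's note: a brief configuration sketch, not part of the upstream module. The project,
# location, instance id, tier and memory size are assumptions; ``instance`` may be given as
# a dict or as an ``Instance`` protobuf object.
def _example_create_redis_instance_task() -> CloudMemorystoreCreateInstanceOperator:
    """Build a sample task that creates a 1 GB BASIC-tier Redis instance."""
    return CloudMemorystoreCreateInstanceOperator(
        task_id="create-instance",
        location="europe-west1",
        instance_id="memorystore-example",
        instance={"tier": Instance.Tier.BASIC, "memory_size_gb": 1},
        project_id="example-project",
    )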
class CloudMemorystoreDeleteInstanceOperator(GoogleCloudBaseOperator):
"""
Deletes a specific Redis instance. Instance stops serving and data is deleted.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreDeleteInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
instance: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.delete_instance(
location=self.location,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudMemorystoreExportInstanceOperator(GoogleCloudBaseOperator):
"""
Export Redis instance data into a Redis RDB format file in Cloud Storage.
Redis will continue serving during this operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreExportInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param output_config: Required. Specify data to be exported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.OutputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"output_config",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance: str,
output_config: dict | OutputConfig,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.output_config = output_config
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.export_instance(
location=self.location,
instance=self.instance,
output_config=self.output_config,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
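# Editor's note: an illustrative sketch, not part of the upstream module. The bucket path is
# an assumption; ``output_config`` mirrors ``google.cloud.redis_v1.types.OutputConfig``.
def _example_export_instance_task() -> CloudMemorystoreExportInstanceOperator:
    """Build a sample task that exports an instance RDB snapshot to Cloud Storage."""
    return CloudMemorystoreExportInstanceOperator(
        task_id="export-instance",
        location="europe-west1",
        instance="memorystore-example",
        output_config={"gcs_destination": {"uri": "gs://example-bucket/memorystore.rdb"}},
        project_id="example-project",
    )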
class CloudMemorystoreFailoverInstanceOperator(GoogleCloudBaseOperator):
"""
Initiate a failover of the primary node for a specific STANDARD tier Cloud Memorystore for Redis instance.
Uses the current replica node.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreFailoverInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
    :param data_protection_mode: Optional. The data protection mode to use during the failover. If it is
        unspecified, the data protection mode defaults to LIMITED_DATA_LOSS.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"data_protection_mode",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance: str,
data_protection_mode: FailoverInstanceRequest.DataProtectionMode,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.data_protection_mode = data_protection_mode
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.failover_instance(
location=self.location,
instance=self.instance,
data_protection_mode=self.data_protection_mode,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
class CloudMemorystoreGetInstanceOperator(GoogleCloudBaseOperator):
"""
Gets the details of a specific Redis instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreGetInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_instance(
location=self.location,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(result)
class CloudMemorystoreImportOperator(GoogleCloudBaseOperator):
"""
Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
Redis may stop serving during this operation. The instance state will be IMPORTING for the entire operation. When
complete, the instance will contain only data from the imported file.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreImportOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param input_config: Required. Specify data to be imported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.InputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"input_config",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance: str,
input_config: dict | InputConfig,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.input_config = input_config
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.import_instance(
location=self.location,
instance=self.instance,
input_config=self.input_config,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
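# Illustrative usage sketch (comment only). The bucket, file, instance, and project names are
# hypothetical; ``input_config`` uses the dict form of google.cloud.redis_v1.types.InputConfig.
#
#     import_redis = CloudMemorystoreImportOperator(
#         task_id="import_redis_backup",
#         location="europe-west1",
#         instance="my-redis-instance",
#         input_config={"gcs_source": {"uri": "gs://my-bucket/my-backup.rdb"}},
#         project_id="my-project",
#     )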
class CloudMemorystoreListInstancesOperator(GoogleCloudBaseOperator):
"""
Lists all Redis instances owned by a project in either the specified location (region) or all locations.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreListInstancesOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:param page_size: The maximum number of resources contained in the underlying API response. If page
streaming is performed per-resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"page_size",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceListLink(),)
def __init__(
self,
*,
location: str,
page_size: int,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.page_size = page_size
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.list_instances(
location=self.location,
page_size=self.page_size,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceListLink.persist(
context=context,
task_instance=self,
project_id=self.project_id or hook.project_id,
)
instances = [Instance.to_dict(a) for a in result]
return instances
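# Illustrative usage sketch (comment only). Passing ``location="-"`` queries all regions
# available to the (hypothetical) project and aggregates the results, as documented above.
#
#     list_redis = CloudMemorystoreListInstancesOperator(
#         task_id="list_redis_instances",
#         location="-",
#         page_size=100,
#         project_id="my-project",
#     )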
class CloudMemorystoreUpdateInstanceOperator(GoogleCloudBaseOperator):
"""
Updates the metadata and configuration of a specific Redis instance.
:param update_mask: Required. Mask of fields to update. At least one path must be supplied in this field.
The elements of the repeated paths field may only include these fields from ``Instance``:
- ``displayName``
- ``labels``
- ``memorySizeGb``
- ``redisConfig``
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.FieldMask`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreUpdateInstanceOperator`
:param instance: Required. Update description. Only fields specified in update_mask are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"update_mask",
"instance",
"location",
"instance_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
update_mask: dict | FieldMask,
instance: dict | Instance,
location: str | None = None,
instance_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.update_mask = update_mask
self.instance = instance
self.location = location
self.instance_id = instance_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
res = hook.update_instance(
update_mask=self.update_mask,
instance=self.instance,
location=self.location,
instance_id=self.instance_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
# projects/PROJECT_NAME/locations/LOCATION/instances/INSTANCE
location_id, instance_id = res.name.split("/")[-3::2]
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id or instance_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
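# Illustrative usage sketch (comment only). The dict arguments mirror the
# google.cloud.redis_v1 FieldMask and Instance messages; names and sizes are hypothetical.
#
#     update_redis = CloudMemorystoreUpdateInstanceOperator(
#         task_id="update_redis_instance",
#         update_mask={"paths": ["memory_size_gb"]},
#         instance={"memory_size_gb": 4},
#         location="europe-west1",
#         instance_id="my-redis-instance",
#         project_id="my-project",
#     )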
class CloudMemorystoreScaleInstanceOperator(GoogleCloudBaseOperator):
"""
Scales the memory size of a specific Redis instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreScaleInstanceOperator`
:param memory_size_gb: Redis memory size in GiB.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"memory_size_gb",
"location",
"instance_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
memory_size_gb: int,
location: str | None = None,
instance_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.memory_size_gb = memory_size_gb
self.location = location
self.instance_id = instance_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
res = hook.update_instance(
update_mask={"paths": ["memory_size_gb"]},
instance={"memory_size_gb": self.memory_size_gb},
location=self.location,
instance_id=self.instance_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
# projects/PROJECT_NAME/locations/LOCATION/instances/INSTANCE
location_id, instance_id = res.name.split("/")[-3::2]
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id or instance_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
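# Illustrative usage sketch (comment only). Scaling is a convenience wrapper around
# update_instance with a ``memory_size_gb``-only mask; the values below are hypothetical.
#
#     scale_redis = CloudMemorystoreScaleInstanceOperator(
#         task_id="scale_redis_instance",
#         memory_size_gb=8,
#         location="europe-west1",
#         instance_id="my-redis-instance",
#         project_id="my-project",
#     )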
class CloudMemorystoreCreateInstanceAndImportOperator(GoogleCloudBaseOperator):
"""
Create a Redis instance and import a Redis RDB snapshot file from Cloud Storage into this instance.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreCreateInstanceAndImportOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Redis instance in the customer project with the
following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Redis [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param input_config: Required. Specify data to be imported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.InputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance_id",
"instance",
"input_config",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance_id: str,
instance: dict | Instance,
input_config: dict | InputConfig,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance_id = instance_id
self.instance = instance
self.input_config = input_config
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.create_instance(
location=self.location,
instance_id=self.instance_id,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.import_instance(
location=self.location,
instance=self.instance_id,
input_config=self.input_config,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
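# Illustrative usage sketch (comment only). The instance spec and GCS URI are hypothetical;
# ``Instance`` here is the google.cloud.redis_v1 type imported at the top of this module.
#
#     create_and_import = CloudMemorystoreCreateInstanceAndImportOperator(
#         task_id="create_redis_and_import",
#         location="europe-west1",
#         instance_id="my-redis-instance",
#         instance={"tier": Instance.Tier.BASIC, "memory_size_gb": 1},
#         input_config={"gcs_source": {"uri": "gs://my-bucket/my-backup.rdb"}},
#         project_id="my-project",
#     )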
class CloudMemorystoreExportAndDeleteInstanceOperator(GoogleCloudBaseOperator):
"""
Export Redis instance data into a Redis RDB format file in Cloud Storage.
After the export completes, the instance is deleted.
Redis will continue serving during the export operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreExportAndDeleteInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param output_config: Required. Specify data to be exported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.OutputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"output_config",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
instance: str,
output_config: dict | OutputConfig,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.output_config = output_config
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.export_instance(
location=self.location,
instance=self.instance,
output_config=self.output_config,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.delete_instance(
location=self.location,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
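# Illustrative usage sketch (comment only). ``output_config`` uses the dict form of
# google.cloud.redis_v1.types.OutputConfig; bucket and instance names are hypothetical.
#
#     export_and_delete = CloudMemorystoreExportAndDeleteInstanceOperator(
#         task_id="export_and_delete_redis",
#         location="europe-west1",
#         instance="my-redis-instance",
#         output_config={"gcs_destination": {"uri": "gs://my-bucket/my-export.rdb"}},
#         project_id="my-project",
#     )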
class CloudMemorystoreMemcachedApplyParametersOperator(GoogleCloudBaseOperator):
"""
Applies the current set of parameters to the specified nodes of the Memcached instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedApplyParametersOperator`
:param node_ids: Nodes to which we should apply the instance-level parameter group.
:param apply_all: Whether to apply instance-level parameter group to all nodes. If set to true,
will explicitly restrict users from specifying any nodes, and apply parameter group updates
to all nodes within the instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = (
"node_ids",
"apply_all",
"location",
"instance_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
*,
node_ids: Sequence[str],
apply_all: bool,
location: str,
instance_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.node_ids = node_ids
self.apply_all = apply_all
self.location = location
self.instance_id = instance_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.apply_parameters(
node_ids=self.node_ids,
apply_all=self.apply_all,
location=self.location,
instance_id=self.instance_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
location_id=self.location,
project_id=self.project_id,
)
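# Illustrative usage sketch (comment only). Node IDs, instance, and project values are
# hypothetical; note that applying parameters restarts the targeted Memcached nodes.
#
#     apply_params = CloudMemorystoreMemcachedApplyParametersOperator(
#         task_id="apply_memcached_parameters",
#         node_ids=["node-a-1"],
#         apply_all=False,
#         location="europe-west1",
#         instance_id="my-memcached-instance",
#         project_id="my-project",
#     )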
class CloudMemorystoreMemcachedCreateInstanceOperator(GoogleCloudBaseOperator):
"""
Creates a Memcached instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedCreateInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Memcached instance in the customer project with the
following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Memcached [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
"""
template_fields: Sequence[str] = (
"location",
"instance_id",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
location: str,
instance_id: str,
instance: dict | cloud_memcache.Instance,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.location = location
self.instance_id = instance_id
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(gcp_conn_id=self.gcp_conn_id)
result = hook.create_instance(
location=self.location,
instance_id=self.instance_id,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return cloud_memcache.Instance.to_dict(result)
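# Illustrative usage sketch (comment only). The instance dict mirrors the
# cloud_memcache.Instance message; node counts, sizes, zones, and names are hypothetical.
#
#     create_memcached = CloudMemorystoreMemcachedCreateInstanceOperator(
#         task_id="create_memcached_instance",
#         location="europe-west1",
#         instance_id="my-memcached-instance",
#         instance={
#             "node_count": 1,
#             "node_config": {"cpu_count": 1, "memory_size_mb": 1024},
#             "zones": ["europe-west1-b"],
#         },
#         project_id="my-project",
#     )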
class CloudMemorystoreMemcachedDeleteInstanceOperator(GoogleCloudBaseOperator):
"""
Deletes a specific Memcached instance. Instance stops serving and data is deleted.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedDeleteInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
"""
template_fields: Sequence[str] = (
"location",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
)
def __init__(
self,
location: str,
instance: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.location = location
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(gcp_conn_id=self.gcp_conn_id)
hook.delete_instance(
location=self.location,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudMemorystoreMemcachedGetInstanceOperator(GoogleCloudBaseOperator):
"""
Gets the details of a specific Memcached instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedGetInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
*,
location: str,
instance: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_instance(
location=self.location,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return cloud_memcache.Instance.to_dict(result)
class CloudMemorystoreMemcachedListInstancesOperator(GoogleCloudBaseOperator):
"""
List all Memcached instances owned by a project in either the specified location/region or all locations.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedListInstancesOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (MemcachedInstanceListLink(),)
def __init__(
self,
*,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.list_instances(
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceListLink.persist(
context=context,
task_instance=self,
project_id=self.project_id or hook.project_id,
)
instances = [cloud_memcache.Instance.to_dict(a) for a in result]
return instances
class CloudMemorystoreMemcachedUpdateInstanceOperator(GoogleCloudBaseOperator):
"""
Updates the metadata and configuration of a specific Memcached instance.
:param update_mask: Required. Mask of fields to update. At least one path must be supplied in this field.
The elements of the repeated paths field may only include these fields from ``Instance``:
- ``displayName``
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedUpdateInstanceOperator`
:param instance: Required. Update description. Only fields specified in update_mask are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"update_mask",
"instance",
"location",
"instance_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
*,
update_mask: dict | FieldMask,
instance: dict | cloud_memcache.Instance,
location: str | None = None,
instance_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.update_mask = update_mask
self.instance = instance
self.location = location
self.instance_id = instance_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
res = hook.update_instance(
update_mask=self.update_mask,
instance=self.instance,
location=self.location,
instance_id=self.instance_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
# projects/PROJECT_NAME/locations/LOCATION/instances/INSTANCE
location_id, instance_id = res.name.split("/")[-3::2]
MemcachedInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id or instance_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
class CloudMemorystoreMemcachedUpdateParametersOperator(GoogleCloudBaseOperator):
"""
Updates the defined Memcached Parameters for an existing Instance.
This method only stages the parameters; it must be followed by ``apply_parameters``
to apply them to the nodes of the Memcached instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedApplyParametersOperator`
:param update_mask: Required. Mask of fields to update.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param parameters: The parameters to apply to the instance.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.MemcacheParameters`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = (
"update_mask",
"parameters",
"location",
"instance_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
*,
update_mask: dict | FieldMask,
parameters: dict | cloud_memcache.MemcacheParameters,
location: str,
instance_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.update_mask = update_mask
self.parameters = parameters
self.location = location
self.instance_id = instance_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.update_parameters(
update_mask=self.update_mask,
parameters=self.parameters,
location=self.location,
instance_id=self.instance_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceDetailsLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
location_id=self.location,
project_id=self.project_id,
)
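# Illustrative usage sketch (comment only). The parameter names and values are hypothetical
# examples of the cloud_memcache.MemcacheParameters dict form; remember that this operator
# only stages parameters and should be followed by the apply-parameters operator above.
#
#     update_params = CloudMemorystoreMemcachedUpdateParametersOperator(
#         task_id="update_memcached_parameters",
#         update_mask={"paths": ["params"]},
#         parameters={"params": {"protocol": "ascii", "hash_algorithm": "jenkins"}},
#         location="europe-west1",
#         instance_id="my-memcached-instance",
#         project_id="my-project",
#     )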
| 70,786 | 40.444379 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/dataplex.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataplex operators."""
from __future__ import annotations
from time import sleep
from typing import TYPE_CHECKING, Any, Sequence
if TYPE_CHECKING:
from airflow.utils.context import Context
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry, exponential_sleep_generator
from google.cloud.dataplex_v1.types import Lake, Task
from googleapiclient.errors import HttpError
from airflow.providers.google.cloud.hooks.dataplex import DataplexHook
from airflow.providers.google.cloud.links.dataplex import (
DataplexLakeLink,
DataplexTaskLink,
DataplexTasksLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
class DataplexCreateTaskOperator(GoogleCloudBaseOperator):
"""
Creates a task resource within a lake.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param body: Required. The Request body contains an instance of Task.
:param dataplex_task_id: Required. Task identifier.
:param validate_only: Optional. Only validate the request, but do not perform mutations. The default is
false.
:param api_version: The version of the API that will be requested, for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag indicating whether the Dataplex task should be created asynchronously.
This is useful for long-running task creation, where the task can be awaited
asynchronously using the DataplexTaskSensor.
"""
template_fields = (
"project_id",
"dataplex_task_id",
"body",
"validate_only",
"impersonation_chain",
)
template_fields_renderers = {"body": "json"}
operator_extra_links = (DataplexTaskLink(),)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
body: dict[str, Any],
dataplex_task_id: str,
validate_only: bool | None = None,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.body = body
self.dataplex_task_id = dataplex_task_id
self.validate_only = validate_only
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
def execute(self, context: Context) -> dict:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Dataplex task %s", self.dataplex_task_id)
DataplexTaskLink.persist(context=context, task_instance=self)
try:
operation = hook.create_task(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
body=self.body,
dataplex_task_id=self.dataplex_task_id,
validate_only=self.validate_only,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if not self.asynchronous:
self.log.info("Waiting for Dataplex task %s to be created", self.dataplex_task_id)
task = hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Task %s created successfully", self.dataplex_task_id)
else:
is_done = operation.done()
self.log.info("Is operation done already? %s", is_done)
return is_done
except HttpError as err:
if err.resp.status not in (409, "409"):
raise
self.log.info("Task %s already exists", self.dataplex_task_id)
# Wait for task to be ready
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
task = hook.get_task(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
dataplex_task_id=self.dataplex_task_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if task["state"] != "CREATING":
break
sleep(time_to_wait)
return Task.to_dict(task)
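# Illustrative usage sketch (comment only). The body below is a minimal Task dict with a
# Spark python-script job; the service account, bucket path, and IDs are hypothetical.
#
#     create_dataplex_task = DataplexCreateTaskOperator(
#         task_id="create_dataplex_task",
#         project_id="my-project",
#         region="us-central1",
#         lake_id="my-lake",
#         dataplex_task_id="my-dataplex-task",
#         body={
#             "trigger_spec": {"type_": "ON_DEMAND"},
#             "execution_spec": {"service_account": "sa@my-project.iam.gserviceaccount.com"},
#             "spark": {"python_script_file": "gs://my-bucket/spark_job.py"},
#         },
#     )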
class DataplexDeleteTaskOperator(GoogleCloudBaseOperator):
"""
Delete the task resource.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param dataplex_task_id: Required. Task identifier.
:param api_version: The version of the API that will be requested, for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "dataplex_task_id", "impersonation_chain")
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
dataplex_task_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.dataplex_task_id = dataplex_task_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Dataplex task %s", self.dataplex_task_id)
operation = hook.delete_task(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
dataplex_task_id=self.dataplex_task_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex task %s deleted successfully!", self.dataplex_task_id)
class DataplexListTasksOperator(GoogleCloudBaseOperator):
"""
Lists tasks under the given lake.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param page_size: Optional. Maximum number of tasks to return. The service may return fewer than this
value. If unspecified, at most 10 tasks will be returned. The maximum value is 1000; values above 1000
will be coerced to 1000.
:param page_token: Optional. Page token received from a previous ListTasks call. Provide this to retrieve
the subsequent page. When paginating, all other parameters provided to ListTasks must match the call
that provided the page token.
:param filter: Optional. Filter request.
:param order_by: Optional. Order by fields for the result.
:param api_version: The version of the API that will be requested, for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"page_size",
"page_token",
"filter",
"order_by",
"impersonation_chain",
)
operator_extra_links = (DataplexTasksLink(),)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
order_by: str | None = None,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.page_size = page_size
self.page_token = page_token
self.filter = filter
self.order_by = order_by
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> list[dict]:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Listing Dataplex tasks from lake %s", self.lake_id)
DataplexTasksLink.persist(context=context, task_instance=self)
tasks = hook.list_tasks(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
page_size=self.page_size,
page_token=self.page_token,
filter=self.filter,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [Task.to_dict(task) for task in tasks]
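# Illustrative usage sketch (comment only). The project, region, and lake IDs are
# hypothetical; filter and order_by are optional and omitted here.
#
#     list_dataplex_tasks = DataplexListTasksOperator(
#         task_id="list_dataplex_tasks",
#         project_id="my-project",
#         region="us-central1",
#         lake_id="my-lake",
#         page_size=50,
#     )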
class DataplexGetTaskOperator(GoogleCloudBaseOperator):
"""
Get task resource.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param dataplex_task_id: Required. Task identifier.
    :param api_version: The version of the API that will be requested, for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "dataplex_task_id", "impersonation_chain")
operator_extra_links = (DataplexTaskLink(),)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
dataplex_task_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.dataplex_task_id = dataplex_task_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Retrieving Dataplex task %s", self.dataplex_task_id)
DataplexTaskLink.persist(context=context, task_instance=self)
task = hook.get_task(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
dataplex_task_id=self.dataplex_task_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataplexTasksLink.persist(context=context, task_instance=self)
return Task.to_dict(task)
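# A minimal usage sketch, not part of the provider source: it shows how DataplexGetTaskOperator
# could be instantiated. All identifiers below are hypothetical placeholders; the returned
# operator would normally be created inside a DAG context.
def _example_get_dataplex_task() -> DataplexGetTaskOperator:
    return DataplexGetTaskOperator(
        task_id="get_dataplex_task",
        project_id="my-project",
        region="us-central1",
        lake_id="my-lake",
        dataplex_task_id="my-dataplex-task",
    )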
class DataplexCreateLakeOperator(GoogleCloudBaseOperator):
"""
    Creates a lake resource within a project.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param lake_id: Required. Lake identifier.
:param body: Required. The Request body contains an instance of Lake.
:param validate_only: Optional. Only validate the request, but do not perform mutations. The default is
false.
    :param api_version: The version of the API that will be requested, for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param asynchronous: Flag informing whether the Dataplex lake should be created asynchronously.
        This is useful for long-running lake creation, where completion can be awaited
        asynchronously using the DataplexLakeSensor.
"""
template_fields = (
"project_id",
"lake_id",
"body",
"validate_only",
"impersonation_chain",
)
template_fields_renderers = {"body": "json"}
operator_extra_links = (DataplexLakeLink(),)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
body: dict[str, Any],
validate_only: bool | None = None,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.body = body
self.validate_only = validate_only
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
def execute(self, context: Context) -> dict:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Dataplex lake %s", self.lake_id)
try:
operation = hook.create_lake(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
body=self.body,
validate_only=self.validate_only,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if not self.asynchronous:
self.log.info("Waiting for Dataplex lake %s to be created", self.lake_id)
lake = hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Lake %s created successfully", self.lake_id)
else:
is_done = operation.done()
self.log.info("Is operation done already? %s", is_done)
return is_done
except HttpError as err:
if err.resp.status not in (409, "409"):
raise
self.log.info("Lake %s already exists", self.lake_id)
# Wait for lake to be ready
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
lake = hook.get_lake(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if lake["state"] != "CREATING":
break
sleep(time_to_wait)
DataplexLakeLink.persist(
context=context,
task_instance=self,
)
return Lake.to_dict(lake)
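# A minimal usage sketch, not part of the provider source: it shows how DataplexCreateLakeOperator
# could be instantiated. The project, region, lake id and lake body are hypothetical; the body is
# forwarded to the Dataplex Lake resource, so any valid Lake fields may be used.
def _example_create_dataplex_lake() -> DataplexCreateLakeOperator:
    return DataplexCreateLakeOperator(
        task_id="create_dataplex_lake",
        project_id="my-project",
        region="us-central1",
        lake_id="my-lake",
        body={"display_name": "My example lake"},
        # With asynchronous=True the operator returns right after submitting the request,
        # and a DataplexLakeSensor can be used to wait for the lake to become active.
        asynchronous=False,
    )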
class DataplexDeleteLakeOperator(GoogleCloudBaseOperator):
"""
Delete the lake resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param lake_id: Required. Lake identifier.
    :param api_version: The version of the API that will be requested, for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "lake_id", "impersonation_chain")
operator_extra_links = (DataplexLakeLink(),)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Dataplex lake %s", self.lake_id)
operation = hook.delete_lake(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataplexLakeLink.persist(context=context, task_instance=self)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex lake %s deleted successfully!", self.lake_id)
| 26,478 | 42.195759 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/datastore.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Datastore operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.datastore import DatastoreHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.links.datastore import (
CloudDatastoreEntitiesLink,
CloudDatastoreImportExportLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.common.links.storage import StorageLink
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudDatastoreExportEntitiesOperator(GoogleCloudBaseOperator):
"""
Export entities from Google Cloud Datastore to Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreExportEntitiesOperator`
.. seealso::
https://cloud.google.com/datastore/docs/export-import-entities
:param bucket: name of the cloud storage bucket to back up data
:param namespace: optional namespace path in the specified Cloud Storage bucket
to back up data. If this namespace does not exist in GCS, it will be created.
:param datastore_conn_id: the name of the Datastore connection id to use
    :param cloud_storage_conn_id: the name of the Cloud Storage connection id used to
        write the backup
:param entity_filter: description of what data from the project is included in the
export, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
    :param labels: client-assigned labels for the export request
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:param overwrite_existing: if the storage bucket + namespace is not empty, it will be
emptied prior to exports. This enables overwriting existing backups.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket",
"namespace",
"entity_filter",
"labels",
"impersonation_chain",
)
operator_extra_links = (StorageLink(),)
def __init__(
self,
*,
bucket: str,
namespace: str | None = None,
datastore_conn_id: str = "google_cloud_default",
cloud_storage_conn_id: str = "google_cloud_default",
entity_filter: dict | None = None,
labels: dict | None = None,
polling_interval_in_seconds: int = 10,
overwrite_existing: bool = False,
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.datastore_conn_id = datastore_conn_id
self.cloud_storage_conn_id = cloud_storage_conn_id
self.bucket = bucket
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.overwrite_existing = overwrite_existing
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
self.log.info("Exporting data to Cloud Storage bucket %s", self.bucket)
if self.overwrite_existing and self.namespace:
gcs_hook = GCSHook(self.cloud_storage_conn_id, impersonation_chain=self.impersonation_chain)
objects = gcs_hook.list(self.bucket, prefix=self.namespace)
for obj in objects:
gcs_hook.delete(self.bucket, obj)
ds_hook = DatastoreHook(
gcp_conn_id=self.datastore_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = ds_hook.export_to_storage_bucket(
bucket=self.bucket,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id,
)
operation_name = result["name"]
result = ds_hook.poll_operation_until_done(operation_name, self.polling_interval_in_seconds)
state = result["metadata"]["common"]["state"]
if state != "SUCCESSFUL":
raise AirflowException(f"Operation failed: result={result}")
StorageLink.persist(
context=context,
task_instance=self,
uri=f"{self.bucket}/{result['response']['outputUrl'].split('/')[3]}",
project_id=self.project_id or ds_hook.project_id,
)
return result
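# A minimal usage sketch, not part of the provider source: it shows how
# CloudDatastoreExportEntitiesOperator could back up selected kinds to a Cloud Storage bucket.
# The bucket, namespace, kinds and project id are hypothetical placeholders.
def _example_datastore_export() -> CloudDatastoreExportEntitiesOperator:
    return CloudDatastoreExportEntitiesOperator(
        task_id="export_datastore_entities",
        bucket="my-backup-bucket",
        namespace="nightly",
        entity_filter={"kinds": ["Customer", "Order"]},  # export only these kinds
        overwrite_existing=True,  # empty the bucket/namespace before exporting
        project_id="my-project",
    )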
class CloudDatastoreImportEntitiesOperator(GoogleCloudBaseOperator):
"""
Import entities from Cloud Storage to Google Cloud Datastore.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreImportEntitiesOperator`
.. seealso::
https://cloud.google.com/datastore/docs/export-import-entities
    :param bucket: container in Cloud Storage that holds the exported data to import
:param file: path of the backup metadata file in the specified Cloud Storage bucket.
It should have the extension .overall_export_metadata
:param namespace: optional namespace of the backup metadata file in
the specified Cloud Storage bucket.
    :param entity_filter: description of what data from the project is included in
        the import, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
    :param labels: client-assigned labels for the import request
:param datastore_conn_id: the name of the connection id to use
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket",
"file",
"namespace",
"entity_filter",
"labels",
"impersonation_chain",
)
operator_extra_links = (CloudDatastoreImportExportLink(),)
def __init__(
self,
*,
bucket: str,
file: str,
namespace: str | None = None,
entity_filter: dict | None = None,
labels: dict | None = None,
datastore_conn_id: str = "google_cloud_default",
polling_interval_in_seconds: float = 10,
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.datastore_conn_id = datastore_conn_id
self.bucket = bucket
self.file = file
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
self.log.info("Importing data from Cloud Storage bucket %s", self.bucket)
ds_hook = DatastoreHook(
self.datastore_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = ds_hook.import_from_storage_bucket(
bucket=self.bucket,
file=self.file,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id,
)
operation_name = result["name"]
result = ds_hook.poll_operation_until_done(operation_name, self.polling_interval_in_seconds)
state = result["metadata"]["common"]["state"]
if state != "SUCCESSFUL":
raise AirflowException(f"Operation failed: result={result}")
CloudDatastoreImportExportLink.persist(context=context, task_instance=self)
return result
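# A minimal usage sketch, not part of the provider source: it shows how
# CloudDatastoreImportEntitiesOperator could restore a previous export. The bucket, metadata file
# path and project id are hypothetical; ``file`` must point at the .overall_export_metadata file
# produced by the export.
def _example_datastore_import() -> CloudDatastoreImportEntitiesOperator:
    return CloudDatastoreImportEntitiesOperator(
        task_id="import_datastore_entities",
        bucket="my-backup-bucket",
        file="nightly/export-2023-01-01/export-2023-01-01.overall_export_metadata",
        project_id="my-project",
    )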
class CloudDatastoreAllocateIdsOperator(GoogleCloudBaseOperator):
"""
Allocate IDs for incomplete keys. Return list of keys.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreAllocateIdsOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:param project_id: Google Cloud project ID against which to make the request.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"partial_keys",
"impersonation_chain",
)
operator_extra_links = (CloudDatastoreEntitiesLink(),)
def __init__(
self,
*,
partial_keys: list,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.partial_keys = partial_keys
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> list:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
keys = hook.allocate_ids(
partial_keys=self.partial_keys,
project_id=self.project_id,
)
CloudDatastoreEntitiesLink.persist(context=context, task_instance=self)
return keys
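# A minimal usage sketch, not part of the provider source: it shows how
# CloudDatastoreAllocateIdsOperator could reserve ids for incomplete keys. The key structure
# follows the Datastore REST representation; the kind and project id are hypothetical.
def _example_datastore_allocate_ids() -> CloudDatastoreAllocateIdsOperator:
    return CloudDatastoreAllocateIdsOperator(
        task_id="allocate_datastore_ids",
        partial_keys=[{"path": [{"kind": "Customer"}]}],  # incomplete key: kind only, no id or name
        project_id="my-project",
    )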
class CloudDatastoreBeginTransactionOperator(GoogleCloudBaseOperator):
"""
Begins a new transaction. Returns a transaction handle.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreBeginTransactionOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:param transaction_options: Options for a new transaction.
:param project_id: Google Cloud project ID against which to make the request.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"transaction_options",
"impersonation_chain",
)
def __init__(
self,
*,
transaction_options: dict[str, Any],
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transaction_options = transaction_options
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> str:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
handle = hook.begin_transaction(
transaction_options=self.transaction_options,
project_id=self.project_id,
)
return handle
class CloudDatastoreCommitOperator(GoogleCloudBaseOperator):
"""
Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreCommitOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:param project_id: Google Cloud project ID against which to make the request.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"body",
"impersonation_chain",
)
operator_extra_links = (CloudDatastoreEntitiesLink(),)
def __init__(
self,
*,
body: dict[str, Any],
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = body
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.commit(
body=self.body,
project_id=self.project_id,
)
CloudDatastoreEntitiesLink.persist(context=context, task_instance=self)
return response
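# A minimal usage sketch, not part of the provider source: it chains
# CloudDatastoreBeginTransactionOperator with CloudDatastoreCommitOperator, pulling the transaction
# handle from XCom because ``body`` is a templated field. The DAG id, kind, property values and
# project id are hypothetical placeholders.
def _example_datastore_transactional_commit():
    import datetime
    from airflow.models.dag import DAG
    with DAG(
        dag_id="example_datastore_commit",  # hypothetical DAG id
        start_date=datetime.datetime(2023, 1, 1),
        schedule=None,
    ) as dag:
        begin = CloudDatastoreBeginTransactionOperator(
            task_id="begin_datastore_transaction",
            transaction_options={"readWrite": {}},  # start a read-write transaction
            project_id="my-project",
        )
        commit = CloudDatastoreCommitOperator(
            task_id="commit_datastore_transaction",
            body={
                "mode": "TRANSACTIONAL",
                "transaction": "{{ task_instance.xcom_pull('begin_datastore_transaction') }}",
                "mutations": [
                    {
                        "insert": {
                            "key": {"path": [{"kind": "Customer"}]},
                            "properties": {"name": {"stringValue": "Alice"}},
                        }
                    }
                ],
            },
            project_id="my-project",
        )
        begin >> commit
    return dag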
class CloudDatastoreRollbackOperator(GoogleCloudBaseOperator):
"""
Roll back a transaction.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreRollbackOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:param project_id: Google Cloud project ID against which to make the request.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"transaction",
"impersonation_chain",
)
def __init__(
self,
*,
transaction: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transaction = transaction
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.rollback(
transaction=self.transaction,
project_id=self.project_id,
)
class CloudDatastoreRunQueryOperator(GoogleCloudBaseOperator):
"""
Run a query for entities. Returns the batch of query results.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreRunQueryOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:param project_id: Google Cloud project ID against which to make the request.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"body",
"impersonation_chain",
)
def __init__(
self,
*,
body: dict[str, Any],
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = body
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.run_query(
body=self.body,
project_id=self.project_id,
)
return response
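# A minimal usage sketch, not part of the provider source: it runs a GQL query through
# CloudDatastoreRunQueryOperator. The body follows the Datastore runQuery REST request shape;
# the query string and project id are hypothetical placeholders.
def _example_datastore_run_query() -> CloudDatastoreRunQueryOperator:
    return CloudDatastoreRunQueryOperator(
        task_id="run_datastore_query",
        body={
            "partitionId": {"namespaceId": ""},  # default namespace
            "gqlQuery": {
                "queryString": "SELECT * FROM Customer LIMIT 10",
                "allowLiterals": True,
            },
        },
        project_id="my-project",
    )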
class CloudDatastoreGetOperationOperator(GoogleCloudBaseOperator):
"""
Gets the latest state of a long-running operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreGetOperationOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
def __init__(
self,
*,
name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
op = hook.get_operation(name=self.name)
return op
class CloudDatastoreDeleteOperationOperator(GoogleCloudBaseOperator):
"""
Deletes the long-running operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreDeleteOperationOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
def __init__(
self,
*,
name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_operation(name=self.name)
| 24,796 | 38.802568 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/mlengine.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud MLEngine operators."""
from __future__ import annotations
import logging
import re
import time
import warnings
from typing import TYPE_CHECKING, Any, Sequence
from googleapiclient.errors import HttpError
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.hooks.mlengine import MLEngineHook
from airflow.providers.google.cloud.links.mlengine import (
MLEngineJobDetailsLink,
MLEngineJobSListLink,
MLEngineModelLink,
MLEngineModelsListLink,
MLEngineModelVersionDetailsLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.mlengine import MLEngineStartTrainingJobTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
log = logging.getLogger(__name__)
def _normalize_mlengine_job_id(job_id: str) -> str:
"""
Replaces invalid MLEngine job_id characters with '_'.
    This also adds a leading 'z_' prefix in case job_id starts with a digit or a
    templated expression, since a valid job_id must start with a letter.
:param job_id: A job_id str that may have invalid characters.
:return: A valid job_id representation.
"""
# Add a prefix when a job_id starts with a digit or a template
match = re.search(r"\d|\{{2}", job_id)
if match and match.start() == 0:
job = f"z_{job_id}"
else:
job = job_id
# Clean up 'bad' characters except templates
tracker = 0
cleansed_job_id = ""
for match in re.finditer(r"\{{2}.+?\}{2}", job):
cleansed_job_id += re.sub(r"[^0-9a-zA-Z]+", "_", job[tracker : match.start()])
cleansed_job_id += job[match.start() : match.end()]
tracker = match.end()
# Clean up last substring or the full string if no templates
cleansed_job_id += re.sub(r"[^0-9a-zA-Z]+", "_", job[tracker:])
return cleansed_job_id
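# A small illustrative check, not part of the provider source, of how _normalize_mlengine_job_id
# behaves: runs of invalid characters collapse to "_", ids starting with a digit get a "z_"
# prefix, and Jinja templates such as "{{ ds }}" are left untouched. The sample ids are arbitrary.
def _example_normalize_job_ids() -> None:
    assert _normalize_mlengine_job_id("my-job.2023") == "my_job_2023"
    assert _normalize_mlengine_job_id("2023-job") == "z_2023_job"
    assert _normalize_mlengine_job_id("job_{{ ds }}") == "job_{{ ds }}"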
class MLEngineStartBatchPredictionJobOperator(GoogleCloudBaseOperator):
"""
Start a Google Cloud ML Engine prediction job.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineStartBatchPredictionJobOperator`
NOTE: For model origin, users should consider exactly one from the
three options below:
1. Populate ``uri`` field only, which should be a GCS location that
points to a tensorflow savedModel directory.
2. Populate ``model_name`` field only, which refers to an existing
model, and the default version of the model will be used.
3. Populate both ``model_name`` and ``version_name`` fields, which
refers to a specific version of a specific model.
In options 2 and 3, both model and version name should contain the
minimal identifier. For instance, call::
MLEngineBatchPredictionOperator(
...,
model_name='my_model',
version_name='my_version',
...)
if the desired model version is
``projects/my_project/models/my_model/versions/my_version``.
See https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs
for further documentation on the parameters.
:param job_id: A unique id for the prediction job on Google Cloud
ML Engine. (templated)
:param data_format: The format of the input data.
        It will default to 'DATA_FORMAT_UNSPECIFIED' if it is not provided
        or is not one of ["TEXT", "TF_RECORD", "TF_RECORD_GZIP"].
    :param input_paths: A list of GCS paths of input data for batch
        prediction. The wildcard operator ``*`` is accepted, but only at the end. (templated)
:param output_path: The GCS path where the prediction results are
written to. (templated)
:param region: The Google Compute Engine region to run the
prediction job in. (templated)
:param model_name: The Google Cloud ML Engine model to use for prediction.
If version_name is not provided, the default version of this
model will be used.
Should not be None if version_name is provided.
Should be None if uri is provided. (templated)
:param version_name: The Google Cloud ML Engine model version to use for
prediction.
Should be None if uri is provided. (templated)
:param uri: The GCS path of the saved model to use for prediction.
Should be None if model_name is provided.
It should be a GCS path pointing to a tensorflow SavedModel. (templated)
:param max_worker_count: The maximum number of workers to be used
for parallel processing. Defaults to 10 if not specified. Should be a
string representing the worker count ("10" instead of 10, "50" instead
of 50, etc.)
:param runtime_version: The Google Cloud ML Engine runtime version to use
for batch prediction.
:param signature_name: The name of the signature defined in the SavedModel
to use for this job.
:param project_id: The Google Cloud project name where the prediction job is submitted.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID used for connection to Google
Cloud Platform.
    :param labels: a dictionary containing labels for the prediction job; passed to the job request
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:raises: ``ValueError``: if a unique model/version origin cannot be
determined.
"""
template_fields: Sequence[str] = (
"_project_id",
"_job_id",
"_region",
"_input_paths",
"_output_path",
"_model_name",
"_version_name",
"_uri",
"_impersonation_chain",
)
def __init__(
self,
*,
job_id: str,
region: str,
data_format: str,
input_paths: list[str],
output_path: str,
model_name: str | None = None,
version_name: str | None = None,
uri: str | None = None,
max_worker_count: int | None = None,
runtime_version: str | None = None,
signature_name: str | None = None,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
labels: dict[str, str] | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._job_id = job_id
self._region = region
self._data_format = data_format
self._input_paths = input_paths
self._output_path = output_path
self._model_name = model_name
self._version_name = version_name
self._uri = uri
self._max_worker_count = max_worker_count
self._runtime_version = runtime_version
self._signature_name = signature_name
self._gcp_conn_id = gcp_conn_id
self._labels = labels
self._impersonation_chain = impersonation_chain
if not self._project_id:
raise AirflowException("Google Cloud project id is required.")
if not self._job_id:
raise AirflowException("An unique job id is required for Google MLEngine prediction job.")
if self._uri:
if self._model_name or self._version_name:
raise AirflowException(
"Ambiguous model origin: Both uri and model/version name are provided."
)
if self._version_name and not self._model_name:
raise AirflowException(
"Missing model: Batch prediction expects a model name when a version name is provided."
)
if not (self._uri or self._model_name):
raise AirflowException(
"Missing model origin: Batch prediction expects a model, "
"a model & version combination, or a URI to a savedModel."
)
def execute(self, context: Context):
job_id = _normalize_mlengine_job_id(self._job_id)
prediction_request: dict[str, Any] = {
"jobId": job_id,
"predictionInput": {
"dataFormat": self._data_format,
"inputPaths": self._input_paths,
"outputPath": self._output_path,
"region": self._region,
},
}
if self._labels:
prediction_request["labels"] = self._labels
if self._uri:
prediction_request["predictionInput"]["uri"] = self._uri
elif self._model_name:
origin_name = f"projects/{self._project_id}/models/{self._model_name}"
if not self._version_name:
prediction_request["predictionInput"]["modelName"] = origin_name
else:
prediction_request["predictionInput"]["versionName"] = (
origin_name + f"/versions/{self._version_name}"
)
if self._max_worker_count:
prediction_request["predictionInput"]["maxWorkerCount"] = self._max_worker_count
if self._runtime_version:
prediction_request["predictionInput"]["runtimeVersion"] = self._runtime_version
if self._signature_name:
prediction_request["predictionInput"]["signatureName"] = self._signature_name
hook = MLEngineHook(gcp_conn_id=self._gcp_conn_id, impersonation_chain=self._impersonation_chain)
# Helper method to check if the existing job's prediction input is the
# same as the request we get here.
def check_existing_job(existing_job):
return existing_job.get("predictionInput") == prediction_request["predictionInput"]
finished_prediction_job = hook.create_job(
project_id=self._project_id, job=prediction_request, use_existing_job_fn=check_existing_job
)
if finished_prediction_job["state"] != "SUCCEEDED":
self.log.error("MLEngine batch prediction job failed: %s", str(finished_prediction_job))
raise RuntimeError(finished_prediction_job["errorMessage"])
return finished_prediction_job["predictionOutput"]
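# A minimal usage sketch, not part of the provider source: it starts a batch prediction job
# against an existing model's default version (origin option 2 from the docstring above).
# The project, bucket paths, region and model name are hypothetical placeholders.
def _example_start_batch_prediction() -> MLEngineStartBatchPredictionJobOperator:
    return MLEngineStartBatchPredictionJobOperator(
        task_id="start_batch_prediction",
        project_id="my-project",
        job_id="batch_prediction_{{ ds_nodash }}",  # templated; normalized before submission
        region="us-central1",
        data_format="TEXT",
        input_paths=["gs://my-bucket/prediction-inputs/*"],
        output_path="gs://my-bucket/prediction-outputs/",
        model_name="my_model",  # the model's default version is used
    )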
class MLEngineManageModelOperator(GoogleCloudBaseOperator):
"""
Operator for managing a Google Cloud ML Engine model.
.. warning::
This operator is deprecated. Consider using operators for specific operations:
MLEngineCreateModelOperator, MLEngineGetModelOperator.
:param model: A dictionary containing the information about the model.
If the `operation` is `create`, then the `model` parameter should
contain all the information about this model such as `name`.
If the `operation` is `get`, the `model` parameter
should contain the `name` of the model.
:param operation: The operation to perform. Available operations are:
* ``create``: Creates a new model as provided by the `model` parameter.
* ``get``: Gets a particular model where the name is specified in `model`.
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model",
"_impersonation_chain",
)
def __init__(
self,
*,
model: dict,
operation: str = "create",
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
warnings.warn(
"This operator is deprecated. Consider using operators for specific operations: "
"MLEngineCreateModelOperator, MLEngineGetModelOperator.",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
self._project_id = project_id
self._model = model
self._operation = operation
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
if self._operation == "create":
return hook.create_model(project_id=self._project_id, model=self._model)
elif self._operation == "get":
return hook.get_model(project_id=self._project_id, model_name=self._model["name"])
else:
raise ValueError(f"Unknown operation: {self._operation}")
class MLEngineCreateModelOperator(GoogleCloudBaseOperator):
"""
Creates a new model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineCreateModelOperator`
The model should be provided by the `model` parameter.
:param model: A dictionary containing the information about the model.
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model",
"_impersonation_chain",
)
operator_extra_links = (MLEngineModelLink(),)
def __init__(
self,
*,
model: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model = model
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineModelLink.persist(
context=context,
task_instance=self,
project_id=project_id,
model_id=self._model["name"],
)
return hook.create_model(project_id=self._project_id, model=self._model)
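# A minimal usage sketch, not part of the provider source: it shows how MLEngineCreateModelOperator
# could be instantiated. The project id and model body are hypothetical placeholders.
def _example_create_mlengine_model() -> MLEngineCreateModelOperator:
    return MLEngineCreateModelOperator(
        task_id="create_mlengine_model",
        project_id="my-project",
        model={"name": "my_model", "description": "Example model created from Airflow"},
    )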
class MLEngineGetModelOperator(GoogleCloudBaseOperator):
"""
Gets a particular model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineGetModelOperator`
The name of model should be specified in `model_name`.
:param model_name: The name of the model.
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model_name",
"_impersonation_chain",
)
operator_extra_links = (MLEngineModelLink(),)
def __init__(
self,
*,
model_name: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model_name = model_name
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineModelLink.persist(
context=context,
task_instance=self,
project_id=project_id,
model_id=self._model_name,
)
return hook.get_model(project_id=self._project_id, model_name=self._model_name)
class MLEngineDeleteModelOperator(GoogleCloudBaseOperator):
"""
Deletes a model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineDeleteModelOperator`
The model should be provided by the `model_name` parameter.
:param model_name: The name of the model.
    :param delete_contents: (Optional) Whether to force the deletion even if the model is not empty.
        Will delete all versions (if any) in the model if set to True.
        The default value is False.
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model_name",
"_impersonation_chain",
)
operator_extra_links = (MLEngineModelsListLink(),)
def __init__(
self,
*,
model_name: str,
delete_contents: bool = False,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model_name = model_name
self._delete_contents = delete_contents
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineModelsListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
return hook.delete_model(
project_id=self._project_id, model_name=self._model_name, delete_contents=self._delete_contents
)
class MLEngineManageVersionOperator(GoogleCloudBaseOperator):
"""
Operator for managing a Google Cloud ML Engine version.
.. warning::
This operator is deprecated. Consider using operators for specific operations:
MLEngineCreateVersionOperator, MLEngineSetDefaultVersionOperator,
MLEngineListVersionsOperator, MLEngineDeleteVersionOperator.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:param version_name: A name to use for the version being operated upon.
If not None and the `version` argument is None or does not have a value for
the `name` key, then this will be populated in the payload for the
`name` key. (templated)
:param version: A dictionary containing the information about the version.
If the `operation` is `create`, `version` should contain all the
information about this version such as name, and deploymentUrl.
If the `operation` is `get` or `delete`, the `version` parameter
should contain the `name` of the version.
If it is None, the only `operation` possible would be `list`. (templated)
:param operation: The operation to perform. Available operations are:
* ``create``: Creates a new version in the model specified by `model_name`,
in which case the `version` parameter should contain all the
information to create that version
(e.g. `name`, `deploymentUrl`).
        * ``set_default``: Sets a version in the model specified by `model_name` to be the default.
The name of the version should be specified in the `version`
parameter.
* ``list``: Lists all available versions of the model specified
by `model_name`.
* ``delete``: Deletes the version specified in `version` parameter from the
          model specified by `model_name`.
The name of the version should be specified in the `version`
parameter.
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model_name",
"_version_name",
"_version",
"_impersonation_chain",
)
def __init__(
self,
*,
model_name: str,
version_name: str | None = None,
version: dict | None = None,
operation: str = "create",
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model_name = model_name
self._version_name = version_name
self._version = version or {}
self._operation = operation
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
warnings.warn(
"This operator is deprecated. Consider using operators for specific operations: "
"MLEngineCreateVersion, MLEngineSetDefaultVersion, MLEngineListVersions, MLEngineDeleteVersion.",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
def execute(self, context: Context):
if "name" not in self._version:
self._version["name"] = self._version_name
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
if self._operation == "create":
if not self._version:
raise ValueError(f"version attribute of {self.__class__.__name__} could not be empty")
return hook.create_version(
project_id=self._project_id, model_name=self._model_name, version_spec=self._version
)
elif self._operation == "set_default":
return hook.set_default_version(
project_id=self._project_id, model_name=self._model_name, version_name=self._version["name"]
)
elif self._operation == "list":
return hook.list_versions(project_id=self._project_id, model_name=self._model_name)
elif self._operation == "delete":
return hook.delete_version(
project_id=self._project_id, model_name=self._model_name, version_name=self._version["name"]
)
else:
raise ValueError(f"Unknown operation: {self._operation}")
class MLEngineCreateVersionOperator(GoogleCloudBaseOperator):
"""
Creates a new version in the model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineCreateVersionOperator`
    The model should be specified by `model_name`; the `version` parameter should contain all the
    information needed to create that version.
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to. (templated)
:param version: A dictionary containing the information about the version. (templated)
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model_name",
"_version",
"_impersonation_chain",
)
operator_extra_links = (MLEngineModelVersionDetailsLink(),)
def __init__(
self,
*,
model_name: str,
version: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model_name = model_name
self._version = version
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
if not self._version:
raise AirflowException("The version parameter could not be empty.")
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineModelVersionDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
model_id=self._model_name,
version_id=self._version["name"],
)
return hook.create_version(
project_id=self._project_id, model_name=self._model_name, version_spec=self._version
)
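# A minimal usage sketch, not part of the provider source: it deploys a SavedModel from Cloud
# Storage as a new version of an existing model. The project, model name, GCS path and version
# body fields are hypothetical placeholders.
def _example_create_mlengine_version() -> MLEngineCreateVersionOperator:
    return MLEngineCreateVersionOperator(
        task_id="create_mlengine_version",
        project_id="my-project",
        model_name="my_model",
        version={
            "name": "v1",
            "deploymentUri": "gs://my-bucket/saved_model/",
            "runtimeVersion": "2.11",
            "framework": "TENSORFLOW",
            "pythonVersion": "3.7",
        },
    )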
class MLEngineSetDefaultVersionOperator(GoogleCloudBaseOperator):
"""
    Sets the specified version as the default version of the model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineSetDefaultVersionOperator`
    The model is specified by `model_name`; the version to set as the default is specified by
    `version_name`.
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to. (templated)
:param version_name: A name to use for the version being operated upon. (templated)
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model_name",
"_version_name",
"_impersonation_chain",
)
operator_extra_links = (MLEngineModelVersionDetailsLink(),)
def __init__(
self,
*,
model_name: str,
version_name: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model_name = model_name
self._version_name = version_name
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
if not self._version_name:
raise AirflowException("The version_name parameter could not be empty.")
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineModelVersionDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
model_id=self._model_name,
version_id=self._version_name,
)
return hook.set_default_version(
project_id=self._project_id, model_name=self._model_name, version_name=self._version_name
)
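# Editor's sketch (not part of the original module): a hedged usage example for
# MLEngineSetDefaultVersionOperator; all identifiers are hypothetical placeholders.
def _example_set_default_version_usage():
    # Promote the (hypothetical) version "v1" to be the default version of "my_model".
    return MLEngineSetDefaultVersionOperator(
        task_id="set_default_version",
        project_id="my-project",
        model_name="my_model",
        version_name="v1",
    )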
class MLEngineListVersionsOperator(GoogleCloudBaseOperator):
"""
Lists all available versions of the model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineListVersionsOperator`
The model should be specified by `model_name`.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model_name",
"_impersonation_chain",
)
operator_extra_links = (MLEngineModelLink(),)
def __init__(
self,
*,
model_name: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model_name = model_name
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineModelLink.persist(
context=context,
task_instance=self,
project_id=project_id,
model_id=self._model_name,
)
return hook.list_versions(
project_id=self._project_id,
model_name=self._model_name,
)
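# Editor's sketch (not part of the original module): a hedged usage example for
# MLEngineListVersionsOperator; the listed versions become the task's return value (XCom).
def _example_list_versions_usage():
    return MLEngineListVersionsOperator(
        task_id="list_versions",
        project_id="my-project",  # hypothetical project
        model_name="my_model",  # hypothetical model
    )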
class MLEngineDeleteVersionOperator(GoogleCloudBaseOperator):
"""
Deletes the version from the model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineDeleteVersionOperator`
    The version to delete is specified by the `version_name` parameter, within the model specified
    by `model_name`.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:param version_name: A name to use for the version being operated upon. (templated)
:param project_id: The Google Cloud project name to which MLEngine
model belongs.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_model_name",
"_version_name",
"_impersonation_chain",
)
operator_extra_links = (MLEngineModelLink(),)
def __init__(
self,
*,
model_name: str,
version_name: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._model_name = model_name
self._version_name = version_name
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
self._validate_inputs()
def _validate_inputs(self):
if not self._model_name:
raise AirflowException("The model_name parameter could not be empty.")
if not self._version_name:
raise AirflowException("The version_name parameter could not be empty.")
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineModelLink.persist(
context=context,
task_instance=self,
project_id=project_id,
model_id=self._model_name,
)
return hook.delete_version(
project_id=self._project_id, model_name=self._model_name, version_name=self._version_name
)
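# Editor's sketch (not part of the original module): a hedged usage example for
# MLEngineDeleteVersionOperator; all identifiers are hypothetical placeholders.
def _example_delete_version_usage():
    return MLEngineDeleteVersionOperator(
        task_id="delete_version",
        project_id="my-project",
        model_name="my_model",
        version_name="v1",
    )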
class MLEngineStartTrainingJobOperator(GoogleCloudBaseOperator):
"""
Operator for launching a MLEngine training job.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MLEngineStartTrainingJobOperator`
:param job_id: A unique templated id for the submitted Google MLEngine
training job. (templated)
:param region: The Google Compute Engine region to run the MLEngine training
job in (templated).
:param package_uris: A list of Python package locations for the training
job, which should include the main training program and any additional
dependencies. This is mutually exclusive with a custom image specified
via master_config. (templated)
:param training_python_module: The name of the Python module to run within
the training job after installing the packages. This is mutually
exclusive with a custom image specified via master_config. (templated)
:param training_args: A list of command-line arguments to pass to the
training program. (templated)
:param scale_tier: Resource tier for MLEngine training job. (templated)
:param master_type: The type of virtual machine to use for the master
worker. It must be set whenever scale_tier is CUSTOM. (templated)
:param master_config: The configuration for the master worker. If this is
provided, master_type must be set as well. If a custom image is
specified, this is mutually exclusive with package_uris and
training_python_module. (templated)
:param runtime_version: The Google Cloud ML runtime version to use for
training. (templated)
:param python_version: The version of Python used in training. (templated)
:param job_dir: A Google Cloud Storage path in which to store training
outputs and other data needed for training. (templated)
:param service_account: Optional service account to use when running the training application.
(templated)
The specified service account must have the `iam.serviceAccounts.actAs` role. The
Google-managed Cloud ML Engine service account must have the `iam.serviceAccountAdmin` role
for the specified service account.
If set to None or missing, the Google-managed Cloud ML Engine service account will be used.
:param project_id: The Google Cloud project name within which MLEngine training job should run.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param mode: Can be one of 'DRY_RUN'/'CLOUD'. In 'DRY_RUN' mode, no real
training job will be launched, but the MLEngine training job request
will be printed out. In 'CLOUD' mode, a real MLEngine training job
creation request will be issued.
    :param labels: a dictionary containing labels for the job; passed to the MLEngine training job request
:param hyperparameters: Optional HyperparameterSpec dictionary for hyperparameter tuning.
For further reference, check:
https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs#HyperparameterSpec
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param cancel_on_kill: Flag which indicates whether to cancel the hook's job when on_kill is called
:param deferrable: Run operator in the deferrable mode
"""
template_fields: Sequence[str] = (
"_project_id",
"_job_id",
"_region",
"_package_uris",
"_training_python_module",
"_training_args",
"_scale_tier",
"_master_type",
"_master_config",
"_runtime_version",
"_python_version",
"_job_dir",
"_service_account",
"_hyperparameters",
"_impersonation_chain",
)
operator_extra_links = (MLEngineJobDetailsLink(),)
def __init__(
self,
*,
job_id: str,
region: str,
project_id: str,
package_uris: list[str] | None = None,
training_python_module: str | None = None,
training_args: list[str] | None = None,
scale_tier: str | None = None,
master_type: str | None = None,
master_config: dict | None = None,
runtime_version: str | None = None,
python_version: str | None = None,
job_dir: str | None = None,
service_account: str | None = None,
gcp_conn_id: str = "google_cloud_default",
mode: str = "PRODUCTION",
labels: dict[str, str] | None = None,
impersonation_chain: str | Sequence[str] | None = None,
hyperparameters: dict | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
cancel_on_kill: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._job_id = job_id
self._region = region
self._package_uris = package_uris
self._training_python_module = training_python_module
self._training_args = training_args
self._scale_tier = scale_tier
self._master_type = master_type
self._master_config = master_config
self._runtime_version = runtime_version
self._python_version = python_version
self._job_dir = job_dir
self._service_account = service_account
self._gcp_conn_id = gcp_conn_id
self._mode = mode
self._labels = labels
self._hyperparameters = hyperparameters
self._impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.cancel_on_kill = cancel_on_kill
custom = self._scale_tier is not None and self._scale_tier.upper() == "CUSTOM"
custom_image = (
custom
and self._master_config is not None
and self._master_config.get("imageUri", None) is not None
)
if not self._project_id:
raise AirflowException("Google Cloud project id is required.")
if not self._job_id:
raise AirflowException("An unique job id is required for Google MLEngine training job.")
if not self._region:
raise AirflowException("Google Compute Engine region is required.")
if custom and not self._master_type:
raise AirflowException("master_type must be set when scale_tier is CUSTOM")
if self._master_config and not self._master_type:
raise AirflowException("master_type must be set when master_config is provided")
if not (package_uris and training_python_module) and not custom_image:
raise AirflowException(
"Either a Python package with a Python module or a custom Docker image should be provided."
)
if (package_uris or training_python_module) and custom_image:
raise AirflowException(
"Either a Python package with a Python module or "
"a custom Docker image should be provided but not both."
)
def _handle_job_error(self, finished_training_job) -> None:
if finished_training_job["state"] != "SUCCEEDED":
self.log.error("MLEngine training job failed: %s", str(finished_training_job))
raise RuntimeError(finished_training_job["errorMessage"])
def execute(self, context: Context):
job_id = _normalize_mlengine_job_id(self._job_id)
self.job_id = job_id
training_request: dict[str, Any] = {
"jobId": self.job_id,
"trainingInput": {
"scaleTier": self._scale_tier,
"region": self._region,
},
}
if self._package_uris:
training_request["trainingInput"]["packageUris"] = self._package_uris
if self._training_python_module:
training_request["trainingInput"]["pythonModule"] = self._training_python_module
if self._training_args:
training_request["trainingInput"]["args"] = self._training_args
if self._master_type:
training_request["trainingInput"]["masterType"] = self._master_type
if self._master_config:
training_request["trainingInput"]["masterConfig"] = self._master_config
if self._runtime_version:
training_request["trainingInput"]["runtimeVersion"] = self._runtime_version
if self._python_version:
training_request["trainingInput"]["pythonVersion"] = self._python_version
if self._job_dir:
training_request["trainingInput"]["jobDir"] = self._job_dir
if self._service_account:
training_request["trainingInput"]["serviceAccount"] = self._service_account
if self._hyperparameters:
training_request["trainingInput"]["hyperparameters"] = self._hyperparameters
if self._labels:
training_request["labels"] = self._labels
if self._mode == "DRY_RUN":
self.log.info("In dry_run mode.")
self.log.info("MLEngine Training job request is: %s", training_request)
return
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
self.hook = hook
try:
self.log.info("Executing: %s'", training_request)
self.job_id = self.hook.create_job_without_waiting_result(
project_id=self._project_id,
body=training_request,
)
except HttpError as e:
if e.resp.status == 409:
# If the job already exists retrieve it
self.hook.get_job(project_id=self._project_id, job_id=self.job_id)
if self._project_id:
MLEngineJobDetailsLink.persist(
context=context,
task_instance=self,
project_id=self._project_id,
job_id=self.job_id,
)
self.log.error(
"Failed to create new job with given name since it already exists. "
"The existing one will be used."
)
else:
raise e
context["ti"].xcom_push(key="job_id", value=self.job_id)
if self.deferrable:
self.defer(
timeout=self.execution_timeout,
trigger=MLEngineStartTrainingJobTrigger(
conn_id=self._gcp_conn_id,
job_id=self.job_id,
project_id=self._project_id,
region=self._region,
runtime_version=self._runtime_version,
python_version=self._python_version,
job_dir=self._job_dir,
package_uris=self._package_uris,
training_python_module=self._training_python_module,
training_args=self._training_args,
labels=self._labels,
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
),
method_name="execute_complete",
)
else:
finished_training_job = self._wait_for_job_done(self._project_id, self.job_id)
self._handle_job_error(finished_training_job)
gcp_metadata = {
"job_id": self.job_id,
"project_id": self._project_id,
}
context["task_instance"].xcom_push("gcp_metadata", gcp_metadata)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineJobDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
job_id=job_id,
)
def _wait_for_job_done(self, project_id: str, job_id: str, interval: int = 30):
"""
Waits for the Job to reach a terminal state.
        This method periodically checks the job state until the job reaches a terminal state.
:param project_id: The project in which the Job is located. If set to None or missing, the default
project_id from the Google Cloud connection is used. (templated)
:param job_id: A unique id for the Google MLEngine job. (templated)
:param interval: Time expressed in seconds after which the job status is checked again. (templated)
:raises: googleapiclient.errors.HttpError
"""
self.log.info("Waiting for job. job_id=%s", job_id)
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self.hook.get_job(project_id, job_id)
if job["state"] in ["SUCCEEDED", "FAILED", "CANCELLED"]:
return job
time.sleep(interval)
def execute_complete(self, context: Context, event: dict[str, Any]):
"""
        Callback for when the trigger fires; returns immediately.
        Relies on the trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
if self._project_id:
MLEngineJobDetailsLink.persist(
context=context,
task_instance=self,
project_id=self._project_id,
job_id=self._job_id,
)
def on_kill(self) -> None:
if self.job_id and self.cancel_on_kill:
self.hook.cancel_job(job_id=self.job_id, project_id=self._project_id) # type: ignore[union-attr]
else:
self.log.info("Skipping to cancel job: %s:%s.%s", self._project_id, self.job_id)
class MLEngineTrainingCancelJobOperator(GoogleCloudBaseOperator):
"""
Operator for cleaning up failed MLEngine training job.
:param job_id: A unique templated id for the submitted Google MLEngine
training job. (templated)
:param project_id: The Google Cloud project name within which MLEngine training job should run.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"_project_id",
"_job_id",
"_impersonation_chain",
)
operator_extra_links = (MLEngineJobSListLink(),)
def __init__(
self,
*,
job_id: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._project_id = project_id
self._job_id = job_id
self._gcp_conn_id = gcp_conn_id
self._impersonation_chain = impersonation_chain
if not self._project_id:
raise AirflowException("Google Cloud project id is required.")
def execute(self, context: Context):
hook = MLEngineHook(
gcp_conn_id=self._gcp_conn_id,
impersonation_chain=self._impersonation_chain,
)
project_id = self._project_id or hook.project_id
if project_id:
MLEngineJobSListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
hook.cancel_job(project_id=self._project_id, job_id=_normalize_mlengine_job_id(self._job_id))
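# Editor's sketch (not part of the original module): a hedged usage example for
# MLEngineTrainingCancelJobOperator; the project and job id are hypothetical placeholders.
def _example_cancel_training_job_usage():
    return MLEngineTrainingCancelJobOperator(
        task_id="cancel_training",
        project_id="my-project",
        job_id="my_training_job_{{ ds_nodash }}",
    )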
| 57386 | 40.464595 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/datacatalog.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import AlreadyExists, NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.datacatalog import (
DataCatalogClient,
Entry,
EntryGroup,
SearchCatalogRequest,
SearchCatalogResult,
Tag,
TagTemplate,
TagTemplateField,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks.datacatalog import CloudDataCatalogHook
from airflow.providers.google.cloud.links.datacatalog import (
DataCatalogEntryGroupLink,
DataCatalogEntryLink,
DataCatalogTagTemplateLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudDataCatalogCreateEntryOperator(GoogleCloudBaseOperator):
"""
Creates an entry.
Currently only entries of 'FILESET' type can be created.
    The newly created entry ID is saved under the ``entry_id`` key in XCOM.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogCreateEntryOperator`
:param location: Required. The location of the entry to create.
:param entry_group: Required. Entry group ID under which the entry is created.
:param entry_id: Required. The id of the entry to create.
:param entry: Required. The entry to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Entry`
:param project_id: The ID of the Google Cloud project that owns the entry.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If set to ``None`` or missing, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"entry_id",
"entry",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
location: str,
entry_group: str,
entry_id: str,
entry: dict | Entry,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.entry_id = entry_id
self.entry = entry
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
result = hook.create_entry(
location=self.location,
entry_group=self.entry_group,
entry_id=self.entry_id,
entry=self.entry,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Entry already exists. Skipping create operation.")
result = hook.get_entry(
location=self.location,
entry_group=self.entry_group,
entry=self.entry_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
_, _, entry_id = result.name.rpartition("/")
self.log.info("Current entry_id ID: %s", entry_id)
self.xcom_push(context, key="entry_id", value=entry_id)
DataCatalogEntryLink.persist(
context=context,
task_instance=self,
entry_id=self.entry_id,
entry_group_id=self.entry_group,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return Entry.to_dict(result)
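# Editor's sketch (not part of the original module): a hedged usage example for
# CloudDataCatalogCreateEntryOperator. The dict is assumed to mirror the Data Catalog Entry
# message; the location, ids and GCS pattern are hypothetical placeholders.
def _example_create_entry_usage():
    # Create a (hypothetical) FILESET entry pointing at objects in a GCS bucket.
    return CloudDataCatalogCreateEntryOperator(
        task_id="create_entry",
        location="us-central1",
        entry_group="my_entry_group",
        entry_id="my_entry",
        entry={
            "display_name": "My fileset",
            "type_": "FILESET",
            "gcs_fileset_spec": {"file_patterns": ["gs://my-bucket/**"]},
        },
    )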
class CloudDataCatalogCreateEntryGroupOperator(GoogleCloudBaseOperator):
"""
Creates an EntryGroup.
    The newly created entry group ID is saved under the ``entry_group_id`` key in XCOM.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogCreateEntryGroupOperator`
:param location: Required. The location of the entry group to create.
:param entry_group_id: Required. The id of the entry group to create. The id must begin with a letter
or underscore, contain only English letters, numbers and underscores, and be at most 64
characters.
:param entry_group: The entry group to create. Defaults to an empty entry group.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.EntryGroup`
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group_id",
"entry_group",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryGroupLink(),)
def __init__(
self,
*,
location: str,
entry_group_id: str,
entry_group: dict | EntryGroup,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group_id = entry_group_id
self.entry_group = entry_group
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
result = hook.create_entry_group(
location=self.location,
entry_group_id=self.entry_group_id,
entry_group=self.entry_group,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Entry already exists. Skipping create operation.")
result = hook.get_entry_group(
location=self.location,
entry_group=self.entry_group_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
_, _, entry_group_id = result.name.rpartition("/")
self.log.info("Current entry group ID: %s", entry_group_id)
self.xcom_push(context, key="entry_group_id", value=entry_group_id)
DataCatalogEntryGroupLink.persist(
context=context,
task_instance=self,
entry_group_id=self.entry_group_id,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return EntryGroup.to_dict(result)
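# Editor's sketch (not part of the original module): a hedged usage example for
# CloudDataCatalogCreateEntryGroupOperator; identifiers are hypothetical placeholders.
def _example_create_entry_group_usage():
    return CloudDataCatalogCreateEntryGroupOperator(
        task_id="create_entry_group",
        location="us-central1",
        entry_group_id="my_entry_group",
        entry_group={"display_name": "My entry group"},
    )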
class CloudDataCatalogCreateTagOperator(GoogleCloudBaseOperator):
"""
Creates a tag on an entry.
    The newly created tag ID is saved under the ``tag_id`` key in XCOM.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogCreateTagOperator`
:param location: Required. The location of the tag to create.
:param entry_group: Required. Entry group ID under which the tag is created.
    :param entry: Required. Entry ID under which the tag is created.
:param tag: Required. The tag to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Tag`
    :param template_id: Optional. Template ID used to create the tag.
:param project_id: The ID of the Google Cloud project that owns the tag.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"entry",
"tag",
"template_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
location: str,
entry_group: str,
entry: str,
tag: dict | Tag,
template_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.entry = entry
self.tag = tag
self.template_id = template_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
tag = hook.create_tag(
location=self.location,
entry_group=self.entry_group,
entry=self.entry,
tag=self.tag,
template_id=self.template_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Tag already exists. Skipping create operation.")
project_id = self.project_id or hook.project_id
if project_id is None:
raise RuntimeError("The project id must be set here")
if self.template_id:
template_name = DataCatalogClient.tag_template_path(
project_id, self.location, self.template_id
)
else:
if isinstance(self.tag, Tag):
template_name = self.tag.template
else:
template_name = self.tag["template"]
tag = hook.get_tag_for_template_name(
location=self.location,
entry_group=self.entry_group,
template_name=template_name,
entry=self.entry,
project_id=project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
_, _, tag_id = tag.name.rpartition("/")
self.log.info("Current Tag ID: %s", tag_id)
self.xcom_push(context, key="tag_id", value=tag_id)
DataCatalogEntryLink.persist(
context=context,
task_instance=self,
entry_id=self.entry,
entry_group_id=self.entry_group,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return Tag.to_dict(tag)
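# Editor's sketch (not part of the original module): a hedged usage example for
# CloudDataCatalogCreateTagOperator. The "owner" field key and all identifiers are hypothetical;
# the tag dict is assumed to mirror the Data Catalog Tag message.
def _example_create_tag_usage():
    return CloudDataCatalogCreateTagOperator(
        task_id="create_tag",
        location="us-central1",
        entry_group="my_entry_group",
        entry="my_entry",
        template_id="my_tag_template",
        tag={"fields": {"owner": {"string_value": "data-team"}}},
    )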
class CloudDataCatalogCreateTagTemplateOperator(GoogleCloudBaseOperator):
"""
Creates a tag template.
    The newly created tag template ID is saved under the ``tag_template_id`` key in XCOM.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogCreateTagTemplateOperator`
:param location: Required. The location of the tag template to create.
:param tag_template_id: Required. The id of the tag template to create.
:param tag_template: Required. The tag template to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplate`
:param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"tag_template_id",
"tag_template",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
location: str,
tag_template_id: str,
tag_template: dict | TagTemplate,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template_id = tag_template_id
self.tag_template = tag_template
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
result = hook.create_tag_template(
location=self.location,
tag_template_id=self.tag_template_id,
tag_template=self.tag_template,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Tag Template already exists. Skipping create operation.")
result = hook.get_tag_template(
location=self.location,
tag_template=self.tag_template_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
_, _, tag_template = result.name.rpartition("/")
self.log.info("Current Tag ID: %s", tag_template)
self.xcom_push(context, key="tag_template_id", value=tag_template)
DataCatalogTagTemplateLink.persist(
context=context,
task_instance=self,
tag_template_id=self.tag_template_id,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return TagTemplate.to_dict(result)
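# Editor's sketch (not part of the original module): a hedged usage example for
# CloudDataCatalogCreateTagTemplateOperator. The template layout is assumed to mirror the
# Data Catalog TagTemplate message; all identifiers are hypothetical placeholders.
def _example_create_tag_template_usage():
    return CloudDataCatalogCreateTagTemplateOperator(
        task_id="create_tag_template",
        location="us-central1",
        tag_template_id="my_tag_template",
        tag_template={
            "display_name": "My tag template",
            "fields": {
                "owner": {"display_name": "Owner", "type_": {"primitive_type": "STRING"}},
            },
        },
    )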
class CloudDataCatalogCreateTagTemplateFieldOperator(GoogleCloudBaseOperator):
r"""
Creates a field in a tag template.
    The newly created tag template field ID is saved under the ``tag_template_field_id`` key in XCOM.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogCreateTagTemplateFieldOperator`
:param location: Required. The location of the tag template field to create.
    :param tag_template: Required. The ID of the tag template in which the field is created.
:param tag_template_field_id: Required. The ID of the tag template field to create. Field ids can
contain letters (both uppercase and lowercase), numbers (0-9), underscores (\_) and dashes (-).
Field IDs must be at least 1 character long and at most 128 characters long. Field IDs must also
be unique within their template.
:param tag_template_field: Required. The tag template field to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplateField`
:param project_id: The ID of the Google Cloud project that owns the tag template field.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"tag_template",
"tag_template_field_id",
"tag_template_field",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
location: str,
tag_template: str,
tag_template_field_id: str,
tag_template_field: dict | TagTemplateField,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template = tag_template
self.tag_template_field_id = tag_template_field_id
self.tag_template_field = tag_template_field
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
result = hook.create_tag_template_field(
location=self.location,
tag_template=self.tag_template,
tag_template_field_id=self.tag_template_field_id,
tag_template_field=self.tag_template_field,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Tag template field already exists. Skipping create operation.")
tag_template = hook.get_tag_template(
location=self.location,
tag_template=self.tag_template,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = tag_template.fields[self.tag_template_field_id]
self.log.info("Current Tag ID: %s", self.tag_template_field_id)
self.xcom_push(context, key="tag_template_field_id", value=self.tag_template_field_id)
DataCatalogTagTemplateLink.persist(
context=context,
task_instance=self,
tag_template_id=self.tag_template,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return TagTemplateField.to_dict(result)
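# Editor's sketch (not part of the original module): a hedged usage example for
# CloudDataCatalogCreateTagTemplateFieldOperator; identifiers are hypothetical placeholders.
def _example_create_tag_template_field_usage():
    return CloudDataCatalogCreateTagTemplateFieldOperator(
        task_id="create_tag_template_field",
        location="us-central1",
        tag_template="my_tag_template",
        tag_template_field_id="cost_center",
        tag_template_field={"display_name": "Cost center", "type_": {"primitive_type": "STRING"}},
    )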
class CloudDataCatalogDeleteEntryOperator(GoogleCloudBaseOperator):
"""
Deletes an existing entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogDeleteEntryOperator`
:param location: Required. The location of the entry to delete.
    :param entry_group: Required. Entry group ID of the entry that is deleted.
:param entry: Entry ID that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"entry",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
entry_group: str,
entry: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.entry = entry
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
hook.delete_entry(
location=self.location,
entry_group=self.entry_group,
entry=self.entry,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.info("Entry doesn't exists. Skipping.")
class CloudDataCatalogDeleteEntryGroupOperator(GoogleCloudBaseOperator):
"""
Deletes an EntryGroup.
Only entry groups that do not contain entries can be deleted.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogDeleteEntryGroupOperator`
:param location: Required. The location of the entry group to delete.
:param entry_group: Entry group ID that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
entry_group: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
hook.delete_entry_group(
location=self.location,
entry_group=self.entry_group,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.info("Entry doesn't exists. skipping")
class CloudDataCatalogDeleteTagOperator(GoogleCloudBaseOperator):
"""
Deletes a tag.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogDeleteTagOperator`
:param location: Required. The location of the tag to delete.
:param entry_group: Entry group ID for tag that is deleted.
:param entry: Entry ID for tag that is deleted.
:param tag: Identifier for TAG that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"entry",
"tag",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
entry_group: str,
entry: str,
tag: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.entry = entry
self.tag = tag
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
hook.delete_tag(
location=self.location,
entry_group=self.entry_group,
entry=self.entry,
tag=self.tag,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.info("Entry doesn't exists. skipping")
class CloudDataCatalogDeleteTagTemplateOperator(GoogleCloudBaseOperator):
"""
Deletes a tag template and all tags using the template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogDeleteTagTemplateOperator`
:param location: Required. The location of the tag template to delete.
:param tag_template: ID for tag template that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param force: Required. Currently, this field must always be set to ``true``. This confirms the
deletion of any possible tags using this template. ``force = false`` will be supported in the
future.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"tag_template",
"force",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
tag_template: str,
force: bool,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template = tag_template
self.force = force
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
hook.delete_tag_template(
location=self.location,
tag_template=self.tag_template,
force=self.force,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.info("Tag Template doesn't exists. skipping")
class CloudDataCatalogDeleteTagTemplateFieldOperator(GoogleCloudBaseOperator):
"""
Deletes a field in a tag template and all uses of that field.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogDeleteTagTemplateFieldOperator`
:param location: Required. The location of the tag template to delete.
:param tag_template: Tag Template ID for tag template field that is deleted.
:param field: Name of field that is deleted.
:param force: Required. This confirms the deletion of this field from any tags using this field.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"tag_template",
"field",
"force",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
tag_template: str,
field: str,
force: bool,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template = tag_template
self.field = field
self.force = force
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
hook.delete_tag_template_field(
location=self.location,
tag_template=self.tag_template,
field=self.field,
force=self.force,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.info("Tag Template field doesn't exists. skipping")
class CloudDataCatalogGetEntryOperator(GoogleCloudBaseOperator):
"""
Gets an entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogGetEntryOperator`
:param location: Required. The location of the entry to get.
:param entry_group: Required. The entry group of the entry to get.
    :param entry: Required. The ID of the entry to get.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
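
    Example of an illustrative task definition (the ``task_id``, location, and
    resource IDs below are placeholder values). The operator returns the entry as
    a dict (via ``Entry.to_dict``), so downstream tasks can pull it from XCom:

    .. code-block:: python

        # Illustrative placeholder values only.
        get_entry = CloudDataCatalogGetEntryOperator(
            task_id="get_entry",
            location="europe-west1",
            entry_group="my_entry_group",
            entry="my_entry",
        )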
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"entry",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
location: str,
entry_group: str,
entry: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.entry = entry
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_entry(
location=self.location,
entry_group=self.entry_group,
entry=self.entry,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogEntryLink.persist(
context=context,
task_instance=self,
entry_id=self.entry,
entry_group_id=self.entry_group,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return Entry.to_dict(result)
class CloudDataCatalogGetEntryGroupOperator(GoogleCloudBaseOperator):
"""
Gets an entry group.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogGetEntryGroupOperator`
:param location: Required. The location of the entry group to get.
    :param entry_group: Required. The ID of the entry group to get.
:param read_mask: The fields to return. If not set or empty, all fields are returned.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
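
    Example of an illustrative task definition (the ``task_id``, location, entry
    group ID, and mask paths below are placeholder values):

    .. code-block:: python

        from google.protobuf.field_mask_pb2 import FieldMask

        # Illustrative placeholder values only.
        get_entry_group = CloudDataCatalogGetEntryGroupOperator(
            task_id="get_entry_group",
            location="europe-west1",
            entry_group="my_entry_group",
            read_mask=FieldMask(paths=["name", "display_name"]),
        )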
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"read_mask",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryGroupLink(),)
def __init__(
self,
*,
location: str,
entry_group: str,
read_mask: FieldMask,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.read_mask = read_mask
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_entry_group(
location=self.location,
entry_group=self.entry_group,
read_mask=self.read_mask,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogEntryGroupLink.persist(
context=context,
task_instance=self,
entry_group_id=self.entry_group,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return EntryGroup.to_dict(result)
class CloudDataCatalogGetTagTemplateOperator(GoogleCloudBaseOperator):
"""
Gets a tag template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogGetTagTemplateOperator`
:param location: Required. The location of the tag template to get.
:param tag_template: Required. The ID of the tag template to get.
    :param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
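
    Example of an illustrative task definition (the ``task_id``, location, and tag
    template ID below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        get_tag_template = CloudDataCatalogGetTagTemplateOperator(
            task_id="get_tag_template",
            location="europe-west1",
            tag_template="my_tag_template",
        )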
"""
template_fields: Sequence[str] = (
"location",
"tag_template",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
location: str,
tag_template: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template = tag_template
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.get_tag_template(
location=self.location,
tag_template=self.tag_template,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogTagTemplateLink.persist(
context=context,
task_instance=self,
tag_template_id=self.tag_template,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return TagTemplate.to_dict(result)
class CloudDataCatalogListTagsOperator(GoogleCloudBaseOperator):
"""
Lists the tags on an Entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogListTagsOperator`
:param location: Required. The location of the tags to get.
:param entry_group: Required. The entry group of the tags to get.
:param entry: Required. The entry of the tags to get.
:param page_size: The maximum number of resources contained in the underlying API response. If page
        streaming is performed per-resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
(Default: 100)
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
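
    Example of an illustrative task definition (the ``task_id``, location, and
    resource IDs below are placeholder values). The operator returns a list of tag
    dicts, available to downstream tasks through XCom:

    .. code-block:: python

        # Illustrative placeholder values only.
        list_tags = CloudDataCatalogListTagsOperator(
            task_id="list_tags",
            location="europe-west1",
            entry_group="my_entry_group",
            entry="my_entry",
            page_size=50,
        )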
"""
template_fields: Sequence[str] = (
"location",
"entry_group",
"entry",
"page_size",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
location: str,
entry_group: str,
entry: str,
page_size: int = 100,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group = entry_group
self.entry = entry
self.page_size = page_size
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> list:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.list_tags(
location=self.location,
entry_group=self.entry_group,
entry=self.entry,
page_size=self.page_size,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogEntryLink.persist(
context=context,
task_instance=self,
entry_id=self.entry,
entry_group_id=self.entry_group,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return [Tag.to_dict(item) for item in result]
class CloudDataCatalogLookupEntryOperator(GoogleCloudBaseOperator):
r"""
Get an entry by target resource name.
This method allows clients to use the resource name from the source Google Cloud service
to get the Data Catalog Entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogLookupEntryOperator`
:param linked_resource: The full name of the Google Cloud resource the Data Catalog entry
represents. See: https://cloud.google.com/apis/design/resource\_names#full\_resource\_name. Full
names are case-sensitive.
:param sql_resource: The SQL name of the entry. SQL names are case-sensitive.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
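
    Example of an illustrative task definition (the ``task_id`` and the BigQuery
    table used as the linked resource are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        lookup_entry = CloudDataCatalogLookupEntryOperator(
            task_id="lookup_entry",
            linked_resource=(
                "//bigquery.googleapis.com/projects/my-project"
                "/datasets/my_dataset/tables/my_table"
            ),
        )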
"""
template_fields: Sequence[str] = (
"linked_resource",
"sql_resource",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
linked_resource: str | None = None,
sql_resource: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.linked_resource = linked_resource
self.sql_resource = sql_resource
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.lookup_entry(
linked_resource=self.linked_resource,
sql_resource=self.sql_resource,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id, location_id, entry_group_id, entry_id = result.name.split("/")[1::2]
DataCatalogEntryLink.persist(
context=context,
task_instance=self,
entry_id=entry_id,
entry_group_id=entry_group_id,
location_id=location_id,
project_id=project_id,
)
return Entry.to_dict(result)
class CloudDataCatalogRenameTagTemplateFieldOperator(GoogleCloudBaseOperator):
"""
Renames a field in a tag template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogRenameTagTemplateFieldOperator`
:param location: Required. The location of the tag template field to rename.
    :param tag_template: The ID of the tag template containing the field to rename.
:param field: Required. The old ID of this tag template field. For example,
``my_old_field``.
:param new_tag_template_field_id: Required. The new ID of this tag template field. For example,
``my_new_field``.
    :param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
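
    Example of an illustrative task definition (the ``task_id``, location, and
    field IDs below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        rename_field = CloudDataCatalogRenameTagTemplateFieldOperator(
            task_id="rename_tag_template_field",
            location="europe-west1",
            tag_template="my_tag_template",
            field="my_old_field",
            new_tag_template_field_id="my_new_field",
        )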
"""
template_fields: Sequence[str] = (
"location",
"tag_template",
"field",
"new_tag_template_field_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
location: str,
tag_template: str,
field: str,
new_tag_template_field_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.tag_template = tag_template
self.field = field
self.new_tag_template_field_id = new_tag_template_field_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.rename_tag_template_field(
location=self.location,
tag_template=self.tag_template,
field=self.field,
new_tag_template_field_id=self.new_tag_template_field_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataCatalogTagTemplateLink.persist(
context=context,
task_instance=self,
tag_template_id=self.tag_template,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
class CloudDataCatalogSearchCatalogOperator(GoogleCloudBaseOperator):
r"""
Searches Data Catalog for multiple resources like entries, tags that match a query.
This does not return the complete resource, only the resource identifier and high level fields.
Clients can subsequently call ``Get`` methods.
Note that searches do not have full recall. There may be results that match your query but are not
returned, even in subsequent pages of results. These missing results may vary across repeated calls to
search. Do not rely on this method if you need to guarantee full recall.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogSearchCatalogOperator`
:param scope: Required. The scope of this search request.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Scope`
:param query: Required. The query string in search query syntax. The query must be non-empty.
        Query strings can be as simple as "x" or more qualified, such as:
- name:x
- column:x
- description:y
Note: Query tokens need to have a minimum of 3 characters for substring matching to work
        correctly. See `Data Catalog Search Syntax
        <https://cloud.google.com/data-catalog/docs/how-to/search-reference>`__ for more information.
:param page_size: The maximum number of resources contained in the underlying API response. If page
streaming is performed per-resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:param order_by: Specifies the ordering of results, currently supported case-sensitive choices are:
- ``relevance``, only supports descending
- ``last_access_timestamp [asc|desc]``, defaults to descending if not specified
- ``last_modified_timestamp [asc|desc]``, defaults to descending if not specified
If not specified, defaults to ``relevance`` descending.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
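
    Example of an illustrative task definition (the ``task_id``, project ID, and
    query below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        search_catalog = CloudDataCatalogSearchCatalogOperator(
            task_id="search_catalog",
            scope={"include_project_ids": ["my-project"]},
            query="name:my_entry",
        )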
"""
template_fields: Sequence[str] = (
"scope",
"query",
"page_size",
"order_by",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
scope: dict | SearchCatalogRequest.Scope,
query: str,
page_size: int = 100,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.scope = scope
self.query = query
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> list:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.search_catalog(
scope=self.scope,
query=self.query,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [SearchCatalogResult.to_dict(item) for item in result]
class CloudDataCatalogUpdateEntryOperator(GoogleCloudBaseOperator):
"""
Updates an existing entry.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogUpdateEntryOperator`
:param entry: Required. The updated entry. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Entry`
:param update_mask: The fields to update on the entry. If absent or empty, all modifiable fields are
updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param location: Required. The location of the entry to update.
:param entry_group: The entry group ID for the entry that is being updated.
:param entry_id: The entry ID that is being updated.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
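
    Example of an illustrative task definition (the ``task_id``, location, resource
    IDs, and the updated field values below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        update_entry = CloudDataCatalogUpdateEntryOperator(
            task_id="update_entry",
            entry={"display_name": "New display name"},
            update_mask={"paths": ["display_name"]},
            location="europe-west1",
            entry_group="my_entry_group",
            entry_id="my_entry",
        )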
"""
template_fields: Sequence[str] = (
"entry",
"update_mask",
"location",
"entry_group",
"entry_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
entry: dict | Entry,
update_mask: dict | FieldMask,
location: str | None = None,
entry_group: str | None = None,
entry_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.entry = entry
self.update_mask = update_mask
self.location = location
self.entry_group = entry_group
self.entry_id = entry_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.update_entry(
entry=self.entry,
update_mask=self.update_mask,
location=self.location,
entry_group=self.entry_group,
entry_id=self.entry_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
location_id, entry_group_id, entry_id = result.name.split("/")[3::2]
DataCatalogEntryLink.persist(
context=context,
task_instance=self,
entry_id=self.entry_id or entry_id,
entry_group_id=self.entry_group or entry_group_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
class CloudDataCatalogUpdateTagOperator(GoogleCloudBaseOperator):
"""
Updates an existing tag.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogUpdateTagOperator`
:param tag: Required. The updated tag. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Tag`
:param update_mask: The fields to update on the Tag. If absent or empty, all modifiable fields are
updated. Currently the only modifiable field is the field ``fields``.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
    :param location: Required. The location of the tag to update.
:param entry_group: The entry group ID for the tag that is being updated.
:param entry: The entry ID for the tag that is being updated.
:param tag_id: The tag ID that is being updated.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
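
    Example of an illustrative task definition (the ``task_id``, location, resource
    IDs, and the tag field values below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        update_tag = CloudDataCatalogUpdateTagOperator(
            task_id="update_tag",
            tag={"fields": {"my_field": {"string_value": "new value"}}},
            update_mask={"paths": ["fields"]},
            location="europe-west1",
            entry_group="my_entry_group",
            entry="my_entry",
            tag_id="my_tag",
        )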
"""
template_fields: Sequence[str] = (
"tag",
"update_mask",
"location",
"entry_group",
"entry",
"tag_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryLink(),)
def __init__(
self,
*,
tag: dict | Tag,
update_mask: dict | FieldMask,
location: str | None = None,
entry_group: str | None = None,
entry: str | None = None,
tag_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.tag = tag
self.update_mask = update_mask
self.location = location
self.entry_group = entry_group
self.entry = entry
self.tag_id = tag_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.update_tag(
tag=self.tag,
update_mask=self.update_mask,
location=self.location,
entry_group=self.entry_group,
entry=self.entry,
tag_id=self.tag_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
location_id, entry_group_id, entry_id = result.name.split("/")[3:8:2]
DataCatalogEntryLink.persist(
context=context,
task_instance=self,
entry_id=self.entry or entry_id,
entry_group_id=self.entry_group or entry_group_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
class CloudDataCatalogUpdateTagTemplateOperator(GoogleCloudBaseOperator):
"""
Updates a tag template.
This method cannot be used to update the fields of a template. The tag
template fields are represented as separate resources and should be updated using their own
create/update/delete methods.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogUpdateTagTemplateOperator`
:param tag_template: Required. The template to update. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplate`
:param update_mask: The field mask specifies the parts of the template to overwrite.
If absent or empty, all of the allowed fields above will be updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
    :param location: Required. The location of the tag template to update.
    :param tag_template_id: Optional. The ID of the tag template that is being updated.
    :param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
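
    Example of an illustrative task definition (the ``task_id``, location, template
    ID, and display name below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        update_tag_template = CloudDataCatalogUpdateTagTemplateOperator(
            task_id="update_tag_template",
            tag_template={"display_name": "My new template name"},
            update_mask={"paths": ["display_name"]},
            location="europe-west1",
            tag_template_id="my_tag_template",
        )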
"""
template_fields: Sequence[str] = (
"tag_template",
"update_mask",
"location",
"tag_template_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
tag_template: dict | TagTemplate,
update_mask: dict | FieldMask,
location: str | None = None,
tag_template_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.tag_template = tag_template
self.update_mask = update_mask
self.location = location
self.tag_template_id = tag_template_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.update_tag_template(
tag_template=self.tag_template,
update_mask=self.update_mask,
location=self.location,
tag_template_id=self.tag_template_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
location_id, tag_template_id = result.name.split("/")[3::2]
DataCatalogTagTemplateLink.persist(
context=context,
task_instance=self,
tag_template_id=self.tag_template_id or tag_template_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
class CloudDataCatalogUpdateTagTemplateFieldOperator(GoogleCloudBaseOperator):
"""
Updates a field in a tag template. This method cannot be used to update the field type.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogUpdateTagTemplateFieldOperator`
:param tag_template_field: Required. The template to update.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplateField`
:param update_mask: The field mask specifies the parts of the template to be updated. Allowed fields:
- ``display_name``
- ``type.enum_type``
If ``update_mask`` is not set or empty, all of the allowed fields above will be updated.
When updating an enum type, the provided values will be merged with the existing values.
Therefore, enum values can only be added, existing enum values cannot be deleted nor renamed.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
    :param tag_template_field_name: Optional. The resource name of the tag template field to update.
    :param location: Optional. The location of the tag template field to update.
    :param tag_template: Optional. The ID of the tag template whose field is updated.
    :param tag_template_field_id: Optional. The ID of the tag template field to update.
    :param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
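
    Example of an illustrative task definition (the ``task_id``, location, and
    IDs below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        update_field = CloudDataCatalogUpdateTagTemplateFieldOperator(
            task_id="update_tag_template_field",
            tag_template_field={"display_name": "New field display name"},
            update_mask={"paths": ["display_name"]},
            location="europe-west1",
            tag_template="my_tag_template",
            tag_template_field_id="my_field",
        )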
"""
template_fields: Sequence[str] = (
"tag_template_field",
"update_mask",
"tag_template_field_name",
"location",
"tag_template",
"tag_template_field_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
tag_template_field: dict | TagTemplateField,
update_mask: dict | FieldMask,
tag_template_field_name: str | None = None,
location: str | None = None,
tag_template: str | None = None,
tag_template_field_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.tag_template_field_name = tag_template_field_name
self.location = location
self.tag_template = tag_template
self.tag_template_field_id = tag_template_field_id
self.project_id = project_id
self.tag_template_field = tag_template_field
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.update_tag_template_field(
tag_template_field=self.tag_template_field,
update_mask=self.update_mask,
tag_template_field_name=self.tag_template_field_name,
location=self.location,
tag_template=self.tag_template,
tag_template_field_id=self.tag_template_field_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
location_id, tag_template_id = result.name.split("/")[3:6:2]
DataCatalogTagTemplateLink.persist(
context=context,
task_instance=self,
tag_template_id=self.tag_template or tag_template_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
| 92,732 | 41.151364 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/dlp.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Various Google Cloud DLP operators which allow you to perform basic operations using Cloud DLP."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import AlreadyExists, InvalidArgument, NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.dlp_v2.types import (
ByteContentItem,
ContentItem,
DeidentifyConfig,
DeidentifyContentResponse,
DeidentifyTemplate,
DlpJob,
InspectConfig,
InspectContentResponse,
InspectJobConfig,
InspectTemplate,
JobTrigger,
ListInfoTypesResponse,
RedactImageRequest,
RedactImageResponse,
ReidentifyContentResponse,
RiskAnalysisJobConfig,
StoredInfoType,
StoredInfoTypeConfig,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks.dlp import CloudDLPHook
from airflow.providers.google.cloud.links.data_loss_prevention import (
CloudDLPDeidentifyTemplateDetailsLink,
CloudDLPDeidentifyTemplatesListLink,
CloudDLPInfoTypeDetailsLink,
CloudDLPInfoTypesListLink,
CloudDLPInspectTemplateDetailsLink,
CloudDLPInspectTemplatesListLink,
CloudDLPJobDetailsLink,
CloudDLPJobsListLink,
CloudDLPJobTriggerDetailsLink,
CloudDLPJobTriggersListLink,
CloudDLPPossibleInfoTypesListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudDLPCancelDLPJobOperator(GoogleCloudBaseOperator):
"""
Starts asynchronous cancellation on a long-running DlpJob.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCancelDLPJobOperator`
:param dlp_job_id: ID of the DLP job resource to be cancelled.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default project_id
from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
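
    Example of an illustrative task definition (the ``task_id`` and job ID below
    are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        cancel_dlp_job = CloudDLPCancelDLPJobOperator(
            task_id="cancel_dlp_job",
            dlp_job_id="i-1234567890123456789",
        )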
"""
template_fields: Sequence[str] = (
"dlp_job_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobDetailsLink(),)
def __init__(
self,
*,
dlp_job_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dlp_job_id = dlp_job_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.cancel_dlp_job(
dlp_job_id=self.dlp_job_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
job_name=self.dlp_job_id,
)
class CloudDLPCreateDeidentifyTemplateOperator(GoogleCloudBaseOperator):
"""
Create a deidentify template to reuse frequently-used configurations for content, images, and storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCreateDeidentifyTemplateOperator`
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param deidentify_template: (Optional) The DeidentifyTemplate to create.
:param template_id: (Optional) The template ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
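
    Example of an illustrative task definition (the ``task_id``, project ID, template
    ID, and the de-identification configuration below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        create_deidentify_template = CloudDLPCreateDeidentifyTemplateOperator(
            task_id="create_deidentify_template",
            project_id="my-project",
            template_id="my_deidentify_template",
            deidentify_template={
                "deidentify_config": {
                    "info_type_transformations": {
                        "transformations": [
                            {"primitive_transformation": {"replace_with_info_type_config": {}}}
                        ]
                    }
                }
            },
        )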
"""
template_fields: Sequence[str] = (
"organization_id",
"project_id",
"deidentify_template",
"template_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPDeidentifyTemplateDetailsLink(),)
def __init__(
self,
*,
organization_id: str | None = None,
project_id: str | None = None,
deidentify_template: dict | DeidentifyTemplate | None = None,
template_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.deidentify_template = deidentify_template
self.template_id = template_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
template = hook.create_deidentify_template(
organization_id=self.organization_id,
project_id=self.project_id,
deidentify_template=self.deidentify_template,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
if self.template_id is None:
raise RuntimeError("The template_id should be set here!")
template = hook.get_deidentify_template(
organization_id=self.organization_id,
project_id=self.project_id,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = DeidentifyTemplate.to_dict(template)
project_id = self.project_id or hook.project_id
        template_id = self.template_id or (result["name"].split("/")[-1] if result["name"] else None)
if project_id and template_id:
CloudDLPDeidentifyTemplateDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
template_name=template_id,
)
return result
class CloudDLPCreateDLPJobOperator(GoogleCloudBaseOperator):
"""
Creates a new job to inspect storage or calculate risk metrics.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCreateDLPJobOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param inspect_job: (Optional) The configuration for the inspect job.
:param risk_job: (Optional) The configuration for the risk job.
:param job_id: (Optional) The job ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param wait_until_finished: (Optional) If true, it will keep polling the job state
until it is set to DONE.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
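
    Example of an illustrative task definition (the ``task_id``, bucket, and inspect
    job configuration below are placeholder values):

    .. code-block:: python

        # Illustrative placeholder values only.
        create_dlp_job = CloudDLPCreateDLPJobOperator(
            task_id="create_dlp_job",
            inspect_job={
                "storage_config": {
                    "cloud_storage_options": {"file_set": {"url": "gs://my-bucket/*"}}
                },
                "inspect_config": {"info_types": [{"name": "EMAIL_ADDRESS"}]},
            },
        )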
"""
template_fields: Sequence[str] = (
"project_id",
"inspect_job",
"risk_job",
"job_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobDetailsLink(),)
def __init__(
self,
*,
project_id: str | None = None,
inspect_job: dict | InspectJobConfig | None = None,
risk_job: dict | RiskAnalysisJobConfig | None = None,
job_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
wait_until_finished: bool = True,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.inspect_job = inspect_job
self.risk_job = risk_job
self.job_id = job_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.wait_until_finished = wait_until_finished
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
job = hook.create_dlp_job(
project_id=self.project_id,
inspect_job=self.inspect_job,
risk_job=self.risk_job,
job_id=self.job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
wait_until_finished=self.wait_until_finished,
)
except AlreadyExists:
if self.job_id is None:
raise RuntimeError("The job_id must be set here!")
job = hook.get_dlp_job(
project_id=self.project_id,
dlp_job_id=self.job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = DlpJob.to_dict(job)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
job_name=result["name"].split("/")[-1] if result["name"] else None,
)
return result
class CloudDLPCreateInspectTemplateOperator(GoogleCloudBaseOperator):
"""
Create an InspectTemplate to reuse frequently-used configurations for content, images, and storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCreateInspectTemplateOperator`
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param inspect_template: (Optional) The InspectTemplate to create.
:param template_id: (Optional) The template ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
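
    Example of an illustrative task definition (the ``task_id``, project ID, template
    ID, and inspect configuration below are placeholder values):

    .. code-block:: python

        from google.cloud.dlp_v2.types import InspectTemplate

        # Illustrative placeholder values only.
        create_inspect_template = CloudDLPCreateInspectTemplateOperator(
            task_id="create_inspect_template",
            project_id="my-project",
            template_id="my_inspect_template",
            inspect_template=InspectTemplate(
                inspect_config={"info_types": [{"name": "PHONE_NUMBER"}]}
            ),
        )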
"""
template_fields: Sequence[str] = (
"organization_id",
"project_id",
"inspect_template",
"template_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInspectTemplateDetailsLink(),)
def __init__(
self,
*,
organization_id: str | None = None,
project_id: str | None = None,
inspect_template: InspectTemplate | None = None,
template_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.inspect_template = inspect_template
self.template_id = template_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
template = hook.create_inspect_template(
organization_id=self.organization_id,
project_id=self.project_id,
inspect_template=self.inspect_template,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
if self.template_id is None:
raise RuntimeError("The template_id should be set here!")
template = hook.get_inspect_template(
organization_id=self.organization_id,
project_id=self.project_id,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = InspectTemplate.to_dict(template)
        template_id = self.template_id or (result["name"].split("/")[-1] if result["name"] else None)
project_id = self.project_id or hook.project_id
if project_id and template_id:
CloudDLPInspectTemplateDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
template_name=template_id,
)
return result
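
# Illustrative usage sketch (not part of the upstream operator code): a minimal example of how
# CloudDLPCreateInspectTemplateOperator might be configured in a DAG file. The project ID,
# template ID, and the inspect_template body are hypothetical placeholders.
def _example_create_inspect_template() -> CloudDLPCreateInspectTemplateOperator:
    # Creates (or, if it already exists, simply fetches) a template matching e-mail addresses.
    return CloudDLPCreateInspectTemplateOperator(
        task_id="create_inspect_template",
        project_id="my-project",
        template_id="email_inspect_template",
        inspect_template=InspectTemplate(inspect_config={"info_types": [{"name": "EMAIL_ADDRESS"}]}),
    )

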
class CloudDLPCreateJobTriggerOperator(GoogleCloudBaseOperator):
"""
Create a job trigger to run DLP actions such as scanning storage for sensitive info on a set schedule.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCreateJobTriggerOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param job_trigger: (Optional) The JobTrigger to create.
:param trigger_id: (Optional) The JobTrigger ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"job_trigger",
"trigger_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggerDetailsLink(),)
def __init__(
self,
*,
project_id: str | None = None,
job_trigger: dict | JobTrigger | None = None,
trigger_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.job_trigger = job_trigger
self.trigger_id = trigger_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
trigger = hook.create_job_trigger(
project_id=self.project_id,
job_trigger=self.job_trigger,
trigger_id=self.trigger_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except InvalidArgument as e:
if "already in use" not in e.message:
raise
if self.trigger_id is None:
raise RuntimeError("The trigger_id should be set here!")
trigger = hook.get_job_trigger(
project_id=self.project_id,
job_trigger_id=self.trigger_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = JobTrigger.to_dict(trigger)
project_id = self.project_id or hook.project_id
trigger_name = result["name"].split("/")[-1] if result["name"] else None
if project_id:
CloudDLPJobTriggerDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
trigger_name=trigger_name,
)
return result
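
# Illustrative usage sketch (not part of the upstream operator code): one way to wire up
# CloudDLPCreateJobTriggerOperator. The project ID, trigger ID, and the job_trigger dict
# (a Cloud Storage inspect job re-run every 30 days) are hypothetical placeholders.
def _example_create_job_trigger() -> CloudDLPCreateJobTriggerOperator:
    job_trigger = {
        "inspect_job": {
            "storage_config": {"cloud_storage_options": {"file_set": {"url": "gs://my-bucket/**"}}}
        },
        "triggers": [{"schedule": {"recurrence_period_duration": {"seconds": 30 * 24 * 3600}}}],
        "status": "HEALTHY",
    }
    return CloudDLPCreateJobTriggerOperator(
        task_id="create_job_trigger",
        project_id="my-project",
        trigger_id="my_job_trigger",
        job_trigger=job_trigger,
    )

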
class CloudDLPCreateStoredInfoTypeOperator(GoogleCloudBaseOperator):
"""
Creates a pre-built stored infoType to be used for inspection.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCreateStoredInfoTypeOperator`
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param config: (Optional) The config for the StoredInfoType.
:param stored_info_type_id: (Optional) The StoredInfoType ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"organization_id",
"project_id",
"config",
"stored_info_type_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInfoTypeDetailsLink(),)
def __init__(
self,
*,
organization_id: str | None = None,
project_id: str | None = None,
config: StoredInfoTypeConfig | None = None,
stored_info_type_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.config = config
self.stored_info_type_id = stored_info_type_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
info = hook.create_stored_info_type(
organization_id=self.organization_id,
project_id=self.project_id,
config=self.config,
stored_info_type_id=self.stored_info_type_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except InvalidArgument as e:
if "already exists" not in e.message:
raise
if self.stored_info_type_id is None:
raise RuntimeError("The stored_info_type_id should be set here!")
info = hook.get_stored_info_type(
organization_id=self.organization_id,
project_id=self.project_id,
stored_info_type_id=self.stored_info_type_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = StoredInfoType.to_dict(info)
project_id = self.project_id or hook.project_id
        stored_info_type_id = self.stored_info_type_id or (
            result["name"].split("/")[-1] if result["name"] else None
        )
if project_id and stored_info_type_id:
CloudDLPInfoTypeDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
info_type_name=stored_info_type_id,
)
return result
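
# Illustrative usage sketch (not part of the upstream operator code): a possible use of
# CloudDLPCreateStoredInfoTypeOperator with a regex-based config. The project ID, stored
# infoType ID, and the pattern are hypothetical placeholders.
def _example_create_stored_info_type() -> CloudDLPCreateStoredInfoTypeOperator:
    return CloudDLPCreateStoredInfoTypeOperator(
        task_id="create_stored_info_type",
        project_id="my-project",
        stored_info_type_id="ssn_like_pattern",
        config=StoredInfoTypeConfig(
            display_name="SSN-like pattern",
            regex={"pattern": r"\d{3}-\d{2}-\d{4}"},
        ),
    )

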
class CloudDLPDeidentifyContentOperator(GoogleCloudBaseOperator):
"""
De-identifies potentially sensitive info from a content item; limits input size and output size.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPDeidentifyContentOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param deidentify_config: (Optional) Configuration for the de-identification of the
content item. Items specified here will override the template referenced by the
deidentify_template_name argument.
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
:param item: (Optional) The item to de-identify. Will be treated as text.
    :param inspect_template_name: (Optional) Template to use. Any configuration
        directly specified in inspect_config will override those set in the template.
    :param deidentify_template_name: (Optional) Template to use. Any configuration
        directly specified in deidentify_config will override those set in the template.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"deidentify_config",
"inspect_config",
"item",
"inspect_template_name",
"deidentify_template_name",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str | None = None,
deidentify_config: dict | DeidentifyConfig | None = None,
inspect_config: dict | InspectConfig | None = None,
item: dict | ContentItem | None = None,
inspect_template_name: str | None = None,
deidentify_template_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.deidentify_config = deidentify_config
self.inspect_config = inspect_config
self.item = item
self.inspect_template_name = inspect_template_name
self.deidentify_template_name = deidentify_template_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.deidentify_content(
project_id=self.project_id,
deidentify_config=self.deidentify_config,
inspect_config=self.inspect_config,
item=self.item,
inspect_template_name=self.inspect_template_name,
deidentify_template_name=self.deidentify_template_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return DeidentifyContentResponse.to_dict(response)
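
# Illustrative usage sketch (not part of the upstream operator code): mask e-mail addresses in a
# short piece of free text. The project ID and the config/item payloads are hypothetical.
def _example_deidentify_content() -> CloudDLPDeidentifyContentOperator:
    deidentify_config = {
        "info_type_transformations": {
            "transformations": [
                {"primitive_transformation": {"character_mask_config": {"masking_character": "#"}}}
            ]
        }
    }
    return CloudDLPDeidentifyContentOperator(
        task_id="deidentify_content",
        project_id="my-project",
        deidentify_config=deidentify_config,
        inspect_config={"info_types": [{"name": "EMAIL_ADDRESS"}]},
        item={"value": "Please contact jane.doe@example.com"},
    )

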
class CloudDLPDeleteDeidentifyTemplateOperator(GoogleCloudBaseOperator):
"""
Deletes a DeidentifyTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPDeleteDeidentifyTemplateOperator`
:param template_id: The ID of deidentify template to be deleted.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPDeidentifyTemplatesListLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
hook.delete_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPDeidentifyTemplatesListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
except NotFound:
self.log.error("Template %s not found.", self.template_id)
class CloudDLPDeleteDLPJobOperator(GoogleCloudBaseOperator):
"""
Deletes a long-running DlpJob.
This method indicates that the client is no longer interested
in the DlpJob result. The job will be cancelled if possible.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPDeleteDLPJobOperator`
:param dlp_job_id: The ID of the DLP job resource to be deleted.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dlp_job_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobsListLink(),)
def __init__(
self,
*,
dlp_job_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dlp_job_id = dlp_job_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
hook.delete_dlp_job(
dlp_job_id=self.dlp_job_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobsListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
except NotFound:
self.log.error("Job %s id not found.", self.dlp_job_id)
class CloudDLPDeleteInspectTemplateOperator(GoogleCloudBaseOperator):
"""
Deletes an InspectTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPDeleteInspectTemplateOperator`
:param template_id: The ID of the inspect template to be deleted.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInspectTemplatesListLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
hook.delete_inspect_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInspectTemplatesListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
except NotFound:
self.log.error("Template %s not found", self.template_id)
class CloudDLPDeleteJobTriggerOperator(GoogleCloudBaseOperator):
"""
Deletes a job trigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPDeleteJobTriggerOperator`
:param job_trigger_id: The ID of the DLP job trigger to be deleted.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"job_trigger_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggersListLink(),)
def __init__(
self,
*,
job_trigger_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
hook.delete_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobTriggersListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
except NotFound:
self.log.error("Trigger %s not found", self.job_trigger_id)
class CloudDLPDeleteStoredInfoTypeOperator(GoogleCloudBaseOperator):
"""
Deletes a stored infoType.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPDeleteStoredInfoTypeOperator`
:param stored_info_type_id: The ID of the stored info type to be deleted.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"stored_info_type_id",
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInfoTypesListLink(),)
def __init__(
self,
*,
stored_info_type_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.stored_info_type_id = stored_info_type_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
hook.delete_stored_info_type(
stored_info_type_id=self.stored_info_type_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.error("Stored info %s not found", self.stored_info_type_id)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInfoTypesListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
            )


class CloudDLPGetDeidentifyTemplateOperator(GoogleCloudBaseOperator):
"""
Gets a DeidentifyTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPGetDeidentifyTemplateOperator`
:param template_id: The ID of deidentify template to be read.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPDeidentifyTemplateDetailsLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
template = hook.get_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPDeidentifyTemplateDetailsLink.persist(
context=context, task_instance=self, project_id=project_id, template_name=self.template_id
)
        return DeidentifyTemplate.to_dict(template)


class CloudDLPGetDLPJobOperator(GoogleCloudBaseOperator):
"""
Gets the latest state of a long-running DlpJob.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPGetDLPJobOperator`
:param dlp_job_id: The ID of the DLP job resource to be read.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dlp_job_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobDetailsLink(),)
def __init__(
self,
*,
dlp_job_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dlp_job_id = dlp_job_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
job = hook.get_dlp_job(
dlp_job_id=self.dlp_job_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
job_name=self.dlp_job_id,
)
return DlpJob.to_dict(job)
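
# Illustrative usage sketch (not part of the upstream operator code): read back the current state
# of a DLP job. The job and project IDs are hypothetical; the returned DlpJob dict is pushed to
# XCom for downstream tasks.
def _example_get_dlp_job() -> CloudDLPGetDLPJobOperator:
    return CloudDLPGetDLPJobOperator(
        task_id="get_dlp_job",
        project_id="my-project",
        dlp_job_id="i-1234567890123456789",
    )

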
class CloudDLPGetInspectTemplateOperator(GoogleCloudBaseOperator):
"""
Gets an InspectTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPGetInspectTemplateOperator`
:param template_id: The ID of inspect template to be read.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInspectTemplateDetailsLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
template = hook.get_inspect_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInspectTemplateDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
template_name=self.template_id,
)
return InspectTemplate.to_dict(template)
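
# Illustrative usage sketch (not part of the upstream operator code): fetch a single inspect
# template by ID (IDs are hypothetical). For organization-level templates, organization_id
# would be passed instead of project_id.
def _example_get_inspect_template() -> CloudDLPGetInspectTemplateOperator:
    return CloudDLPGetInspectTemplateOperator(
        task_id="get_inspect_template",
        project_id="my-project",
        template_id="email_inspect_template",
    )

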
class CloudDLPGetDLPJobTriggerOperator(GoogleCloudBaseOperator):
"""
Gets a job trigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPGetDLPJobTriggerOperator`
:param job_trigger_id: The ID of the DLP job trigger to be read.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"job_trigger_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggerDetailsLink(),)
def __init__(
self,
*,
job_trigger_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
trigger = hook.get_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobTriggerDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
trigger_name=self.job_trigger_id,
)
        return JobTrigger.to_dict(trigger)


class CloudDLPGetStoredInfoTypeOperator(GoogleCloudBaseOperator):
"""
Gets a stored infoType.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPGetStoredInfoTypeOperator`
:param stored_info_type_id: The ID of the stored info type to be read.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"stored_info_type_id",
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInfoTypeDetailsLink(),)
def __init__(
self,
*,
stored_info_type_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.stored_info_type_id = stored_info_type_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
info = hook.get_stored_info_type(
stored_info_type_id=self.stored_info_type_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInfoTypeDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
info_type_name=self.stored_info_type_id,
)
        return StoredInfoType.to_dict(info)


class CloudDLPInspectContentOperator(GoogleCloudBaseOperator):
"""
Finds potentially sensitive info in content; limits input size, processing time, and output size.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPInspectContentOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
    :param item: (Optional) The item to inspect. Will be treated as text.
    :param inspect_template_name: (Optional) Template to use. Any configuration
        directly specified in inspect_config will override those set in the template.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"inspect_config",
"item",
"inspect_template_name",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str | None = None,
inspect_config: dict | InspectConfig | None = None,
item: dict | ContentItem | None = None,
inspect_template_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.inspect_config = inspect_config
self.item = item
self.inspect_template_name = inspect_template_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.inspect_content(
project_id=self.project_id,
inspect_config=self.inspect_config,
item=self.item,
inspect_template_name=self.inspect_template_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return InspectContentResponse.to_dict(response)
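
# Illustrative usage sketch (not part of the upstream operator code): ad-hoc inspection of a short
# text item for phone numbers and e-mail addresses. The project ID and payload are hypothetical.
def _example_inspect_content() -> CloudDLPInspectContentOperator:
    return CloudDLPInspectContentOperator(
        task_id="inspect_content",
        project_id="my-project",
        inspect_config={"info_types": [{"name": "PHONE_NUMBER"}, {"name": "EMAIL_ADDRESS"}]},
        item={"value": "Call (555) 010-0000 or write to jane.doe@example.com"},
    )

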
class CloudDLPListDeidentifyTemplatesOperator(GoogleCloudBaseOperator):
"""
Lists DeidentifyTemplates.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListDeidentifyTemplatesOperator`
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
    :param order_by: (Optional) Comma-separated list of fields to order by,
        followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPDeidentifyTemplatesListLink(),)
def __init__(
self,
*,
organization_id: str | None = None,
project_id: str | None = None,
page_size: int | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
templates = hook.list_deidentify_templates(
organization_id=self.organization_id,
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPDeidentifyTemplatesListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
        return [DeidentifyTemplate.to_dict(template) for template in templates]  # type: ignore[arg-type]


class CloudDLPListDLPJobsOperator(GoogleCloudBaseOperator):
"""
Lists DlpJobs that match the specified filter in the request.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListDLPJobsOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param job_type: (Optional) The type of job.
    :param order_by: (Optional) Comma-separated list of fields to order by,
        followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobsListLink(),)
def __init__(
self,
*,
project_id: str | None = None,
results_filter: str | None = None,
page_size: int | None = None,
job_type: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.results_filter = results_filter
self.page_size = page_size
self.job_type = job_type
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
jobs = hook.list_dlp_jobs(
project_id=self.project_id,
results_filter=self.results_filter,
page_size=self.page_size,
job_type=self.job_type,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobsListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
# the DlpJob.to_dict does not have the right type defined as possible to pass in constructor
return [DlpJob.to_dict(job) for job in jobs] # type: ignore[arg-type]
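
# Illustrative usage sketch (not part of the upstream operator code): list only inspect jobs that
# have finished. The filter string follows the DLP dlpJobs.list filter grammar; the project ID is
# a hypothetical placeholder.
def _example_list_done_inspect_jobs() -> CloudDLPListDLPJobsOperator:
    return CloudDLPListDLPJobsOperator(
        task_id="list_done_inspect_jobs",
        project_id="my-project",
        job_type="INSPECT_JOB",
        results_filter="state=DONE",
    )

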
class CloudDLPListInfoTypesOperator(GoogleCloudBaseOperator):
"""
Returns a list of the sensitive information types that the DLP API supports.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListInfoTypesOperator`
    :param project_id: (Optional) Google Cloud project ID where the
        DLP Instance exists. If set to None or missing, the default
        project_id from the Google Cloud connection is used (here it is only needed to
        build the console link exposed by this operator).
    :param language_code: (Optional) BCP-47 language code for localized infoType
        friendly names. If omitted, or if localized strings are not available, en-US
        strings will be returned.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"language_code",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPPossibleInfoTypesListLink(),)
def __init__(
self,
*,
project_id: str | None = None,
language_code: str | None = None,
results_filter: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.language_code = language_code
self.results_filter = results_filter
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.list_info_types(
language_code=self.language_code,
results_filter=self.results_filter,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPPossibleInfoTypesListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
return ListInfoTypesResponse.to_dict(response)
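
# Illustrative usage sketch (not part of the upstream operator code): list the built-in infoTypes
# with English display names; the language code is just an example value.
def _example_list_info_types() -> CloudDLPListInfoTypesOperator:
    return CloudDLPListInfoTypesOperator(
        task_id="list_info_types",
        language_code="en-US",
    )

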
class CloudDLPListInspectTemplatesOperator(GoogleCloudBaseOperator):
"""
Lists InspectTemplates.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListInspectTemplatesOperator`
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
    :param order_by: (Optional) Comma-separated list of fields to order by,
        followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInspectTemplatesListLink(),)
def __init__(
self,
*,
organization_id: str | None = None,
project_id: str | None = None,
page_size: int | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
templates = hook.list_inspect_templates(
organization_id=self.organization_id,
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInspectTemplatesListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
        return [InspectTemplate.to_dict(t) for t in templates]


class CloudDLPListJobTriggersOperator(GoogleCloudBaseOperator):
"""
Lists job triggers.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListJobTriggersOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
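    A minimal usage sketch; the task ID, project ID, and ordering expression are
    illustrative placeholders, not values taken from this module:
    .. code-block:: python
        list_job_triggers = CloudDLPListJobTriggersOperator(
            task_id="list_job_triggers",
            project_id="example-project",  # placeholder project ID
            order_by="create_time desc",  # assumed to be a valid DLP ordering expression
        )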
"""
template_fields: Sequence[str] = (
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggersListLink(),)
def __init__(
self,
*,
project_id: str | None = None,
page_size: int | None = None,
order_by: str | None = None,
results_filter: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.results_filter = results_filter
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
jobs = hook.list_job_triggers(
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
results_filter=self.results_filter,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobTriggersListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
return [JobTrigger.to_dict(j) for j in jobs]
class CloudDLPListStoredInfoTypesOperator(GoogleCloudBaseOperator):
"""
Lists stored infoTypes.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListStoredInfoTypesOperator`
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInfoTypesListLink(),)
def __init__(
self,
*,
organization_id: str | None = None,
project_id: str | None = None,
page_size: int | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
infos = hook.list_stored_info_types(
organization_id=self.organization_id,
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInfoTypesListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
return [StoredInfoType.to_dict(i) for i in infos]
class CloudDLPRedactImageOperator(GoogleCloudBaseOperator):
"""
Redacts potentially sensitive info from an image; limits input size, processing time, and output size.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPRedactImageOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
:param image_redaction_configs: (Optional) The configuration for specifying what
content to redact from images.
:param include_findings: (Optional) Whether the response should include findings
along with the redacted image.
:param byte_item: (Optional) The content must be PNG, JPEG, SVG or BMP.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
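    A minimal usage sketch; the task ID, project ID, and the in-memory image bytes are
    illustrative assumptions rather than values defined in this module:
    .. code-block:: python
        redact_image = CloudDLPRedactImageOperator(
            task_id="redact_image",
            project_id="example-project",  # placeholder project ID
            byte_item={"type_": "IMAGE_PNG", "data": image_bytes},  # image_bytes: raw PNG bytes (assumed)
            inspect_config={"info_types": [{"name": "EMAIL_ADDRESS"}]},
            include_findings=False,
        )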
"""
template_fields: Sequence[str] = (
"project_id",
"inspect_config",
"image_redaction_configs",
"include_findings",
"byte_item",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str | None = None,
inspect_config: dict | InspectConfig | None = None,
image_redaction_configs: None | (list[dict] | list[RedactImageRequest.ImageRedactionConfig]) = None,
include_findings: bool | None = None,
byte_item: dict | ByteContentItem | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.inspect_config = inspect_config
self.image_redaction_configs = image_redaction_configs
self.include_findings = include_findings
self.byte_item = byte_item
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.redact_image(
project_id=self.project_id,
inspect_config=self.inspect_config,
image_redaction_configs=self.image_redaction_configs,
include_findings=self.include_findings,
byte_item=self.byte_item,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return RedactImageResponse.to_dict(response)
class CloudDLPReidentifyContentOperator(GoogleCloudBaseOperator):
"""
Re-identifies content that has been de-identified.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPReidentifyContentOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param reidentify_config: (Optional) Configuration for the re-identification of
the content item.
:param inspect_config: (Optional) Configuration for the inspector.
:param item: (Optional) The item to re-identify. Will be treated as text.
:param inspect_template_name: (Optional) Optional template to use. Any configuration
directly specified in inspect_config will override those set in the template.
:param reidentify_template_name: (Optional) Optional template to use. References an
instance of DeidentifyTemplate. Any configuration directly specified in
reidentify_config or inspect_config will override those set in the template.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
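    A minimal usage sketch; the task ID, project ID, template name, and surrogate text are
    illustrative placeholders, assumed to come from an earlier de-identification step:
    .. code-block:: python
        reidentify_content = CloudDLPReidentifyContentOperator(
            task_id="reidentify_content",
            project_id="example-project",  # placeholder project ID
            item={"value": "example surrogate text"},  # placeholder ContentItem value
            reidentify_template_name="projects/example-project/deidentifyTemplates/example-template",
        )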
"""
template_fields: Sequence[str] = (
"project_id",
"reidentify_config",
"inspect_config",
"item",
"inspect_template_name",
"reidentify_template_name",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str | None = None,
reidentify_config: dict | DeidentifyConfig | None = None,
inspect_config: dict | InspectConfig | None = None,
item: dict | ContentItem | None = None,
inspect_template_name: str | None = None,
reidentify_template_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.reidentify_config = reidentify_config
self.inspect_config = inspect_config
self.item = item
self.inspect_template_name = inspect_template_name
self.reidentify_template_name = reidentify_template_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.reidentify_content(
project_id=self.project_id,
reidentify_config=self.reidentify_config,
inspect_config=self.inspect_config,
item=self.item,
inspect_template_name=self.inspect_template_name,
reidentify_template_name=self.reidentify_template_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return ReidentifyContentResponse.to_dict(response)
class CloudDLPUpdateDeidentifyTemplateOperator(GoogleCloudBaseOperator):
"""
Updates the DeidentifyTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPUpdateDeidentifyTemplateOperator`
:param template_id: The ID of deidentify template to be updated.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param deidentify_template: New DeidentifyTemplate value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
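    A minimal usage sketch; the task ID, project ID, and template ID are illustrative
    placeholders:
    .. code-block:: python
        update_deidentify_template = CloudDLPUpdateDeidentifyTemplateOperator(
            task_id="update_deidentify_template",
            template_id="example-template-id",  # placeholder
            project_id="example-project",  # placeholder
            deidentify_template={"display_name": "Updated template"},
            update_mask={"paths": ["display_name"]},
        )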
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"deidentify_template",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPDeidentifyTemplateDetailsLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
deidentify_template: dict | DeidentifyTemplate | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.deidentify_template = deidentify_template
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
template = hook.update_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
deidentify_template=self.deidentify_template,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPDeidentifyTemplateDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
template_name=self.template_id,
)
return DeidentifyTemplate.to_dict(template)
class CloudDLPUpdateInspectTemplateOperator(GoogleCloudBaseOperator):
"""
Updates the InspectTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPUpdateInspectTemplateOperator`
:param template_id: The ID of the inspect template to be updated.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param inspect_template: New InspectTemplate value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"inspect_template",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInspectTemplateDetailsLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
inspect_template: dict | InspectTemplate | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.inspect_template = inspect_template
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
template = hook.update_inspect_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
inspect_template=self.inspect_template,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInspectTemplateDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
template_name=self.template_id,
)
return InspectTemplate.to_dict(template)
class CloudDLPUpdateJobTriggerOperator(GoogleCloudBaseOperator):
"""
Updates a job trigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPUpdateJobTriggerOperator`
:param job_trigger_id: The ID of the DLP job trigger to be updated.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param job_trigger: New JobTrigger value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
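    A minimal usage sketch; the task ID, project ID, and trigger ID are illustrative
    placeholders:
    .. code-block:: python
        update_job_trigger = CloudDLPUpdateJobTriggerOperator(
            task_id="update_job_trigger",
            job_trigger_id="example-trigger-id",  # placeholder
            project_id="example-project",  # placeholder
            job_trigger={"display_name": "Updated trigger"},
            update_mask={"paths": ["display_name"]},
        )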
"""
template_fields: Sequence[str] = (
"job_trigger_id",
"project_id",
"job_trigger",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggerDetailsLink(),)
def __init__(
self,
*,
job_trigger_id,
project_id: str | None = None,
job_trigger: dict | JobTrigger | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.job_trigger = job_trigger
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
trigger = hook.update_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
job_trigger=self.job_trigger,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobTriggerDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
trigger_name=self.job_trigger_id,
)
return JobTrigger.to_dict(trigger)
class CloudDLPUpdateStoredInfoTypeOperator(GoogleCloudBaseOperator):
"""
Updates the stored infoType by creating a new version.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPUpdateStoredInfoTypeOperator`
:param stored_info_type_id: The ID of the stored info type to be updated.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param config: Updated configuration for the storedInfoType. If not provided, a new
version of the storedInfoType will be created with the existing configuration.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
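    A minimal usage sketch; the task ID, project ID, and stored infoType ID are illustrative
    placeholders:
    .. code-block:: python
        update_stored_info_type = CloudDLPUpdateStoredInfoTypeOperator(
            task_id="update_stored_info_type",
            stored_info_type_id="example-stored-info-type",  # placeholder
            project_id="example-project",  # placeholder
            config={"display_name": "Updated stored infoType"},
            update_mask={"paths": ["display_name"]},
        )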
"""
template_fields: Sequence[str] = (
"stored_info_type_id",
"organization_id",
"project_id",
"config",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPInfoTypeDetailsLink(),)
def __init__(
self,
*,
stored_info_type_id,
organization_id: str | None = None,
project_id: str | None = None,
config: dict | StoredInfoTypeConfig | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.stored_info_type_id = stored_info_type_id
self.organization_id = organization_id
self.project_id = project_id
self.config = config
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
info = hook.update_stored_info_type(
stored_info_type_id=self.stored_info_type_id,
organization_id=self.organization_id,
project_id=self.project_id,
config=self.config,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPInfoTypeDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
info_type_name=self.stored_info_type_id,
)
return StoredInfoType.to_dict(info)
| 120,442 | 40.632561 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage Bucket operator."""
from __future__ import annotations
import datetime
import subprocess
import sys
import warnings
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import TYPE_CHECKING, Sequence
import pendulum
if TYPE_CHECKING:
from airflow.utils.context import Context
from google.api_core.exceptions import Conflict
from google.cloud.exceptions import GoogleCloudError
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.common.links.storage import FileDetailsLink, StorageLink
from airflow.utils import timezone
class GCSCreateBucketOperator(GoogleCloudBaseOperator):
"""
Creates a new bucket.
Google Cloud Storage uses a flat namespace, so you
can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket. (templated)
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage (templated). Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:param location: The location of the bucket. (templated)
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso:: https://developers.google.com/storage/docs/bucket-locations
:param project_id: The ID of the Google Cloud Project. (templated)
:param labels: User-provided labels, in key/value pairs.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    The following operator would create a new bucket ``test-bucket``
    with the ``MULTI_REGIONAL`` storage class in the ``EU`` region:
    .. code-block:: python
        CreateBucket = GCSCreateBucketOperator(
task_id="CreateNewBucket",
bucket_name="test-bucket",
storage_class="MULTI_REGIONAL",
location="EU",
labels={"env": "dev", "team": "airflow"},
gcp_conn_id="airflow-conn-id",
)
"""
template_fields: Sequence[str] = (
"bucket_name",
"storage_class",
"location",
"project_id",
"impersonation_chain",
)
ui_color = "#f0eee4"
operator_extra_links = (StorageLink(),)
def __init__(
self,
*,
bucket_name: str,
resource: dict | None = None,
storage_class: str = "MULTI_REGIONAL",
location: str = "US",
project_id: str | None = None,
labels: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.resource = resource
self.storage_class = storage_class
self.location = location
self.project_id = project_id
self.labels = labels
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
StorageLink.persist(
context=context,
task_instance=self,
uri=self.bucket_name,
project_id=self.project_id or hook.project_id,
)
try:
hook.create_bucket(
bucket_name=self.bucket_name,
resource=self.resource,
storage_class=self.storage_class,
location=self.location,
project_id=self.project_id,
labels=self.labels,
)
except Conflict: # HTTP 409
self.log.warning("Bucket %s already exists", self.bucket_name)
class GCSListObjectsOperator(GoogleCloudBaseOperator):
"""
    List all objects from the bucket, filtered by the given prefix and delimiter in the name or by match_glob.
    This operator returns a Python list with the names of objects which can be used by
    XCom in the downstream task.
    :param bucket: The Google Cloud Storage bucket in which to find the objects. (templated)
    :param prefix: String or list of strings which filter objects whose names begin with
        it/them. (templated)
    :param delimiter: (Deprecated) The delimiter by which you want to filter the objects. (templated)
        For example, to list the CSV files in a directory in GCS you would use
        delimiter='.csv'.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param match_glob: (Optional) Filters objects based on the glob pattern given by the string
        (e.g. ``'**/*.json'``).
    **Example**:
        The following operator would list all the Avro files from the ``sales/sales-2017``
        folder in the ``data`` bucket. ::
            GCS_Files = GCSListObjectsOperator(
                task_id='GCS_Files',
                bucket='data',
                prefix='sales/sales-2017/',
                match_glob='**/*.avro',
                gcp_conn_id=google_cloud_conn_id
            )
"""
template_fields: Sequence[str] = (
"bucket",
"prefix",
"delimiter",
"impersonation_chain",
)
ui_color = "#f0eee4"
operator_extra_links = (StorageLink(),)
def __init__(
self,
*,
bucket: str,
prefix: str | list[str] | None = None,
delimiter: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
match_glob: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
if delimiter:
warnings.warn(
"Usage of 'delimiter' is deprecated, please use 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.delimiter = delimiter
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.match_glob = match_glob
def execute(self, context: Context) -> list:
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.match_glob:
self.log.info(
"Getting list of the files. Bucket: %s; MatchGlob: %s; Prefix(es): %s",
self.bucket,
self.match_glob,
self.prefix,
)
else:
self.log.info(
"Getting list of the files. Bucket: %s; Delimiter: %s; Prefix(es): %s",
self.bucket,
self.delimiter,
self.prefix,
)
StorageLink.persist(
context=context,
task_instance=self,
uri=self.bucket,
project_id=hook.project_id,
)
return hook.list(
bucket_name=self.bucket, prefix=self.prefix, delimiter=self.delimiter, match_glob=self.match_glob
)
class GCSDeleteObjectsOperator(GoogleCloudBaseOperator):
"""
Deletes objects from a list or all objects matching a prefix from a Google Cloud Storage bucket.
:param bucket_name: The GCS bucket to delete from
:param objects: List of objects to delete. These should be the names
of objects in the bucket, not including gs://bucket/
    :param prefix: String or list of strings which filter objects whose names begin with
        it/them. (templated)
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
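    A minimal usage sketch; the task ID, bucket name, and prefix are illustrative
    placeholders (every object under the prefix would be deleted):
    .. code-block:: python
        delete_reports = GCSDeleteObjectsOperator(
            task_id="delete_reports",
            bucket_name="example-bucket",  # placeholder bucket
            prefix="reports/2023-01-01/",  # placeholder prefix
        )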
"""
template_fields: Sequence[str] = (
"bucket_name",
"prefix",
"objects",
"impersonation_chain",
)
def __init__(
self,
*,
bucket_name: str,
objects: list[str] | None = None,
prefix: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.bucket_name = bucket_name
self.objects = objects
self.prefix = prefix
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
if objects is None and prefix is None:
err_message = "(Task {task_id}) Either object or prefix should be set. Both are None.".format(
**kwargs
)
raise ValueError(err_message)
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.objects is not None:
objects = self.objects
else:
objects = hook.list(bucket_name=self.bucket_name, prefix=self.prefix)
self.log.info("Deleting %s objects from %s", len(objects), self.bucket_name)
for object_name in objects:
hook.delete(bucket_name=self.bucket_name, object_name=object_name)
class GCSBucketCreateAclEntryOperator(GoogleCloudBaseOperator):
"""
Creates a new ACL entry on the specified bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSBucketCreateAclEntryOperator`
:param bucket: Name of a bucket.
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
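    A minimal usage sketch; the task ID, bucket name, and entity are illustrative
    placeholders:
    .. code-block:: python
        set_bucket_acl = GCSBucketCreateAclEntryOperator(
            task_id="set_bucket_acl",
            bucket="example-bucket",  # placeholder bucket
            entity="user-analyst@example.com",  # placeholder entity
            role="READER",
        )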
"""
# [START gcs_bucket_create_acl_template_fields]
template_fields: Sequence[str] = (
"bucket",
"entity",
"role",
"user_project",
"impersonation_chain",
)
# [END gcs_bucket_create_acl_template_fields]
operator_extra_links = (StorageLink(),)
def __init__(
self,
*,
bucket: str,
entity: str,
role: str,
user_project: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.entity = entity
self.role = role
self.user_project = user_project
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
StorageLink.persist(
context=context,
task_instance=self,
uri=self.bucket,
project_id=hook.project_id,
)
hook.insert_bucket_acl(
bucket_name=self.bucket, entity=self.entity, role=self.role, user_project=self.user_project
)
class GCSObjectCreateAclEntryOperator(GoogleCloudBaseOperator):
"""
Creates a new ACL entry on the specified object.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSObjectCreateAclEntryOperator`
:param bucket: Name of a bucket.
:param object_name: Name of the object. For information about how to URL encode object
names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:param generation: Optional. If present, selects a specific revision of this object.
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
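    A minimal usage sketch; the task ID, bucket name, object name, and entity are
    illustrative placeholders:
    .. code-block:: python
        set_object_acl = GCSObjectCreateAclEntryOperator(
            task_id="set_object_acl",
            bucket="example-bucket",  # placeholder bucket
            object_name="data/report.csv",  # placeholder object
            entity="user-analyst@example.com",  # placeholder entity
            role="READER",
        )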
"""
# [START gcs_object_create_acl_template_fields]
template_fields: Sequence[str] = (
"bucket",
"object_name",
"entity",
"generation",
"role",
"user_project",
"impersonation_chain",
)
# [END gcs_object_create_acl_template_fields]
operator_extra_links = (FileDetailsLink(),)
def __init__(
self,
*,
bucket: str,
object_name: str,
entity: str,
role: str,
generation: int | None = None,
user_project: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.object_name = object_name
self.entity = entity
self.role = role
self.generation = generation
self.user_project = user_project
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
FileDetailsLink.persist(
context=context,
task_instance=self,
uri=f"{self.bucket}/{self.object_name}",
project_id=hook.project_id,
)
hook.insert_object_acl(
bucket_name=self.bucket,
object_name=self.object_name,
entity=self.entity,
role=self.role,
generation=self.generation,
user_project=self.user_project,
)
class GCSFileTransformOperator(GoogleCloudBaseOperator):
"""
Copies data from a source GCS location to a temporary location on the local filesystem.
Runs a transformation on this file as specified by the transformation script
    and uploads the output to a destination bucket. If the output bucket is not
    specified, the original file will be overwritten.
    The locations of the source and the destination files in the local
    filesystem are provided as the first and second arguments to the
    transformation script. The transformation script is expected to read the
    data from the source, transform it, and write the output to the local
    destination file.
:param source_bucket: The bucket to locate the source_object. (templated)
:param source_object: The key to be retrieved from GCS. (templated)
:param destination_bucket: The bucket to upload the key after transformation.
If not provided, source_bucket will be used. (templated)
:param destination_object: The key to be written in GCS.
If not provided, source_object will be used. (templated)
:param transform_script: location of the executable transformation script or list of arguments
passed to subprocess ex. `['python', 'script.py', 10]`. (templated)
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
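    A minimal usage sketch; the task ID, bucket names, object names, and script path are
    illustrative assumptions rather than values defined in this module:
    .. code-block:: python
        transform_file = GCSFileTransformOperator(
            task_id="transform_file",
            source_bucket="example-src-bucket",  # placeholder
            source_object="data/input.csv",  # placeholder
            destination_bucket="example-dst-bucket",  # placeholder
            destination_object="data/output.csv",  # placeholder
            transform_script=["python", "/opt/scripts/transform.py"],  # assumed script location
        )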
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"destination_bucket",
"destination_object",
"transform_script",
"impersonation_chain",
)
operator_extra_links = (FileDetailsLink(),)
def __init__(
self,
*,
source_bucket: str,
source_object: str,
transform_script: str | list[str],
destination_bucket: str | None = None,
destination_object: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket or self.source_bucket
self.destination_object = destination_object or self.source_object
self.gcp_conn_id = gcp_conn_id
self.transform_script = transform_script
self.output_encoding = sys.getdefaultencoding()
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = GCSHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
with NamedTemporaryFile() as source_file, NamedTemporaryFile() as destination_file:
self.log.info("Downloading file from %s", self.source_bucket)
hook.download(
bucket_name=self.source_bucket, object_name=self.source_object, filename=source_file.name
)
self.log.info("Starting the transformation")
cmd = [self.transform_script] if isinstance(self.transform_script, str) else self.transform_script
cmd += [source_file.name, destination_file.name]
with subprocess.Popen(
args=cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True
) as process:
self.log.info("Process output:")
if process.stdout:
for line in iter(process.stdout.readline, b""):
self.log.info(line.decode(self.output_encoding).rstrip())
process.wait()
if process.returncode:
raise AirflowException(f"Transform script failed: {process.returncode}")
self.log.info("Transformation succeeded. Output temporarily located at %s", destination_file.name)
self.log.info("Uploading file to %s as %s", self.destination_bucket, self.destination_object)
FileDetailsLink.persist(
context=context,
task_instance=self,
uri=f"{self.destination_bucket}/{self.destination_object}",
project_id=hook.project_id,
)
hook.upload(
bucket_name=self.destination_bucket,
object_name=self.destination_object,
filename=destination_file.name,
)
class GCSTimeSpanFileTransformOperator(GoogleCloudBaseOperator):
"""
Copy objects that were modified during a time span, run a transform, and upload results to a bucket.
Determines a list of objects that were added or modified at a GCS source
location during a specific time-span, copies them to a temporary location
on the local file system, runs a transform on this file as specified by
the transformation script and uploads the output to the destination bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSTimeSpanFileTransformOperator`
    The locations of the source and the destination files in the local
    filesystem are provided as the first and second arguments to the
    transformation script. The time-span is passed to the transform script as
    the third and fourth arguments, formatted as UTC ISO 8601 strings.
    The transformation script is expected to read the
    data from the source, transform it, and write the output to the local
    destination file.
:param source_bucket: The bucket to fetch data from. (templated)
    :param source_prefix: Prefix string which filters objects whose names begin with
        this prefix. Can interpolate execution date and time components. (templated)
:param source_gcp_conn_id: The connection ID to use connecting to Google Cloud
to download files to be processed.
:param source_impersonation_chain: Optional service account to impersonate using short-term
credentials (to download files to be processed), or chained list of accounts required to
get the access_token of the last account in the list, which will be impersonated in the
request. If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param destination_bucket: The bucket to write data to. (templated)
:param destination_prefix: Prefix string for the upload location.
Can interpolate execution date and time components. (templated)
:param destination_gcp_conn_id: The connection ID to use connecting to Google Cloud
to upload processed files.
:param destination_impersonation_chain: Optional service account to impersonate using short-term
credentials (to upload processed files), or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param transform_script: location of the executable transformation script or list of arguments
passed to subprocess ex. `['python', 'script.py', 10]`. (templated)
    :param chunk_size: The size of a chunk of data when downloading or uploading (in bytes).
        This must be a multiple of 256 KB (per the Google Cloud Storage API specification).
    :param download_continue_on_fail: If set to true, the task does not error out when a
        download fails but continues with the remaining files.
    :param download_num_attempts: Number of attempts to try to download a single file.
    :param upload_continue_on_fail: If set to true, the task does not error out when an
        upload fails but continues with the remaining files.
    :param upload_num_attempts: Number of attempts to try to upload a single file.
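    A minimal usage sketch; the task ID, connection IDs, bucket names, prefixes, and script
    path are illustrative assumptions rather than values defined in this module:
    .. code-block:: python
        transform_timespan = GCSTimeSpanFileTransformOperator(
            task_id="transform_timespan",
            source_bucket="example-src-bucket",  # placeholder
            source_prefix="incoming/%Y/%m/%d/",  # strftime components are interpolated
            source_gcp_conn_id="google_cloud_default",
            destination_bucket="example-dst-bucket",  # placeholder
            destination_prefix="processed/%Y/%m/%d/",  # placeholder
            destination_gcp_conn_id="google_cloud_default",
            transform_script=["python", "/opt/scripts/transform_timespan.py"],  # assumed script location
        )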
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_prefix",
"destination_bucket",
"destination_prefix",
"transform_script",
"source_impersonation_chain",
"destination_impersonation_chain",
)
operator_extra_links = (StorageLink(),)
@staticmethod
def interpolate_prefix(prefix: str, dt: datetime.datetime) -> str | None:
"""Interpolate prefix with datetime.
:param prefix: The prefix to interpolate
:param dt: The datetime to interpolate
"""
return dt.strftime(prefix) if prefix else None
def __init__(
self,
*,
source_bucket: str,
source_prefix: str,
source_gcp_conn_id: str,
destination_bucket: str,
destination_prefix: str,
destination_gcp_conn_id: str,
transform_script: str | list[str],
source_impersonation_chain: str | Sequence[str] | None = None,
destination_impersonation_chain: str | Sequence[str] | None = None,
chunk_size: int | None = None,
download_continue_on_fail: bool | None = False,
download_num_attempts: int = 1,
upload_continue_on_fail: bool | None = False,
upload_num_attempts: int = 1,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_prefix = source_prefix
self.source_gcp_conn_id = source_gcp_conn_id
self.source_impersonation_chain = source_impersonation_chain
self.destination_bucket = destination_bucket
self.destination_prefix = destination_prefix
self.destination_gcp_conn_id = destination_gcp_conn_id
self.destination_impersonation_chain = destination_impersonation_chain
self.transform_script = transform_script
self.output_encoding = sys.getdefaultencoding()
self.chunk_size = chunk_size
self.download_continue_on_fail = download_continue_on_fail
self.download_num_attempts = download_num_attempts
self.upload_continue_on_fail = upload_continue_on_fail
self.upload_num_attempts = upload_num_attempts
def execute(self, context: Context) -> list[str]:
# Define intervals and prefixes.
try:
orig_start = context["data_interval_start"]
orig_end = context["data_interval_end"]
except KeyError:
orig_start = pendulum.instance(context["execution_date"])
following_execution_date = context["dag"].following_schedule(context["execution_date"])
if following_execution_date is None:
orig_end = None
else:
orig_end = pendulum.instance(following_execution_date)
timespan_start = orig_start
if orig_end is None: # Only possible in Airflow before 2.2.
self.log.warning("No following schedule found, setting timespan end to max %s", orig_end)
timespan_end = pendulum.instance(datetime.datetime.max)
        elif orig_start >= orig_end:  # Airflow 2.2 sets start == end for non-periodic schedules.
self.log.warning("DAG schedule not periodic, setting timespan end to max %s", orig_end)
timespan_end = pendulum.instance(datetime.datetime.max)
else:
timespan_end = orig_end
timespan_start = timespan_start.in_timezone(timezone.utc)
timespan_end = timespan_end.in_timezone(timezone.utc)
source_prefix_interp = GCSTimeSpanFileTransformOperator.interpolate_prefix(
self.source_prefix,
timespan_start,
)
destination_prefix_interp = GCSTimeSpanFileTransformOperator.interpolate_prefix(
self.destination_prefix,
timespan_start,
)
source_hook = GCSHook(
gcp_conn_id=self.source_gcp_conn_id,
impersonation_chain=self.source_impersonation_chain,
)
destination_hook = GCSHook(
gcp_conn_id=self.destination_gcp_conn_id,
impersonation_chain=self.destination_impersonation_chain,
)
StorageLink.persist(
context=context,
task_instance=self,
uri=self.destination_bucket,
project_id=destination_hook.project_id,
)
# Fetch list of files.
blobs_to_transform = source_hook.list_by_timespan(
bucket_name=self.source_bucket,
prefix=source_prefix_interp,
timespan_start=timespan_start,
timespan_end=timespan_end,
)
with TemporaryDirectory() as temp_input_dir, TemporaryDirectory() as temp_output_dir:
temp_input_dir_path = Path(temp_input_dir)
temp_output_dir_path = Path(temp_output_dir)
# TODO: download in parallel.
for blob_to_transform in blobs_to_transform:
destination_file = temp_input_dir_path / blob_to_transform
destination_file.parent.mkdir(parents=True, exist_ok=True)
try:
source_hook.download(
bucket_name=self.source_bucket,
object_name=blob_to_transform,
filename=str(destination_file),
chunk_size=self.chunk_size,
num_max_attempts=self.download_num_attempts,
)
except GoogleCloudError:
if self.download_continue_on_fail:
continue
raise
self.log.info("Starting the transformation")
cmd = [self.transform_script] if isinstance(self.transform_script, str) else self.transform_script
cmd += [
str(temp_input_dir_path),
str(temp_output_dir_path),
timespan_start.replace(microsecond=0).isoformat(),
timespan_end.replace(microsecond=0).isoformat(),
]
with subprocess.Popen(
args=cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True
) as process:
self.log.info("Process output:")
if process.stdout:
for line in iter(process.stdout.readline, b""):
self.log.info(line.decode(self.output_encoding).rstrip())
process.wait()
if process.returncode:
raise AirflowException(f"Transform script failed: {process.returncode}")
self.log.info("Transformation succeeded. Output temporarily located at %s", temp_output_dir_path)
files_uploaded = []
# TODO: upload in parallel.
for upload_file in temp_output_dir_path.glob("**/*"):
if upload_file.is_dir():
continue
upload_file_name = str(upload_file.relative_to(temp_output_dir_path))
if self.destination_prefix is not None:
upload_file_name = f"{destination_prefix_interp}/{upload_file_name}"
self.log.info("Uploading file %s to %s", upload_file, upload_file_name)
try:
destination_hook.upload(
bucket_name=self.destination_bucket,
object_name=upload_file_name,
filename=str(upload_file),
chunk_size=self.chunk_size,
num_max_attempts=self.upload_num_attempts,
)
files_uploaded.append(str(upload_file_name))
except GoogleCloudError:
if self.upload_continue_on_fail:
continue
raise
return files_uploaded
class GCSDeleteBucketOperator(GoogleCloudBaseOperator):
"""
Deletes bucket from a Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSDeleteBucketOperator`
:param bucket_name: name of the bucket which will be deleted
    :param force: If false, a non-empty bucket cannot be deleted; set force=True
        to delete the bucket together with any objects it still contains
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"bucket_name",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
bucket_name: str,
force: bool = True,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.force: bool = force
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = GCSHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
hook.delete_bucket(bucket_name=self.bucket_name, force=self.force)
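# A hedged usage sketch for GCSDeleteBucketOperator (the task id and bucket name are
# illustrative assumptions). With force=True the bucket is deleted together with any
# objects it still contains. Shown as a comment to keep import-time behaviour unchanged:
#
#     delete_bucket = GCSDeleteBucketOperator(
#         task_id="delete_example_bucket",
#         bucket_name="example-bucket",
#         force=True,
#     )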
class GCSSynchronizeBucketsOperator(GoogleCloudBaseOperator):
"""
    Synchronizes the contents of buckets or bucket directories in Google Cloud Storage.
Parameters ``source_object`` and ``destination_object`` describe the root sync directory. If they are
not passed, the entire bucket will be synchronized. They should point to directories.
.. note::
The synchronization of individual files is not supported. Only entire directories can be
synchronized.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSSynchronizeBuckets`
:param source_bucket: The name of the bucket containing the source objects.
:param destination_bucket: The name of the bucket containing the destination objects.
:param source_object: The root sync directory in the source bucket.
:param destination_object: The root sync directory in the destination bucket.
:param recursive: If True, subdirectories will be considered
:param allow_overwrite: if True, the files will be overwritten if a mismatched file is found.
By default, overwriting files is not allowed
    :param delete_extra_files: if True, deletes additional files from the source that are not found in
        the destination. By default, extra files are not deleted.
.. note::
This option can delete data quickly if you specify the wrong source/destination combination.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"source_bucket",
"destination_bucket",
"source_object",
"destination_object",
"recursive",
"delete_extra_files",
"allow_overwrite",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (StorageLink(),)
def __init__(
self,
*,
source_bucket: str,
destination_bucket: str,
source_object: str | None = None,
destination_object: str | None = None,
recursive: bool = True,
delete_extra_files: bool = False,
allow_overwrite: bool = False,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.destination_bucket = destination_bucket
self.source_object = source_object
self.destination_object = destination_object
self.recursive = recursive
self.delete_extra_files = delete_extra_files
self.allow_overwrite = allow_overwrite
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
StorageLink.persist(
context=context,
task_instance=self,
uri=self._get_uri(self.destination_bucket, self.destination_object),
project_id=hook.project_id,
)
hook.sync(
source_bucket=self.source_bucket,
destination_bucket=self.destination_bucket,
source_object=self.source_object,
destination_object=self.destination_object,
recursive=self.recursive,
delete_extra_files=self.delete_extra_files,
allow_overwrite=self.allow_overwrite,
)
def _get_uri(self, gcs_bucket: str, gcs_object: str | None) -> str:
if gcs_object and gcs_object[-1] == "/":
gcs_object = gcs_object[:-1]
return f"{gcs_bucket}/{gcs_object}" if gcs_object else gcs_bucket
| 42,429 | 41.051536 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/video_intelligence.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Vision operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.videointelligence_v1 import Feature, VideoContext
from google.protobuf.json_format import MessageToDict
from airflow.providers.google.cloud.hooks.video_intelligence import CloudVideoIntelligenceHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudVideoIntelligenceDetectVideoLabelsOperator(GoogleCloudBaseOperator):
"""
Performs video annotation, annotating video labels.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVideoIntelligenceDetectVideoLabelsOperator`.
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only
Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined
based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to ``google_cloud_default``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_video_intelligence_detect_labels_template_fields]
template_fields: Sequence[str] = (
"input_uri",
"output_uri",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_video_intelligence_detect_labels_template_fields]
def __init__(
self,
*,
input_uri: str,
input_content: bytes | None = None,
output_uri: str | None = None,
video_context: dict | VideoContext | None = None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.input_uri = input_uri
self.input_content = input_content
self.output_uri = output_uri
self.video_context = video_context
self.location = location
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVideoIntelligenceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.annotate_video(
input_uri=self.input_uri,
input_content=self.input_content,
video_context=self.video_context,
location=self.location,
retry=self.retry,
features=[Feature.LABEL_DETECTION],
timeout=self.timeout,
)
self.log.info("Processing video for label annotations")
result = MessageToDict(operation.result()._pb)
self.log.info("Finished processing.")
return result
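# A hedged usage sketch (the GCS URI and region are illustrative assumptions). The value
# returned by execute() is the annotate response converted with MessageToDict, so it can
# be consumed from XCom as plain JSON-like data:
#
#     detect_labels = CloudVideoIntelligenceDetectVideoLabelsOperator(
#         task_id="detect_video_labels",
#         input_uri="gs://example-bucket/example-video.mp4",
#         location="us-east1",
#     )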
class CloudVideoIntelligenceDetectVideoExplicitContentOperator(GoogleCloudBaseOperator):
"""
Performs video annotation, annotating explicit content.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVideoIntelligenceDetectVideoExplicitContentOperator`
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only
Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined
based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
    :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to ``google_cloud_default``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_video_intelligence_detect_explicit_content_template_fields]
template_fields: Sequence[str] = (
"input_uri",
"output_uri",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_video_intelligence_detect_explicit_content_template_fields]
def __init__(
self,
*,
input_uri: str,
output_uri: str | None = None,
input_content: bytes | None = None,
video_context: dict | VideoContext | None = None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.input_uri = input_uri
self.output_uri = output_uri
self.input_content = input_content
self.video_context = video_context
self.location = location
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVideoIntelligenceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.annotate_video(
input_uri=self.input_uri,
input_content=self.input_content,
video_context=self.video_context,
location=self.location,
retry=self.retry,
features=[Feature.EXPLICIT_CONTENT_DETECTION],
timeout=self.timeout,
)
self.log.info("Processing video for explicit content annotations")
result = MessageToDict(operation.result()._pb)
self.log.info("Finished processing.")
return result
class CloudVideoIntelligenceDetectVideoShotsOperator(GoogleCloudBaseOperator):
"""
Performs video annotation, annotating video shots.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVideoIntelligenceDetectVideoShotsOperator`
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only
Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined
based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to ``google_cloud_default``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_video_intelligence_detect_video_shots_template_fields]
template_fields: Sequence[str] = (
"input_uri",
"output_uri",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_video_intelligence_detect_video_shots_template_fields]
def __init__(
self,
*,
input_uri: str,
output_uri: str | None = None,
input_content: bytes | None = None,
video_context: dict | VideoContext | None = None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.input_uri = input_uri
self.output_uri = output_uri
self.input_content = input_content
self.video_context = video_context
self.location = location
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.timeout = timeout
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVideoIntelligenceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.annotate_video(
input_uri=self.input_uri,
input_content=self.input_content,
video_context=self.video_context,
location=self.location,
retry=self.retry,
features=[Feature.SHOT_CHANGE_DETECTION],
timeout=self.timeout,
)
self.log.info("Processing video for video shots annotations")
result = MessageToDict(operation.result()._pb)
self.log.info("Finished processing.")
return result
| 14,159 | 45.732673 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/cloud_composer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import AlreadyExists
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.orchestration.airflow.service_v1 import ImageVersion
from google.cloud.orchestration.airflow.service_v1.types import Environment
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.configuration import conf
from airflow.providers.google.cloud.hooks.cloud_composer import CloudComposerHook
from airflow.providers.google.cloud.links.base import BaseGoogleLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.cloud_composer import CloudComposerExecutionTrigger
from airflow.providers.google.common.consts import GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME
if TYPE_CHECKING:
from airflow.utils.context import Context
CLOUD_COMPOSER_BASE_LINK = "https://console.cloud.google.com/composer/environments"
CLOUD_COMPOSER_DETAILS_LINK = (
CLOUD_COMPOSER_BASE_LINK + "/detail/{region}/{environment_id}/monitoring?project={project_id}"
)
CLOUD_COMPOSER_ENVIRONMENTS_LINK = CLOUD_COMPOSER_BASE_LINK + "?project={project_id}"
class CloudComposerEnvironmentLink(BaseGoogleLink):
"""Helper class for constructing Cloud Composer Environment Link."""
name = "Cloud Composer Environment"
key = "composer_conf"
format_str = CLOUD_COMPOSER_DETAILS_LINK
@staticmethod
def persist(
operator_instance: (
CloudComposerCreateEnvironmentOperator
| CloudComposerUpdateEnvironmentOperator
| CloudComposerGetEnvironmentOperator
),
context: Context,
) -> None:
operator_instance.xcom_push(
context,
key=CloudComposerEnvironmentLink.key,
value={
"project_id": operator_instance.project_id,
"region": operator_instance.region,
"environment_id": operator_instance.environment_id,
},
)
class CloudComposerEnvironmentsLink(BaseGoogleLink):
"""Helper class for constructing Cloud Composer Environment Link."""
name = "Cloud Composer Environment List"
key = "composer_conf"
format_str = CLOUD_COMPOSER_ENVIRONMENTS_LINK
@staticmethod
def persist(operator_instance: CloudComposerListEnvironmentsOperator, context: Context) -> None:
operator_instance.xcom_push(
context,
key=CloudComposerEnvironmentsLink.key,
value={
"project_id": operator_instance.project_id,
},
)
class CloudComposerCreateEnvironmentOperator(GoogleCloudBaseOperator):
"""
Create a new environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param environment: The environment to create.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param deferrable: Run operator in the deferrable mode
:param pooling_period_seconds: Optional: Control the rate of the poll for the result of deferrable run.
By default the trigger will poll every 30 seconds.
"""
template_fields = (
"project_id",
"region",
"environment_id",
"environment",
"impersonation_chain",
)
operator_extra_links = (CloudComposerEnvironmentLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
environment: Environment | dict,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
pooling_period_seconds: int = 30,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.environment = environment
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.pooling_period_seconds = pooling_period_seconds
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
name = hook.get_environment_name(self.project_id, self.region, self.environment_id)
if isinstance(self.environment, Environment):
self.environment.name = name
else:
self.environment["name"] = name
CloudComposerEnvironmentLink.persist(operator_instance=self, context=context)
try:
result = hook.create_environment(
project_id=self.project_id,
region=self.region,
environment=self.environment,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
context["ti"].xcom_push(key="operation_id", value=result.operation.name)
if not self.deferrable:
environment = hook.wait_for_operation(timeout=self.timeout, operation=result)
return Environment.to_dict(environment)
else:
self.defer(
trigger=CloudComposerExecutionTrigger(
project_id=self.project_id,
region=self.region,
operation_name=result.operation.name,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
pooling_period_seconds=self.pooling_period_seconds,
),
method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME,
)
except AlreadyExists:
environment = hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Environment.to_dict(environment)
def execute_complete(self, context: Context, event: dict):
if event["operation_done"]:
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
env = hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Environment.to_dict(env)
else:
raise AirflowException(f"Unexpected error in the operation: {event['operation_name']}")
class CloudComposerDeleteEnvironmentOperator(GoogleCloudBaseOperator):
"""
Delete an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode
:param pooling_period_seconds: Optional: Control the rate of the poll for the result of deferrable run.
By default, the trigger will poll every 30 seconds.
"""
template_fields = (
"project_id",
"region",
"environment_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
pooling_period_seconds: int = 30,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.pooling_period_seconds = pooling_period_seconds
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.delete_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if not self.deferrable:
hook.wait_for_operation(timeout=self.timeout, operation=result)
else:
self.defer(
trigger=CloudComposerExecutionTrigger(
project_id=self.project_id,
region=self.region,
operation_name=result.operation.name,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
pooling_period_seconds=self.pooling_period_seconds,
),
method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME,
)
def execute_complete(self, context: Context, event: dict):
pass
class CloudComposerGetEnvironmentOperator(GoogleCloudBaseOperator):
"""
Get an existing environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"environment_id",
"impersonation_chain",
)
operator_extra_links = (CloudComposerEnvironmentLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudComposerEnvironmentLink.persist(operator_instance=self, context=context)
return Environment.to_dict(result)
class CloudComposerListEnvironmentsOperator(GoogleCloudBaseOperator):
"""
List environments.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param page_size: The maximum number of environments to return.
:param page_token: The next_page_token value returned from a previous List
request, if any.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"impersonation_chain",
)
operator_extra_links = (CloudComposerEnvironmentsLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.page_size = page_size
self.page_token = page_token
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
CloudComposerEnvironmentsLink.persist(operator_instance=self, context=context)
result = hook.list_environments(
project_id=self.project_id,
region=self.region,
page_size=self.page_size,
page_token=self.page_token,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [Environment.to_dict(env) for env in result]
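# A hedged usage sketch (project and region are illustrative assumptions). The operator
# returns a list of Environment dicts, one per environment found in the region:
#
#     list_envs = CloudComposerListEnvironmentsOperator(
#         task_id="list_composer_environments",
#         project_id="example-project",
#         region="us-central1",
#     )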
class CloudComposerUpdateEnvironmentOperator(GoogleCloudBaseOperator):
r"""
Update an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param environment: A patch environment. Fields specified by the ``updateMask`` will be copied from the
patch environment into the environment under update.
:param update_mask: Required. A comma-separated list of paths, relative to ``Environment``, of fields to
update. If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode
:param pooling_period_seconds: Optional: Control the rate of the poll for the result of deferrable run.
By default, the trigger will poll every 30 seconds.
"""
template_fields = (
"project_id",
"region",
"environment_id",
"impersonation_chain",
)
operator_extra_links = (CloudComposerEnvironmentLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
environment: dict | Environment,
update_mask: dict | FieldMask,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
pooling_period_seconds: int = 30,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.environment = environment
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.pooling_period_seconds = pooling_period_seconds
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.update_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
environment=self.environment,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudComposerEnvironmentLink.persist(operator_instance=self, context=context)
if not self.deferrable:
environment = hook.wait_for_operation(timeout=self.timeout, operation=result)
return Environment.to_dict(environment)
else:
self.defer(
trigger=CloudComposerExecutionTrigger(
project_id=self.project_id,
region=self.region,
operation_name=result.operation.name,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
pooling_period_seconds=self.pooling_period_seconds,
),
method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME,
)
def execute_complete(self, context: Context, event: dict):
if event["operation_done"]:
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
env = hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Environment.to_dict(env)
else:
raise AirflowException(f"Unexpected error in the operation: {event['operation_name']}")
class CloudComposerListImageVersionsOperator(GoogleCloudBaseOperator):
"""
    List ImageVersions for the provided location.
    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param region: Required. The ID of the Google Cloud region that the service belongs to.
    :param page_size: The maximum number of image versions to return.
    :param page_token: The next_page_token value returned from a previous List request, if any.
    :param include_past_releases: Flag to include past released image versions.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
include_past_releases: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.page_size = page_size
self.page_token = page_token
self.include_past_releases = include_past_releases
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.list_image_versions(
project_id=self.project_id,
region=self.region,
page_size=self.page_size,
page_token=self.page_token,
include_past_releases=self.include_past_releases,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [ImageVersion.to_dict(image) for image in result]
| 26,674 | 39.849923 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/functions.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Functions operators."""
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Sequence
from googleapiclient.errors import HttpError
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.functions import CloudFunctionsHook
from airflow.providers.google.cloud.links.cloud_functions import (
CloudFunctionsDetailsLink,
CloudFunctionsListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.utils.field_validator import (
GcpBodyFieldValidator,
GcpFieldValidationException,
)
from airflow.version import version
if TYPE_CHECKING:
from airflow.utils.context import Context
def _validate_available_memory_in_mb(value):
if int(value) <= 0:
raise GcpFieldValidationException("The available memory has to be greater than 0")
def _validate_max_instances(value):
if int(value) <= 0:
raise GcpFieldValidationException("The max instances parameter has to be greater than 0")
CLOUD_FUNCTION_VALIDATION: list[dict[str, Any]] = [
dict(name="name", regexp="^.+$"),
dict(name="description", regexp="^.+$", optional=True),
dict(name="entryPoint", regexp=r"^.+$", optional=True),
dict(name="runtime", regexp=r"^.+$", optional=True),
dict(name="timeout", regexp=r"^.+$", optional=True),
dict(name="availableMemoryMb", custom_validation=_validate_available_memory_in_mb, optional=True),
dict(name="labels", optional=True),
dict(name="environmentVariables", optional=True),
dict(name="network", regexp=r"^.+$", optional=True),
dict(name="maxInstances", optional=True, custom_validation=_validate_max_instances),
dict(
name="source_code",
type="union",
fields=[
dict(name="sourceArchiveUrl", regexp=r"^.+$"),
dict(name="sourceRepositoryUrl", regexp=r"^.+$", api_version="v1beta2"),
dict(name="sourceRepository", type="dict", fields=[dict(name="url", regexp=r"^.+$")]),
dict(name="sourceUploadUrl"),
],
),
dict(
name="trigger",
type="union",
fields=[
dict(
name="httpsTrigger",
type="dict",
fields=[
# This dict should be empty at input (url is added at output)
],
),
dict(
name="eventTrigger",
type="dict",
fields=[
dict(name="eventType", regexp=r"^.+$"),
dict(name="resource", regexp=r"^.+$"),
dict(name="service", regexp=r"^.+$", optional=True),
dict(
name="failurePolicy",
type="dict",
optional=True,
fields=[dict(name="retry", type="dict", optional=True)],
),
],
),
],
),
]
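# A hedged example of a function body that satisfies the validation schema above (the
# project, bucket and function names are illustrative assumptions, not values required by
# the schema). It provides exactly one source_code field and one trigger field:
#
#     example_function_body = {
#         "name": "projects/example-project/locations/us-central1/functions/hello-world",
#         "entryPoint": "hello_world",
#         "runtime": "python39",
#         "sourceArchiveUrl": "gs://example-bucket/hello-world.zip",
#         "httpsTrigger": {},
#     }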
class CloudFunctionDeployFunctionOperator(GoogleCloudBaseOperator):
"""
Create or update a function in Google Cloud Functions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFunctionDeployFunctionOperator`
:param location: Google Cloud region where the function should be created.
:param body: Body of the Cloud Functions definition. The body must be a
Cloud Functions dictionary as described in:
https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions
. Different API versions require different variants of the Cloud Functions
dictionary.
:param project_id: (Optional) Google Cloud project ID where the function
should be created.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
Default 'google_cloud_default'.
:param api_version: (Optional) API version used (for example v1 - default - or
v1beta1).
:param zip_path: Path to zip file containing source code of the function. If the path
is set, the sourceUploadUrl should not be specified in the body or it should
be empty. Then the zip file will be uploaded using the upload URL generated
via generateUploadUrl from the Cloud Functions API.
:param validate_body: If set to False, body validation is not performed.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcf_function_deploy_template_fields]
template_fields: Sequence[str] = (
"body",
"project_id",
"location",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcf_function_deploy_template_fields]
operator_extra_links = (CloudFunctionsDetailsLink(),)
def __init__(
self,
*,
location: str,
body: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
zip_path: str | None = None,
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.location = location
self.body = body
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.zip_path = zip_path
self.zip_path_preprocessor = ZipPathPreprocessor(body, zip_path)
self._field_validator: GcpBodyFieldValidator | None = None
self.impersonation_chain = impersonation_chain
if validate_body:
self._field_validator = GcpBodyFieldValidator(CLOUD_FUNCTION_VALIDATION, api_version=api_version)
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if not self.location:
raise AirflowException("The required parameter 'location' is missing")
if not self.body:
raise AirflowException("The required parameter 'body' is missing")
self.zip_path_preprocessor.preprocess_body()
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def _create_new_function(self, hook) -> None:
hook.create_new_function(project_id=self.project_id, location=self.location, body=self.body)
def _update_function(self, hook) -> None:
hook.update_function(self.body["name"], self.body, self.body.keys())
def _check_if_function_exists(self, hook) -> bool:
name = self.body.get("name")
if not name:
raise GcpFieldValidationException(f"The 'name' field should be present in body: '{self.body}'.")
try:
hook.get_function(name)
except HttpError as e:
status = e.resp.status
if status == 404:
return False
raise e
return True
def _upload_source_code(self, hook):
return hook.upload_function_zip(
project_id=self.project_id, location=self.location, zip_path=self.zip_path
)
def _set_airflow_version_label(self) -> None:
if "labels" not in self.body.keys():
self.body["labels"] = {}
self.body["labels"].update({"airflow-version": "v" + version.replace(".", "-").replace("+", "-")})
def execute(self, context: Context):
hook = CloudFunctionsHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if self.zip_path_preprocessor.should_upload_function():
self.body[GCF_SOURCE_UPLOAD_URL] = self._upload_source_code(hook)
self._validate_all_body_fields()
self._set_airflow_version_label()
if not self._check_if_function_exists(hook):
self._create_new_function(hook)
else:
self._update_function(hook)
project_id = self.project_id or hook.project_id
if project_id:
CloudFunctionsDetailsLink.persist(
context=context,
task_instance=self,
location=self.location,
project_id=project_id,
function_name=self.body["name"].split("/")[-1],
)
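# A hedged usage sketch deploying from a local zip archive (all values are illustrative
# assumptions). Because sourceUploadUrl is present but empty and zip_path is set, the
# operator uploads the archive via generateUploadUrl and fills the field in before
# creating or updating the function:
#
#     deploy_function = CloudFunctionDeployFunctionOperator(
#         task_id="deploy_cloud_function",
#         location="us-central1",
#         body={
#             "name": "projects/example-project/locations/us-central1/functions/hello-world",
#             "entryPoint": "hello_world",
#             "runtime": "python39",
#             "sourceUploadUrl": "",
#             "httpsTrigger": {},
#         },
#         zip_path="/tmp/hello-world.zip",
#     )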
GCF_SOURCE_ARCHIVE_URL = "sourceArchiveUrl"
GCF_SOURCE_UPLOAD_URL = "sourceUploadUrl"
SOURCE_REPOSITORY = "sourceRepository"
GCF_ZIP_PATH = "zip_path"
class ZipPathPreprocessor:
"""
Pre-processes zip path parameter.
Responsible for checking if the zip path parameter is correctly specified in
    relation to the source_code body fields. A non-empty zip path parameter is special because
    it is mutually exclusive with the sourceArchiveUrl and sourceRepository body fields.
    It is also mutually exclusive with a non-empty sourceUploadUrl.
    The pre-processing modifies the sourceUploadUrl body field in a special way when zip_path
    is not empty. An extra step is run when the execute method is called: the sourceUploadUrl
    field value is set in the body with the value returned by the generateUploadUrl Cloud
    Functions API method.
:param body: Body passed to the create/update method calls.
:param zip_path: (optional) Path to zip file containing source code of the function. If the path
is set, the sourceUploadUrl should not be specified in the body or it should
be empty. Then the zip file will be uploaded using the upload URL generated
via generateUploadUrl from the Cloud Functions API.
"""
upload_function: bool | None = None
def __init__(self, body: dict, zip_path: str | None = None) -> None:
self.body = body
self.zip_path = zip_path
@staticmethod
def _is_present_and_empty(dictionary, field) -> bool:
return field in dictionary and not dictionary[field]
def _verify_upload_url_and_no_zip_path(self) -> None:
if self._is_present_and_empty(self.body, GCF_SOURCE_UPLOAD_URL):
if not self.zip_path:
raise AirflowException(
"Parameter '{url}' is empty in the body and argument '{path}' "
"is missing or empty. You need to have non empty '{path}' "
"when '{url}' is present and empty.".format(url=GCF_SOURCE_UPLOAD_URL, path=GCF_ZIP_PATH)
)
def _verify_upload_url_and_zip_path(self) -> None:
if GCF_SOURCE_UPLOAD_URL in self.body and self.zip_path:
if not self.body[GCF_SOURCE_UPLOAD_URL]:
self.upload_function = True
else:
raise AirflowException(
f"Only one of '{GCF_SOURCE_UPLOAD_URL}' in body or '{GCF_ZIP_PATH}' argument allowed. "
f"Found both."
)
def _verify_archive_url_and_zip_path(self) -> None:
if GCF_SOURCE_ARCHIVE_URL in self.body and self.zip_path:
raise AirflowException(
f"Only one of '{GCF_SOURCE_ARCHIVE_URL}' in body or '{GCF_ZIP_PATH}' argument allowed. "
f"Found both."
)
def should_upload_function(self) -> bool:
"""Checks if function source should be uploaded."""
if self.upload_function is None:
raise AirflowException("validate() method has to be invoked before should_upload_function")
return self.upload_function
def preprocess_body(self) -> None:
"""Modifies sourceUploadUrl body field in special way when zip_path is not empty."""
self._verify_archive_url_and_zip_path()
self._verify_upload_url_and_zip_path()
self._verify_upload_url_and_no_zip_path()
if self.upload_function is None:
self.upload_function = False
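# A hedged illustration of the rules above (the body and zip path are assumptions): an
# empty sourceUploadUrl combined with a non-empty zip_path marks the source for upload.
#
#     preprocessor = ZipPathPreprocessor(
#         body={"sourceUploadUrl": ""},
#         zip_path="/tmp/hello-world.zip",
#     )
#     preprocessor.preprocess_body()
#     assert preprocessor.should_upload_function() is True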
FUNCTION_NAME_PATTERN = "^projects/[^/]+/locations/[^/]+/functions/[^/]+$"
FUNCTION_NAME_COMPILED_PATTERN = re.compile(FUNCTION_NAME_PATTERN)
class CloudFunctionDeleteFunctionOperator(GoogleCloudBaseOperator):
"""
Deletes the specified function from Google Cloud Functions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFunctionDeleteFunctionOperator`
:param name: A fully-qualified function name, matching
the pattern: `^projects/[^/]+/locations/[^/]+/functions/[^/]+$`
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param api_version: API version used (for example v1 or v1beta1).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcf_function_delete_template_fields]
template_fields: Sequence[str] = (
"name",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcf_function_delete_template_fields]
operator_extra_links = (CloudFunctionsListLink(),)
def __init__(
self,
*,
name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: str | Sequence[str] | None = None,
project_id: str | None = None,
**kwargs,
) -> None:
self.name = name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if not self.name:
raise AttributeError("Empty parameter: name")
else:
pattern = FUNCTION_NAME_COMPILED_PATTERN
if not pattern.match(self.name):
raise AttributeError(f"Parameter name must match pattern: {FUNCTION_NAME_PATTERN}")
def execute(self, context: Context):
hook = CloudFunctionsHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
try:
project_id = self.project_id or hook.project_id
if project_id:
CloudFunctionsListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
return hook.delete_function(self.name)
except HttpError as e:
status = e.resp.status
if status == 404:
self.log.info("The function does not exist in this project")
return None
else:
self.log.error("An error occurred. Exiting.")
raise e
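# A hedged usage sketch (project and function names are illustrative assumptions). The
# name must be fully qualified and match FUNCTION_NAME_PATTERN defined above:
#
#     delete_function = CloudFunctionDeleteFunctionOperator(
#         task_id="delete_cloud_function",
#         name="projects/example-project/locations/us-central1/functions/hello-world",
#     )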
class CloudFunctionInvokeFunctionOperator(GoogleCloudBaseOperator):
"""
Invokes a deployed Cloud Function. To be used for testing purposes as very limited traffic is allowed.
.. seealso::
For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:CloudFunctionInvokeFunctionOperator`
:param function_id: ID of the function to be called
:param input_data: Input to be passed to the function
:param location: The location where the function is located.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :return: The result of the function call; the execution id is also pushed to XCom under the
        ``execution_id`` key.
"""
template_fields: Sequence[str] = (
"function_id",
"input_data",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (CloudFunctionsDetailsLink(),)
def __init__(
self,
*,
function_id: str,
input_data: dict,
location: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.function_id = function_id
self.input_data = input_data
self.location = location
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudFunctionsHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Calling function %s.", self.function_id)
result = hook.call_function(
function_id=self.function_id,
input_data=self.input_data,
location=self.location,
project_id=self.project_id,
)
self.log.info("Function called successfully. Execution id %s", result.get("executionId"))
self.xcom_push(context=context, key="execution_id", value=result.get("executionId"))
project_id = self.project_id or hook.project_id
if project_id:
CloudFunctionsDetailsLink.persist(
context=context,
task_instance=self,
location=self.location,
project_id=project_id,
function_name=self.function_id,
)
return result
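# A hedged usage sketch (all values are illustrative assumptions). The execution id
# returned by the API is pushed to XCom under the "execution_id" key, and the full call
# result is returned by execute():
#
#     invoke_function = CloudFunctionInvokeFunctionOperator(
#         task_id="invoke_cloud_function",
#         function_id="hello-world",
#         input_data={"data": "example payload"},
#         location="us-central1",
#         project_id="example-project",
#     )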
| 19,957 | 39.647658 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/cloud_build.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Operators that integrates with Google Cloud Build service."""
from __future__ import annotations
import json
import re
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Sequence
from urllib.parse import unquote, urlsplit
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.devtools.cloudbuild_v1.types import Build, BuildTrigger, RepoSource
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook
from airflow.providers.google.cloud.links.cloud_build import (
CloudBuildLink,
CloudBuildListLink,
CloudBuildTriggerDetailsLink,
CloudBuildTriggersListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.cloud_build import CloudBuildCreateBuildTrigger
from airflow.providers.google.common.consts import GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME
from airflow.utils import yaml
from airflow.utils.helpers import exactly_one
if TYPE_CHECKING:
from airflow.utils.context import Context
REGEX_REPO_PATH = re.compile(r"^/(?P<project_id>[^/]+)/(?P<repo_name>[^/]+)[\+/]*(?P<branch_name>[^:]+)?")
class CloudBuildCancelBuildOperator(GoogleCloudBaseOperator):
"""
Cancels a build in progress.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCancelBuildOperator`
:param id_: The ID of the build.
    :param project_id: Optional, Google Cloud Project project_id where the build belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
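
    A minimal usage sketch; the task ID and project ID below are illustrative placeholders,
    and the build ID is pulled from an upstream ``create_build`` task::

        cancel_build = CloudBuildCancelBuildOperator(
            task_id="cancel_build",
            id_="{{ task_instance.xcom_pull('create_build', key='id') }}",
            project_id="example-project",
        )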
"""
template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
id_: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.cancel_build(
id_=self.id_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
self.xcom_push(context, key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
build_id=result.id,
)
return Build.to_dict(result)
class CloudBuildCreateBuildOperator(GoogleCloudBaseOperator):
"""
Starts a build with the specified configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCreateBuildOperator`
:param build: The build resource to create. If a dict is provided, it must be of
the same form as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.Build`.
    :param project_id: Optional, Google Cloud Project project_id where the build belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param poll_interval: Optional, polling period, in seconds, used in the deferrable mode to
        check the status of the build.
    :param deferrable: Run operator in the deferrable mode.
:param location: The location of the project.
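
    A minimal usage sketch; the project, bucket, object and build step below are illustrative
    placeholders (the ``steps`` layout follows the Cloud Build API), and the storage source is
    given as a URL string that the operator converts into the API format::

        create_build = CloudBuildCreateBuildOperator(
            task_id="create_build",
            project_id="example-project",
            build={
                "source": {"storage_source": "gs://example-bucket/source.tar.gz"},
                "steps": [{"name": "gcr.io/cloud-builders/gsutil", "args": ["ls"]}],
            },
        )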
"""
template_fields: Sequence[str] = ("project_id", "build", "gcp_conn_id", "impersonation_chain", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
build: dict | Build,
project_id: str | None = None,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
poll_interval: float = 4.0,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.build = build
        # Not a template field, so the original (untemplated) value is preserved
self.build_raw = build
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.poll_interval = poll_interval
self.deferrable = deferrable
self.location = location
def prepare_template(self) -> None:
        # if the build body is not a path to a configuration file, there is nothing to load
if not isinstance(self.build_raw, str):
return
with open(self.build_raw) as file:
if any(self.build_raw.endswith(ext) for ext in [".yaml", ".yml"]):
self.build = yaml.safe_load(file.read())
if self.build_raw.endswith(".json"):
self.build = json.loads(file.read())
def execute(self, context: Context):
hook = CloudBuildHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
build = BuildProcessor(build=self.build).process_body()
self.cloud_build_operation, self.id_ = hook.create_build_without_waiting_for_result(
build=build,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
self.xcom_push(context, key="id", value=self.id_)
if not self.wait:
return Build.to_dict(
hook.get_build(id_=self.id_, project_id=self.project_id, location=self.location)
)
if self.deferrable:
self.defer(
trigger=CloudBuildCreateBuildTrigger(
id_=self.id_,
project_id=self.project_id,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
poll_interval=self.poll_interval,
location=self.location,
),
method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME,
)
else:
cloud_build_instance_result = hook.wait_for_operation(
timeout=self.timeout, operation=self.cloud_build_operation
)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
build_id=cloud_build_instance_result.id,
)
return Build.to_dict(cloud_build_instance_result)
def execute_complete(self, context: Context, event: dict):
if event["status"] == "success":
hook = CloudBuildHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Cloud Build completed with response %s ", event["message"])
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
build_id=event["id_"],
)
return event["instance"]
else:
raise AirflowException(f"Unexpected error in the operation: {event['message']}")
class CloudBuildCreateBuildTriggerOperator(GoogleCloudBaseOperator):
"""
Creates a new BuildTrigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCreateBuildTriggerOperator`
:param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
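
    A minimal usage sketch; the project, trigger name, repository and branch below are
    illustrative placeholders (the ``trigger_template`` and ``filename`` fields follow the
    Cloud Build API)::

        create_trigger = CloudBuildCreateBuildTriggerOperator(
            task_id="create_trigger",
            project_id="example-project",
            trigger={
                "name": "example-trigger",
                "trigger_template": {"repo_name": "example-repo", "branch_name": "main"},
                "filename": "cloudbuild.yaml",
            },
        )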
"""
template_fields: Sequence[str] = ("project_id", "trigger", "gcp_conn_id", "location")
operator_extra_links = (
CloudBuildTriggersListLink(),
CloudBuildTriggerDetailsLink(),
)
def __init__(
self,
*,
trigger: dict | BuildTrigger,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger = trigger
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.create_build_trigger(
trigger=self.trigger,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
self.xcom_push(context, key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildTriggerDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
trigger_id=result.id,
)
CloudBuildTriggersListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
)
return BuildTrigger.to_dict(result)
class CloudBuildDeleteBuildTriggerOperator(GoogleCloudBaseOperator):
"""
Deletes a BuildTrigger by its project ID and trigger ID.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildDeleteBuildTriggerOperator`
:param trigger_id: The ID of the BuildTrigger to delete.
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
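
    A minimal usage sketch; the project ID below is an illustrative placeholder, and the
    trigger ID is pulled from an upstream ``create_trigger`` task::

        delete_trigger = CloudBuildDeleteBuildTriggerOperator(
            task_id="delete_trigger",
            trigger_id="{{ task_instance.xcom_pull('create_trigger', key='id') }}",
            project_id="example-project",
        )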
"""
template_fields: Sequence[str] = ("project_id", "trigger_id", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildTriggersListLink(),)
def __init__(
self,
*,
trigger_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
hook.delete_build_trigger(
trigger_id=self.trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildTriggersListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
)
class CloudBuildGetBuildOperator(GoogleCloudBaseOperator):
"""
Returns information about a previously requested build.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildGetBuildOperator`
:param id_: The ID of the build.
    :param project_id: Optional, Google Cloud Project project_id where the build belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
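
    A minimal usage sketch; the project ID below is an illustrative placeholder, and the
    build ID is pulled from an upstream ``create_build`` task::

        get_build = CloudBuildGetBuildOperator(
            task_id="get_build",
            id_="{{ task_instance.xcom_pull('create_build', key='id') }}",
            project_id="example-project",
        )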
"""
template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
id_: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.get_build(
id_=self.id_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
build_id=result.id,
)
return Build.to_dict(result)
class CloudBuildGetBuildTriggerOperator(GoogleCloudBaseOperator):
"""
Returns information about a BuildTrigger.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildGetBuildTriggerOperator`
:param trigger_id: The ID of the BuildTrigger to get.
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
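
    A minimal usage sketch; the project ID below is an illustrative placeholder, and the
    trigger ID is pulled from an upstream ``create_trigger`` task::

        get_trigger = CloudBuildGetBuildTriggerOperator(
            task_id="get_trigger",
            trigger_id="{{ task_instance.xcom_pull('create_trigger', key='id') }}",
            project_id="example-project",
        )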
"""
template_fields: Sequence[str] = ("project_id", "trigger_id", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildTriggerDetailsLink(),)
def __init__(
self,
*,
trigger_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.get_build_trigger(
trigger_id=self.trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildTriggerDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
trigger_id=result.id,
)
return BuildTrigger.to_dict(result)
class CloudBuildListBuildTriggersOperator(GoogleCloudBaseOperator):
"""
Lists existing BuildTriggers.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildListBuildTriggersOperator`
:param location: The location of the project.
    :param project_id: Optional, Google Cloud Project project_id where the triggers belong.
If set to None or missing, the default project_id from the GCP connection is used.
:param page_size: Optional, number of results to return in the list.
:param page_token: Optional, token to provide to skip to a particular spot in the list.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
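
    A minimal usage sketch; the project ID and page size below are illustrative placeholders::

        list_triggers = CloudBuildListBuildTriggersOperator(
            task_id="list_triggers",
            project_id="example-project",
            location="global",
            page_size=5,
        )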
"""
template_fields: Sequence[str] = ("location", "project_id", "gcp_conn_id")
operator_extra_links = (CloudBuildTriggersListLink(),)
def __init__(
self,
*,
location: str = "global",
project_id: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.page_size = page_size
self.page_token = page_token
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
results = hook.list_build_triggers(
project_id=self.project_id,
location=self.location,
page_size=self.page_size,
page_token=self.page_token,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildTriggersListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
)
return [BuildTrigger.to_dict(result) for result in results]
class CloudBuildListBuildsOperator(GoogleCloudBaseOperator):
"""
Lists previously requested builds.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildListBuildsOperator`
:param location: The location of the project.
    :param project_id: Optional, Google Cloud Project project_id where the builds belong.
If set to None or missing, the default project_id from the GCP connection is used.
:param page_size: Optional, number of results to return in the list.
:param filter_: Optional, the raw filter text to constrain the results.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
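
    A minimal usage sketch; the project ID and page size below are illustrative placeholders::

        list_builds = CloudBuildListBuildsOperator(
            task_id="list_builds",
            project_id="example-project",
            location="global",
            page_size=5,
        )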
"""
template_fields: Sequence[str] = ("location", "project_id", "gcp_conn_id")
operator_extra_links = (CloudBuildListLink(),)
def __init__(
self,
*,
location: str = "global",
project_id: str | None = None,
page_size: int | None = None,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.page_size = page_size
self.filter_ = filter_
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
results = hook.list_builds(
project_id=self.project_id,
location=self.location,
page_size=self.page_size,
filter_=self.filter_,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildListLink.persist(
context=context, task_instance=self, project_id=project_id, region=self.location
)
return [Build.to_dict(result) for result in results]
class CloudBuildRetryBuildOperator(GoogleCloudBaseOperator):
"""
Creates a new build using the original build request, which may or may not result in an identical build.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildRetryBuildOperator`
:param id_: Build ID of the original build.
    :param project_id: Optional, Google Cloud Project project_id where the build belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
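
    A minimal usage sketch; the project ID below is an illustrative placeholder, and the
    build ID is pulled from an upstream ``create_build`` task::

        retry_build = CloudBuildRetryBuildOperator(
            task_id="retry_build",
            id_="{{ task_instance.xcom_pull('create_build', key='id') }}",
            project_id="example-project",
        )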
"""
template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
id_: str,
project_id: str | None = None,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.retry_build(
id_=self.id_,
project_id=self.project_id,
wait=self.wait,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
self.xcom_push(context, key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
build_id=result.id,
)
return Build.to_dict(result)
class CloudBuildRunBuildTriggerOperator(GoogleCloudBaseOperator):
"""
Runs a BuildTrigger at a particular source revision.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildRunBuildTriggerOperator`
:param trigger_id: The ID of the trigger.
:param source: Source to build against this trigger. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.RepoSource`
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
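
    A minimal usage sketch; the project, repository and branch below are illustrative
    placeholders, and the trigger ID is pulled from an upstream ``create_trigger`` task::

        run_trigger = CloudBuildRunBuildTriggerOperator(
            task_id="run_trigger",
            project_id="example-project",
            trigger_id="{{ task_instance.xcom_pull('create_trigger', key='id') }}",
            source={"project_id": "example-project", "repo_name": "example-repo", "branch_name": "main"},
        )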
"""
template_fields: Sequence[str] = ("project_id", "trigger_id", "source", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
trigger_id: str,
source: dict | RepoSource,
project_id: str | None = None,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.source = source
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.run_build_trigger(
trigger_id=self.trigger_id,
source=self.source,
project_id=self.project_id,
wait=self.wait,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
self.xcom_push(context, key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
build_id=result.id,
)
return Build.to_dict(result)
class CloudBuildUpdateBuildTriggerOperator(GoogleCloudBaseOperator):
"""
Updates a BuildTrigger by its project ID and trigger ID.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildUpdateBuildTriggerOperator`
:param trigger_id: The ID of the trigger.
    :param trigger: The updated version of the BuildTrigger. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
    :param project_id: Optional, Google Cloud Project project_id where the trigger belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
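
    A minimal usage sketch; the project, trigger definition and branch below are illustrative
    placeholders (the ``trigger_template`` and ``filename`` fields follow the Cloud Build API),
    and the trigger ID is pulled from an upstream ``create_trigger`` task::

        update_trigger = CloudBuildUpdateBuildTriggerOperator(
            task_id="update_trigger",
            project_id="example-project",
            trigger_id="{{ task_instance.xcom_pull('create_trigger', key='id') }}",
            trigger={
                "name": "example-trigger",
                "trigger_template": {"repo_name": "example-repo", "branch_name": "dev"},
                "filename": "cloudbuild.yaml",
            },
        )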
"""
template_fields: Sequence[str] = ("project_id", "trigger_id", "trigger", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildTriggerDetailsLink(),)
def __init__(
self,
*,
trigger_id: str,
trigger: dict | BuildTrigger,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.trigger = trigger
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.update_build_trigger(
trigger_id=self.trigger_id,
trigger=self.trigger,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
self.xcom_push(context, key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildTriggerDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
region=self.location,
trigger_id=result.id,
)
return BuildTrigger.to_dict(result)
class BuildProcessor:
"""
Processes build configurations to add additional functionality to support the use of operators.
The following improvements are made:
    * It is required to provide the source, and only one source type can be given.
    * It is possible to provide the source as a URL string instead of a dict.
:param build: The request body of the build.
See: https://cloud.google.com/cloud-build/docs/api/reference/rest/Shared.Types/Build
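
    A minimal usage sketch; the bucket, object and build step below are illustrative
    placeholders (the ``steps`` layout follows the Cloud Build API)::

        build_body = {
            "source": {"storage_source": "gs://example-bucket/source.tar.gz"},
            "steps": [{"name": "gcr.io/cloud-builders/gsutil", "args": ["ls"]}],
        }
        build = BuildProcessor(build=build_body).process_body()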
"""
def __init__(self, build: dict | Build) -> None:
self.build = deepcopy(build)
def _verify_source(self) -> None:
if not exactly_one("storage_source" in self.build["source"], "repo_source" in self.build["source"]):
raise AirflowException(
"The source could not be determined. Please choose one data source from: "
"storage_source and repo_source."
)
def _reformat_source(self) -> None:
self._reformat_repo_source()
self._reformat_storage_source()
def _reformat_repo_source(self) -> None:
if "repo_source" not in self.build["source"]:
return
repo_source = self.build["source"]["repo_source"]
if not isinstance(repo_source, str):
return
self.build["source"]["repo_source"] = self._convert_repo_url_to_dict(repo_source)
def _reformat_storage_source(self) -> None:
if "storage_source" not in self.build["source"]:
return
storage_source = self.build["source"]["storage_source"]
if not isinstance(storage_source, str):
return
self.build["source"]["storage_source"] = self._convert_storage_url_to_dict(storage_source)
def process_body(self) -> Build:
"""
Processes the body passed in the constructor.
        :return: the processed body, as a ``Build`` object.
"""
if "source" in self.build:
self._verify_source()
self._reformat_source()
return Build(self.build)
@staticmethod
def _convert_repo_url_to_dict(source: str) -> dict[str, Any]:
"""
        Convert a Google Cloud Source Repositories URL into the dict format supported by the API.
Example valid input:
.. code-block:: none
https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:
"""
url_parts = urlsplit(source)
match = REGEX_REPO_PATH.search(url_parts.path)
if url_parts.scheme != "https" or url_parts.hostname != "source.cloud.google.com" or not match:
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"https://source.cloud.google.com/airflow-project/airflow-repo/+/branch-name:"
)
project_id = unquote(match.group("project_id"))
repo_name = unquote(match.group("repo_name"))
branch_name = unquote(match.group("branch_name")) if match.group("branch_name") else "master"
source_dict = {
"project_id": project_id,
"repo_name": repo_name,
"branch_name": branch_name,
}
return source_dict
@staticmethod
def _convert_storage_url_to_dict(storage_url: str) -> dict[str, Any]:
"""
        Convert a Google Cloud Storage object URL into the dict format supported by the API.
Example valid input:
.. code-block:: none
gs://bucket-name/object-name.tar.gz
"""
url_parts = urlsplit(storage_url)
if url_parts.scheme != "gs" or not url_parts.hostname or not url_parts.path or url_parts.path == "/":
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"gs://bucket-name/object-name.tar.gz#24565443"
)
source_dict: dict[str, Any] = {
"bucket": url_parts.hostname,
"object_": url_parts.path[1:],
}
if url_parts.fragment:
source_dict["generation"] = int(url_parts.fragment)
return source_dict
| 48,339 | 42.160714 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/pubsub.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google PubSub operators.
.. spelling:word-list::
MessageStoragePolicy
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.pubsub_v1.types import (
DeadLetterPolicy,
Duration,
ExpirationPolicy,
MessageStoragePolicy,
PushConfig,
ReceivedMessage,
RetryPolicy,
)
from airflow.providers.google.cloud.hooks.pubsub import PubSubHook
from airflow.providers.google.cloud.links.pubsub import PubSubSubscriptionLink, PubSubTopicLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class PubSubCreateTopicOperator(GoogleCloudBaseOperator):
"""Create a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubCreateTopicOperator`
By default, if the topic already exists, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
(
PubSubCreateTopicOperator(project_id='my-project', topic='my_new_topic')
>> PubSubCreateTopicOperator(project_id='my-project', topic='my_new_topic')
)
The operator can be configured to fail if the topic already exists. ::
with DAG('failing DAG') as dag:
(
PubSubCreateTopicOperator(project_id='my-project', topic='my_new_topic')
>> PubSubCreateTopicOperator(
project_id='my-project',
topic='my_new_topic',
fail_if_exists=True,
)
)
Both ``project_id`` and ``topic`` are templated so you can use Jinja templating in their values.
:param project_id: Optional, the Google Cloud project ID where the topic will be created.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param topic: the topic to create. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:param message_storage_policy: Policy constraining the set
of Google Cloud regions where messages published to
the topic may be stored. If not present, then no constraints
are in effect.
Union[dict, google.cloud.pubsub_v1.types.MessageStoragePolicy]
:param kms_key_name: The resource name of the Cloud KMS CryptoKey
to be used to protect access to messages published on this topic.
The expected format is
``projects/*/locations/*/keyRings/*/cryptoKeys/*``.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"topic",
"impersonation_chain",
)
ui_color = "#0273d4"
operator_extra_links = (PubSubTopicLink(),)
def __init__(
self,
*,
topic: str,
project_id: str | None = None,
fail_if_exists: bool = False,
gcp_conn_id: str = "google_cloud_default",
labels: dict[str, str] | None = None,
        message_storage_policy: dict | MessageStoragePolicy | None = None,
kms_key_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.fail_if_exists = fail_if_exists
self.gcp_conn_id = gcp_conn_id
self.labels = labels
self.message_storage_policy = message_storage_policy
self.kms_key_name = kms_key_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating topic %s", self.topic)
hook.create_topic(
project_id=self.project_id,
topic=self.topic,
fail_if_exists=self.fail_if_exists,
labels=self.labels,
message_storage_policy=self.message_storage_policy,
kms_key_name=self.kms_key_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Created topic %s", self.topic)
PubSubTopicLink.persist(
context=context,
task_instance=self,
topic_id=self.topic,
project_id=self.project_id or hook.project_id,
)
class PubSubCreateSubscriptionOperator(GoogleCloudBaseOperator):
"""Create a PubSub subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubCreateSubscriptionOperator`
By default, the subscription will be created in ``project_id``. If
``subscription_project_id`` is specified and the Google Cloud credentials allow, the
Subscription can be created in a different project from its topic.
By default, if the subscription already exists, this operator will
not cause the DAG to fail. However, the topic must exist in the project. ::
with DAG('successful DAG') as dag:
(
PubSubCreateSubscriptionOperator(
project_id='my-project',
topic='my-topic',
subscription='my-subscription'
)
>> PubSubCreateSubscriptionOperator(
project_id='my-project',
topic='my-topic',
subscription='my-subscription',
)
)
The operator can be configured to fail if the subscription already exists.
::
with DAG('failing DAG') as dag:
(
PubSubCreateSubscriptionOperator(
project_id='my-project',
topic='my-topic',
subscription='my-subscription',
)
>> PubSubCreateSubscriptionOperator(
project_id='my-project',
topic='my-topic',
subscription='my-subscription',
fail_if_exists=True,
)
)
Finally, subscription is not required. If not passed, the operator will
    generate a universally unique identifier for the subscription's name. ::
with DAG('DAG') as dag:
PubSubCreateSubscriptionOperator(project_id='my-project', topic='my-topic')
``project_id``, ``topic``, ``subscription``, ``subscription_project_id`` and
``impersonation_chain`` are templated so you can use Jinja templating in their values.
:param project_id: Optional, the Google Cloud project ID where the topic exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param topic: the topic to create. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:param subscription: the Pub/Sub subscription name. If empty, a random
name will be generated using the uuid module
:param subscription_project_id: the Google Cloud project ID where the subscription
        will be created. If empty, ``project_id`` will be used.
:param ack_deadline_secs: Number of seconds that a subscriber has to
acknowledge each message pulled from the subscription
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param push_config: If push delivery is used with this subscription,
this field is used to configure it. An empty ``pushConfig`` signifies
that the subscriber will pull and ack messages using API methods.
:param retain_acked_messages: Indicates whether to retain acknowledged
messages. If true, then messages are not expunged from the subscription's
backlog, even if they are acknowledged, until they fall out of the
``message_retention_duration`` window. This must be true if you would
like to Seek to a timestamp.
:param message_retention_duration: How long to retain unacknowledged messages
in the subscription's backlog, from the moment a message is published. If
``retain_acked_messages`` is true, then this also configures the
retention of acknowledged messages, and thus configures how far back in
time a ``Seek`` can be done. Defaults to 7 days. Cannot be more than 7
days or less than 10 minutes.
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:param enable_message_ordering: If true, messages published with the same
ordering_key in PubsubMessage will be delivered to the subscribers in the order
in which they are received by the Pub/Sub system. Otherwise, they may be
delivered in any order.
:param expiration_policy: A policy that specifies the conditions for this
subscription's expiration. A subscription is considered active as long as any
connected subscriber is successfully consuming messages from the subscription or
is issuing operations on the subscription. If expiration_policy is not set,
a default policy with ttl of 31 days will be used. The minimum allowed value for
expiration_policy.ttl is 1 day.
:param filter_: An expression written in the Cloud Pub/Sub filter language. If
non-empty, then only PubsubMessages whose attributes field matches the filter are
delivered on this subscription. If empty, then no messages are filtered out.
:param dead_letter_policy: A policy that specifies the conditions for dead lettering
messages in this subscription. If dead_letter_policy is not set, dead lettering is
disabled.
:param retry_policy: A policy that specifies how Pub/Sub retries message delivery
for this subscription. If not set, the default retry policy is applied. This
generally implies that messages will be retried as soon as possible for healthy
subscribers. RetryPolicy will be triggered on NACKs or acknowledgement deadline
exceeded events for a given message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"topic",
"subscription",
"subscription_project_id",
"impersonation_chain",
)
ui_color = "#0273d4"
operator_extra_links = (PubSubSubscriptionLink(),)
def __init__(
self,
*,
topic: str,
project_id: str | None = None,
subscription: str | None = None,
subscription_project_id: str | None = None,
ack_deadline_secs: int = 10,
fail_if_exists: bool = False,
gcp_conn_id: str = "google_cloud_default",
push_config: dict | PushConfig | None = None,
retain_acked_messages: bool | None = None,
message_retention_duration: dict | Duration | None = None,
labels: dict[str, str] | None = None,
enable_message_ordering: bool = False,
expiration_policy: dict | ExpirationPolicy | None = None,
filter_: str | None = None,
dead_letter_policy: dict | DeadLetterPolicy | None = None,
retry_policy: dict | RetryPolicy | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.subscription = subscription
self.subscription_project_id = subscription_project_id
self.ack_deadline_secs = ack_deadline_secs
self.fail_if_exists = fail_if_exists
self.gcp_conn_id = gcp_conn_id
self.push_config = push_config
self.retain_acked_messages = retain_acked_messages
self.message_retention_duration = message_retention_duration
self.labels = labels
self.enable_message_ordering = enable_message_ordering
self.expiration_policy = expiration_policy
self.filter_ = filter_
self.dead_letter_policy = dead_letter_policy
self.retry_policy = retry_policy
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> str:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating subscription for topic %s", self.topic)
result = hook.create_subscription(
project_id=self.project_id,
topic=self.topic,
subscription=self.subscription,
subscription_project_id=self.subscription_project_id,
ack_deadline_secs=self.ack_deadline_secs,
fail_if_exists=self.fail_if_exists,
push_config=self.push_config,
retain_acked_messages=self.retain_acked_messages,
message_retention_duration=self.message_retention_duration,
labels=self.labels,
enable_message_ordering=self.enable_message_ordering,
expiration_policy=self.expiration_policy,
filter_=self.filter_,
dead_letter_policy=self.dead_letter_policy,
retry_policy=self.retry_policy,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Created subscription for topic %s", self.topic)
PubSubSubscriptionLink.persist(
context=context,
task_instance=self,
subscription_id=self.subscription or result, # result returns subscription name
project_id=self.project_id or hook.project_id,
)
return result
class PubSubDeleteTopicOperator(GoogleCloudBaseOperator):
"""Delete a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubDeleteTopicOperator`
By default, if the topic does not exist, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
PubSubDeleteTopicOperator(project_id='my-project', topic='non_existing_topic')
The operator can be configured to fail if the topic does not exist. ::
with DAG('failing DAG') as dag:
PubSubDeleteTopicOperator(
project_id='my-project', topic='non_existing_topic', fail_if_not_exists=True,
)
Both ``project_id`` and ``topic`` are templated so you can use Jinja templating in their values.
:param project_id: Optional, the Google Cloud project ID in which to work (templated).
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param topic: the topic to delete. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:param fail_if_not_exists: If True and the topic does not exist, fail
the task
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"topic",
"impersonation_chain",
)
ui_color = "#cb4335"
def __init__(
self,
*,
topic: str,
project_id: str | None = None,
fail_if_not_exists: bool = False,
gcp_conn_id: str = "google_cloud_default",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.fail_if_not_exists = fail_if_not_exists
self.gcp_conn_id = gcp_conn_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting topic %s", self.topic)
hook.delete_topic(
project_id=self.project_id,
topic=self.topic,
fail_if_not_exists=self.fail_if_not_exists,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Deleted topic %s", self.topic)
class PubSubDeleteSubscriptionOperator(GoogleCloudBaseOperator):
"""Delete a PubSub subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubDeleteSubscriptionOperator`
By default, if the subscription does not exist, this operator will
not cause the DAG to fail. ::
with DAG('successful_dag') as dag:
PubSubDeleteSubscriptionOperator(project_id='my-project', subscription='non-existing')
The operator can be configured to fail if the subscription does not exist.
::
with DAG('failing_dag') as dag:
PubSubDeleteSubscriptionOperator(
project_id='my-project', subscription='non-existing', fail_if_not_exists=True,
)
``project_id`` and ``subscription`` are templated so you can use Jinja templating in their values.
:param project_id: Optional, the Google Cloud project ID in which to work (templated).
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param subscription: the subscription to delete. Do not include the
full subscription path. In other words, instead of
``projects/{project}/subscription/{subscription}``, provide only
``{subscription}``. (templated)
:param fail_if_not_exists: If True and the subscription does not exist,
fail the task
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"subscription",
"impersonation_chain",
)
ui_color = "#cb4335"
def __init__(
self,
*,
subscription: str,
project_id: str | None = None,
fail_if_not_exists: bool = False,
gcp_conn_id: str = "google_cloud_default",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.subscription = subscription
self.fail_if_not_exists = fail_if_not_exists
self.gcp_conn_id = gcp_conn_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting subscription %s", self.subscription)
hook.delete_subscription(
project_id=self.project_id,
subscription=self.subscription,
fail_if_not_exists=self.fail_if_not_exists,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Deleted subscription %s", self.subscription)
class PubSubPublishMessageOperator(GoogleCloudBaseOperator):
"""Publish messages to a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubPublishMessageOperator`
Each Task publishes all provided messages to the same topic
in a single Google Cloud project. If the topic does not exist, this
task will fail. ::
m1 = {'data': b'Hello, World!',
'attributes': {'type': 'greeting'}
}
m2 = {'data': b'Knock, knock'}
m3 = {'attributes': {'foo': ''}}
t1 = PubSubPublishMessageOperator(
project_id='my-project',
topic='my_topic',
messages=[m1, m2, m3],
dag=dag,
)
``project_id``, ``topic``, and ``messages`` are templated so you can use Jinja templating
in their values.
:param project_id: Optional, the Google Cloud project ID in which to work (templated).
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param topic: the topic to which to publish. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:param messages: a list of messages to be published to the
topic. Each message is a dict with one or more of the
following keys-value mappings:
* 'data': a bytestring (utf-8 encoded)
* 'attributes': {'key1': 'value1', ...}
Each message must contain at least a non-empty 'data' value
or an attribute dict with at least one key (templated). See
https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"topic",
"messages",
"impersonation_chain",
)
ui_color = "#0273d4"
def __init__(
self,
*,
topic: str,
messages: list,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.topic = topic
self.messages = messages
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Publishing to topic %s", self.topic)
hook.publish(project_id=self.project_id, topic=self.topic, messages=self.messages)
self.log.info("Published to topic %s", self.topic)
class PubSubPullOperator(GoogleCloudBaseOperator):
"""
Pulls messages from a PubSub subscription and passes them through XCom.
If the queue is empty, it returns an empty list and never waits for messages.
If you do need to wait, please use
:class:`airflow.providers.google.cloud.sensors.pubsub.PubSubPullSensor` instead.
.. seealso::
For more information on how to use this operator and the PubSubPullSensor, take a look at the guide:
:ref:`howto/operator:PubSubPullSensor`
This operator will pull up to ``max_messages`` messages from the
specified PubSub subscription. When the subscription returns messages,
the messages will be returned immediately from the operator and passed through XCom for downstream tasks.
If ``ack_messages`` is set to True, messages will be immediately
acknowledged before being returned, otherwise, downstream tasks will be
responsible for acknowledging them.
``project_id`` and ``subscription`` are templated so you can use Jinja templating in their values.
:param project_id: the Google Cloud project ID for the subscription (templated)
:param subscription: the Pub/Sub subscription name. Do not include the
full subscription path.
:param max_messages: The maximum number of messages to retrieve per
PubSub pull request
:param ack_messages: If True, each message will be acknowledged
immediately rather than by any downstream tasks
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param messages_callback: (Optional) Callback to process received messages.
Its return value will be saved to XCom.
If you are pulling large messages, you probably want to provide a custom callback.
If not provided, the default implementation will convert `ReceivedMessage` objects
into JSON-serializable dicts using the `ReceivedMessage.to_dict` method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"subscription",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
subscription: str,
max_messages: int = 5,
ack_messages: bool = False,
messages_callback: Callable[[list[ReceivedMessage], Context], Any] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.subscription = subscription
self.max_messages = max_messages
self.ack_messages = ack_messages
self.messages_callback = messages_callback
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> list:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
pulled_messages = hook.pull(
project_id=self.project_id,
subscription=self.subscription,
max_messages=self.max_messages,
return_immediately=True,
)
handle_messages = self.messages_callback or self._default_message_callback
ret = handle_messages(pulled_messages, context)
if pulled_messages and self.ack_messages:
hook.acknowledge(
project_id=self.project_id,
subscription=self.subscription,
messages=pulled_messages,
)
return ret
def _default_message_callback(
self,
pulled_messages: list[ReceivedMessage],
context: Context,
) -> list:
"""
This method can be overridden by subclasses or replaced by the `messages_callback` constructor argument.
This default implementation converts `ReceivedMessage` objects into JSON-serializable dicts.
:param pulled_messages: messages received from the topic.
:param context: same as in `execute`
:return: value to be saved to XCom.
"""
messages_json = [ReceivedMessage.to_dict(m) for m in pulled_messages]
return messages_json
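# A minimal usage sketch (illustrative only): it wires a custom ``messages_callback``
# into ``PubSubPullOperator`` so that only the decoded message payloads reach XCom.
# The project id, the subscription name, and the assumption that payloads are UTF-8
# text are placeholders, not values from this module.
def _example_pull_with_custom_callback() -> PubSubPullOperator:
    def decode_payloads(pulled_messages: list[ReceivedMessage], context: Context) -> list[str]:
        # Keep only the message bodies; ``ReceivedMessage.message.data`` is bytes.
        return [m.message.data.decode("utf-8") for m in pulled_messages]

    return PubSubPullOperator(
        task_id="pull_messages",
        project_id="my-project",  # placeholder project
        subscription="my-subscription",  # placeholder subscription
        max_messages=10,
        ack_messages=True,  # acknowledge as soon as the payloads are extracted
        messages_callback=decode_payloads,
    )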
| 34,106 | 41.84799 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/translate.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Translate operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.translate import CloudTranslateHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudTranslateTextOperator(GoogleCloudBaseOperator):
"""
Translate a string or list of strings.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTranslateTextOperator`
See https://cloud.google.com/translate/docs/translating-text
The execute method returns a dictionary or a list of dictionaries,
one for each queried value. Each dictionary typically contains
the following keys (though not all will be present in all cases).
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary is set as XCom return value.
:param values: String or list of strings to translate.
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:param source_language: (Optional) The language of the text to
be translated.
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START translate_template_fields]
template_fields: Sequence[str] = (
"values",
"target_language",
"format_",
"source_language",
"model",
"gcp_conn_id",
"impersonation_chain",
)
# [END translate_template_fields]
def __init__(
self,
*,
values: list[str] | str,
target_language: str,
format_: str,
source_language: str | None,
model: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.values = values
self.target_language = target_language
self.format_ = format_
self.source_language = source_language
self.model = model
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = CloudTranslateHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
translation = hook.translate(
values=self.values,
target_language=self.target_language,
format_=self.format_,
source_language=self.source_language,
model=self.model,
)
self.log.debug("Translation %s", translation)
return translation
except ValueError as e:
self.log.error("An error has been thrown from translate method:")
self.log.error(e)
raise AirflowException(e)
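# A minimal usage sketch (illustrative only): the DAG id, input strings, and language
# codes below are placeholders, and ``model``/``format_`` follow the options described
# in the docstring above ('base'/'nmt' and 'text'/'html').
def _example_translate_text_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(dag_id="example_translate_text", start_date=datetime(2024, 1, 1), schedule=None) as dag:
        CloudTranslateTextOperator(
            task_id="translate",
            values=["Hello, world!"],  # placeholder strings to translate
            target_language="pl",  # placeholder target language code
            format_="text",
            source_language=None,  # let the API detect the source language
            model="base",
        )
    return dag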
| 5,006 | 36.088889 | 93 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/translate_speech.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Translate Speech operator."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.cloud.speech_v1.types import RecognitionAudio, RecognitionConfig
from google.protobuf.json_format import MessageToDict
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.speech_to_text import CloudSpeechToTextHook
from airflow.providers.google.cloud.hooks.translate import CloudTranslateHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.common.links.storage import FileDetailsLink
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudTranslateSpeechOperator(GoogleCloudBaseOperator):
"""
Recognizes speech in audio input and translates it.
Note that it uses the first result from the recognition API response
(the one with the highest confidence). In order to see other possible results, please use
:ref:`howto/operator:CloudSpeechToTextRecognizeSpeechOperator`
and
:ref:`howto/operator:CloudTranslateTextOperator`
separately.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTranslateSpeechOperator`
See https://cloud.google.com/translate/docs/translating-text
The execute method returns the translation of the recognized transcript.
The result is a single dictionary, which typically contains
the following keys (though not all will be present in all cases).
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
Dictionary is set as XCom return value.
:param audio: audio data to be recognized. See more:
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:param config: information to the recognizer that specifies how to process the request. See more:
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:param target_language: The language to translate results into. This is required by the API and defaults
to the target language of the current instance.
Check the list of available languages here: https://cloud.google.com/translate/docs/languages
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:param source_language: (Optional) The language of the text to
be translated.
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START translate_speech_template_fields]
template_fields: Sequence[str] = (
"target_language",
"format_",
"source_language",
"model",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (FileDetailsLink(),)
# [END translate_speech_template_fields]
def __init__(
self,
*,
audio: RecognitionAudio,
config: RecognitionConfig,
target_language: str,
format_: str,
source_language: str | None,
model: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.audio = audio
self.config = config
self.target_language = target_language
self.format_ = format_
self.source_language = source_language
self.model = model
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
speech_to_text_hook = CloudSpeechToTextHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
translate_hook = CloudTranslateHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
recognize_result = speech_to_text_hook.recognize_speech(config=self.config, audio=self.audio)
recognize_dict = MessageToDict(recognize_result._pb)
self.log.info("Recognition operation finished")
if not recognize_dict["results"]:
self.log.info("No recognition results")
return {}
self.log.debug("Recognition result: %s", recognize_dict)
try:
transcript = recognize_dict["results"][0]["alternatives"][0]["transcript"]
except KeyError as key:
raise AirflowException(
f"Wrong response '{recognize_dict}' returned - it should contain {key} field"
)
try:
translation = translate_hook.translate(
values=transcript,
target_language=self.target_language,
format_=self.format_,
source_language=self.source_language,
model=self.model,
)
self.log.info("Translated output: %s", translation)
FileDetailsLink.persist(
context=context,
task_instance=self,
uri=self.audio["uri"][5:],
project_id=self.project_id or translate_hook.project_id,
)
return translation
except ValueError as e:
self.log.error("An error has been thrown from translate speech method:")
self.log.error(e)
raise AirflowException(e)
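# A minimal usage sketch (illustrative only): it recognizes a FLAC file stored in GCS
# and translates the transcript. The bucket, object name, and language codes are
# placeholders; the encoding must match the actual audio file.
def _example_translate_speech() -> CloudTranslateSpeechOperator:
    audio = RecognitionAudio(uri="gs://my-bucket/my-audio.flac")  # placeholder GCS object
    config = RecognitionConfig(
        encoding=RecognitionConfig.AudioEncoding.FLAC,
        language_code="en-US",  # language spoken in the audio
    )
    return CloudTranslateSpeechOperator(
        task_id="translate_speech",
        audio=audio,
        config=config,
        target_language="pl",  # placeholder target language code
        format_="text",
        source_language=None,
        model="base",
    )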
| 7,723 | 39.652632 | 137 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/stackdriver.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.monitoring_v3 import AlertPolicy, NotificationChannel
from airflow.providers.google.cloud.hooks.stackdriver import StackdriverHook
from airflow.providers.google.cloud.links.stackdriver import (
StackdriverNotificationsLink,
StackdriverPoliciesLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class StackdriverListAlertPoliciesOperator(GoogleCloudBaseOperator):
"""
Fetches all the Alert Policies identified by the filter passed as the filter parameter.
The desired return type can be specified by the format parameter; the supported
formats are "dict", "json" and None, which return a Python dictionary, stringified
JSON and protobuf respectively.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverListAlertPoliciesOperator`
:param format_: (Optional) Desired output format of the result. The
supported formats are "dict", "json" and None which returns
python dictionary, stringified JSON and protobuf respectively.
:param filter_: If provided, this field specifies the criteria that must be met by alert
policies to be included in the response.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param order_by: A comma-separated list of fields by which to sort the result.
Supports the same set of field references as the ``filter`` field. Entries
can be prefixed with a minus sign to sort by the field in descending order.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project to fetch alerts from.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"filter_",
"impersonation_chain",
)
operator_extra_links = (StackdriverPoliciesLink(),)
ui_color = "#e5ffcc"
def __init__(
self,
*,
format_: str | None = None,
filter_: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.format_ = format_
self.filter_ = filter_
self.order_by = order_by
self.page_size = page_size
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info(
"List Alert Policies: Project id: %s Format: %s Filter: %s Order By: %s Page Size: %s",
self.project_id,
self.format_,
self.filter_,
self.order_by,
self.page_size,
)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = self.hook.list_alert_policies(
project_id=self.project_id,
format_=self.format_,
filter_=self.filter_,
order_by=self.order_by,
page_size=self.page_size,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverPoliciesLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
return [AlertPolicy.to_dict(policy) for policy in result]
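# A minimal usage sketch (illustrative only): it lists matching alert policies as
# Python dicts. The project id and display name are placeholders; the filter uses
# the Cloud Monitoring filter syntax linked in the docstring above.
def _example_list_alert_policies() -> StackdriverListAlertPoliciesOperator:
    return StackdriverListAlertPoliciesOperator(
        task_id="list_alert_policies",
        project_id="my-project",  # placeholder project
        filter_='display_name="my-alert-policy"',  # placeholder policy name
        format_="dict",  # return plain dictionaries instead of protobufs
    )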
class StackdriverEnableAlertPoliciesOperator(GoogleCloudBaseOperator):
"""
Enables one or more disabled alerting policies identified by the filter parameter.
Inoperative if the policy is already enabled.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverEnableAlertPoliciesOperator`
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be enabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project in which alert needs to be enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
ui_color = "#e5ffcc"
template_fields: Sequence[str] = (
"filter_",
"impersonation_chain",
)
operator_extra_links = (StackdriverPoliciesLink(),)
def __init__(
self,
*,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.filter_ = filter_
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Enable Alert Policies: Project id: %s Filter: %s", self.project_id, self.filter_)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.enable_alert_policies(
filter_=self.filter_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverPoliciesLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
# Disable Alert Operator
class StackdriverDisableAlertPoliciesOperator(GoogleCloudBaseOperator):
"""
Disables one or more enabled alerting policies identified by the filter parameter.
Inoperative if the policy is already disabled.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverDisableAlertPoliciesOperator`
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be disabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project in which alert needs to be disabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
ui_color = "#e5ffcc"
template_fields: Sequence[str] = (
"filter_",
"impersonation_chain",
)
operator_extra_links = (StackdriverPoliciesLink(),)
def __init__(
self,
*,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.filter_ = filter_
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Disable Alert Policies: Project id: %s Filter: %s", self.project_id, self.filter_)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.disable_alert_policies(
filter_=self.filter_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverPoliciesLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
class StackdriverUpsertAlertOperator(GoogleCloudBaseOperator):
"""
Creates a new alert policy or updates an existing one, identified by the name field in the alerts parameter.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverUpsertAlertOperator`
:param alerts: A JSON string or file that specifies all the alert policies that
need to be either created or updated. For more details, see
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies#AlertPolicy.
(templated)
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project in which alert needs to be created/updated.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"alerts",
"impersonation_chain",
)
template_ext: Sequence[str] = (".json",)
operator_extra_links = (StackdriverPoliciesLink(),)
ui_color = "#e5ffcc"
def __init__(
self,
*,
alerts: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.alerts = alerts
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Upsert Alert Policies: Alerts: %s Project id: %s", self.alerts, self.project_id)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.upsert_alert(
alerts=self.alerts,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverPoliciesLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
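# A minimal usage sketch (illustrative only): it upserts a single alert policy from an
# inline JSON document. The policy body (display name, metric filter, threshold) is a
# placeholder, and the ``{"policies": [...]}`` top-level layout is an assumption about
# what the hook accepts rather than a documented contract.
def _example_upsert_alert_policy() -> StackdriverUpsertAlertOperator:
    import json

    alert_policy = {
        "display_name": "example high CPU alert",  # placeholder policy
        "combiner": "OR",
        "enabled": True,
        "conditions": [
            {
                "display_name": "CPU usage above 80%",
                "condition_threshold": {
                    "filter": 'metric.type="compute.googleapis.com/instance/cpu/utilization"',
                    "comparison": "COMPARISON_GT",
                    "threshold_value": 0.8,
                    "duration": {"seconds": 300},
                },
            }
        ],
    }
    return StackdriverUpsertAlertOperator(
        task_id="upsert_alert_policy",
        project_id="my-project",  # placeholder project
        alerts=json.dumps({"policies": [alert_policy]}),
    )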
class StackdriverDeleteAlertOperator(GoogleCloudBaseOperator):
"""
Deletes an alerting policy.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverDeleteAlertOperator`
:param name: The alerting policy to delete. The format is:
``projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project from which alert needs to be deleted.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
ui_color = "#e5ffcc"
def __init__(
self,
*,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Delete Alert Policy: Project id: %s Name: %s", self.project_id, self.name)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.delete_alert_policy(
name=self.name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
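# A minimal usage sketch (illustrative only): it deletes one alert policy by its fully
# qualified resource name in the format described above; both the project id and the
# policy id are placeholders.
def _example_delete_alert_policy() -> StackdriverDeleteAlertOperator:
    return StackdriverDeleteAlertOperator(
        task_id="delete_alert_policy",
        name="projects/my-project/alertPolicies/1234567890",  # placeholder resource name
    )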
class StackdriverListNotificationChannelsOperator(GoogleCloudBaseOperator):
"""
Fetches all the Notification Channels identified by the filter passed as the filter parameter.
The desired return type can be specified by the format parameter; the
supported formats are "dict", "json" and None, which return a Python
dictionary, stringified JSON and protobuf respectively.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverListNotificationChannelsOperator`
:param format_: (Optional) Desired output format of the result. The
supported formats are "dict", "json" and None which returns
python dictionary, stringified JSON and protobuf respectively.
:param filter_: If provided, this field specifies the criteria that
must be met by notification channels to be included in the response.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param order_by: A comma-separated list of fields by which to sort the result.
Supports the same set of field references as the ``filter`` field. Entries
can be prefixed with a minus sign to sort by the field in descending order.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project to fetch notification channels from.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"filter_",
"impersonation_chain",
)
operator_extra_links = (StackdriverNotificationsLink(),)
ui_color = "#e5ffcc"
def __init__(
self,
*,
format_: str | None = None,
filter_: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.format_ = format_
self.filter_ = filter_
self.order_by = order_by
self.page_size = page_size
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info(
"List Notification Channels: Project id: %s Format: %s Filter: %s Order By: %s Page Size: %s",
self.project_id,
self.format_,
self.filter_,
self.order_by,
self.page_size,
)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
channels = self.hook.list_notification_channels(
format_=self.format_,
project_id=self.project_id,
filter_=self.filter_,
order_by=self.order_by,
page_size=self.page_size,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverNotificationsLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
return [NotificationChannel.to_dict(channel) for channel in channels]
class StackdriverEnableNotificationChannelsOperator(GoogleCloudBaseOperator):
"""
Enables one or more disabled notification channels identified by the filter parameter.
Inoperative if the channel is already enabled.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverEnableNotificationChannelsOperator`
:param filter_: If provided, this field specifies the criteria that
must be met by notification channels to be enabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project in which notification channels need to be enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"filter_",
"impersonation_chain",
)
operator_extra_links = (StackdriverNotificationsLink(),)
ui_color = "#e5ffcc"
def __init__(
self,
*,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.filter_ = filter_
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info(
"Enable Notification Channels: Project id: %s Filter: %s", self.project_id, self.filter_
)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.enable_notification_channels(
filter_=self.filter_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverNotificationsLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
class StackdriverDisableNotificationChannelsOperator(GoogleCloudBaseOperator):
"""
Disables one or more enabled notification channels identified by the filter parameter.
Inoperative if the channel is already disabled.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverDisableNotificationChannelsOperator`
:param filter_: If provided, this field specifies the criteria that
must be met by notification channels to be disabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project in which notification channels need to be disabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"filter_",
"impersonation_chain",
)
operator_extra_links = (StackdriverNotificationsLink(),)
ui_color = "#e5ffcc"
def __init__(
self,
*,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.filter_ = filter_
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info(
"Disable Notification Channels: Project id: %s Filter: %s", self.project_id, self.filter_
)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.disable_notification_channels(
filter_=self.filter_,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverNotificationsLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
class StackdriverUpsertNotificationChannelOperator(GoogleCloudBaseOperator):
"""
Creates a new notification channel or updates an existing one.
The channel is identified by the name field in the channels parameter.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverUpsertNotificationChannelOperator`
:param channels: A JSON string or file that specifies all the notification channels that
need to be either created or updated. For more details, see
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannels.
(templated)
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project in which notification channels needs to be created/updated.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"channels",
"impersonation_chain",
)
template_ext: Sequence[str] = (".json",)
operator_extra_links = (StackdriverNotificationsLink(),)
ui_color = "#e5ffcc"
def __init__(
self,
*,
channels: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.channels = channels
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info(
"Upsert Notification Channels: Channels: %s Project id: %s", self.channels, self.project_id
)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.upsert_channel(
channels=self.channels,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
StackdriverNotificationsLink.persist(
context=context,
operator_instance=self,
project_id=self.project_id or self.hook.project_id,
)
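# A minimal usage sketch (illustrative only): it upserts one e-mail notification channel
# from an inline JSON document. The channel body is a placeholder, and the
# ``{"channels": [...]}`` top-level layout is an assumption about what the hook accepts
# rather than a documented contract.
def _example_upsert_notification_channel() -> StackdriverUpsertNotificationChannelOperator:
    import json

    channel = {
        "display_name": "example email channel",  # placeholder channel
        "type_": "email",
        "labels": {"email_address": "alerts@example.com"},
        "enabled": True,
    }
    return StackdriverUpsertNotificationChannelOperator(
        task_id="upsert_notification_channel",
        project_id="my-project",  # placeholder project
        channels=json.dumps({"channels": [channel]}),
    )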
class StackdriverDeleteNotificationChannelOperator(GoogleCloudBaseOperator):
"""
Deletes a notification channel.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverDeleteNotificationChannelOperator`
:param name: The notification channel to delete. The format is:
``projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project from which notification channel needs to be deleted.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
ui_color = "#e5ffcc"
def __init__(
self,
*,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Delete Notification Channel: Project id: %s Name: %s", self.project_id, self.name)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.delete_notification_channel(
name=self.name, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
| 39,049 | 41.864984 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/dataprep.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Dataprep operator."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.google.cloud.hooks.dataprep import GoogleDataprepHook
from airflow.providers.google.cloud.links.dataprep import DataprepFlowLink, DataprepJobGroupLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataprepGetJobsForJobGroupOperator(GoogleCloudBaseOperator):
"""
Get information about the batch jobs within a Cloud Dataprep job.
API documentation: https://clouddataprep.com/documentation/api#section/Overview.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataprepGetJobsForJobGroupOperator`
    :param job_group_id: The ID of the job group that will be requested
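    **Example** (a minimal usage sketch; the task id, connection id and job group id below are placeholders)::

        get_jobs_for_job_group = DataprepGetJobsForJobGroupOperator(
            task_id="get_jobs_for_job_group",
            dataprep_conn_id="dataprep_default",
            job_group_id=1234567,
        )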
"""
template_fields: Sequence[str] = ("job_group_id",)
def __init__(
self,
*,
dataprep_conn_id: str = "dataprep_default",
job_group_id: int | str,
**kwargs,
) -> None:
super().__init__(**kwargs)
        self.dataprep_conn_id = dataprep_conn_id
self.job_group_id = job_group_id
def execute(self, context: Context) -> dict:
self.log.info("Fetching data for job with id: %d ...", self.job_group_id)
hook = GoogleDataprepHook(
dataprep_conn_id="dataprep_default",
)
response = hook.get_jobs_for_job_group(job_id=int(self.job_group_id))
return response
class DataprepGetJobGroupOperator(GoogleCloudBaseOperator):
"""
Get the specified job group.
A job group is a job that is executed from a specific node in a flow.
API documentation: https://clouddataprep.com/documentation/api#section/Overview.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataprepGetJobGroupOperator`
    :param job_group_id: The ID of the job group that will be requested
:param embed: Comma-separated list of objects to pull in as part of the response
    :param include_deleted: If set to ``True``, deleted objects will be included
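    **Example** (a minimal usage sketch; the task id, connection id and job group id below are placeholders)::

        get_job_group = DataprepGetJobGroupOperator(
            task_id="get_job_group",
            dataprep_conn_id="dataprep_default",
            job_group_id=1234567,
            embed="",
            include_deleted=False,
        )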
"""
template_fields: Sequence[str] = (
"job_group_id",
"embed",
"project_id",
)
operator_extra_links = (DataprepJobGroupLink(),)
def __init__(
self,
*,
dataprep_conn_id: str = "dataprep_default",
project_id: str | None = None,
job_group_id: int | str,
embed: str,
include_deleted: bool,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataprep_conn_id: str = dataprep_conn_id
self.project_id = project_id
self.job_group_id = job_group_id
self.embed = embed
self.include_deleted = include_deleted
def execute(self, context: Context) -> dict:
self.log.info("Fetching data for job with id: %d ...", self.job_group_id)
if self.project_id:
DataprepJobGroupLink.persist(
context=context,
task_instance=self,
project_id=self.project_id,
job_group_id=int(self.job_group_id),
)
hook = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
response = hook.get_job_group(
job_group_id=int(self.job_group_id),
embed=self.embed,
include_deleted=self.include_deleted,
)
return response
class DataprepRunJobGroupOperator(GoogleCloudBaseOperator):
"""
Create a ``jobGroup``, which launches the specified job as the authenticated user.
This performs the same action as clicking on the Run Job button in the application.
To get recipe_id please follow the Dataprep API documentation:
https://clouddataprep.com/documentation/api#operation/runJobGroup.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataprepRunJobGroupOperator`
:param dataprep_conn_id: The Dataprep connection ID
    :param body_request: The request body passed to GoogleDataprepHook's ``run_job_group``;
        it identifies the recipe (wrangled dataset) to run
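    **Example** (a minimal usage sketch; the recipe/wrangled dataset id below is a placeholder)::

        run_job_group = DataprepRunJobGroupOperator(
            task_id="run_job_group",
            dataprep_conn_id="dataprep_default",
            body_request={"wrangledDataset": {"id": 1234567}},
        )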
"""
template_fields: Sequence[str] = ("body_request",)
operator_extra_links = (DataprepJobGroupLink(),)
def __init__(
self,
*,
project_id: str | None = None,
dataprep_conn_id: str = "dataprep_default",
body_request: dict,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.dataprep_conn_id = dataprep_conn_id
self.body_request = body_request
def execute(self, context: Context) -> dict:
self.log.info("Creating a job...")
hook = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
response = hook.run_job_group(body_request=self.body_request)
job_group_id = response.get("id")
if self.project_id and job_group_id:
DataprepJobGroupLink.persist(
context=context,
task_instance=self,
project_id=self.project_id,
job_group_id=int(job_group_id),
)
return response
class DataprepCopyFlowOperator(GoogleCloudBaseOperator):
"""
Create a copy of the provided flow id, as well as all contained recipes.
:param dataprep_conn_id: The Dataprep connection ID
:param flow_id: ID of the flow to be copied
:param name: Name for the copy of the flow
:param description: Description of the copy of the flow
    :param copy_datasources: Whether the data inputs should be copied as well.
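    **Example** (a minimal usage sketch; the flow id and copy name below are placeholders)::

        copy_flow = DataprepCopyFlowOperator(
            task_id="copy_flow",
            dataprep_conn_id="dataprep_default",
            flow_id=1234567,
            name="copy_of_my_flow",
            copy_datasources=False,
        )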
"""
template_fields: Sequence[str] = (
"flow_id",
"name",
"project_id",
"description",
)
operator_extra_links = (DataprepFlowLink(),)
def __init__(
self,
*,
project_id: str | None = None,
dataprep_conn_id: str = "dataprep_default",
flow_id: int | str,
name: str = "",
description: str = "",
copy_datasources: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.dataprep_conn_id = dataprep_conn_id
self.flow_id = flow_id
self.name = name
self.description = description
self.copy_datasources = copy_datasources
def execute(self, context: Context) -> dict:
self.log.info("Copying flow with id %d...", self.flow_id)
hook = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
response = hook.copy_flow(
flow_id=int(self.flow_id),
name=self.name,
description=self.description,
copy_datasources=self.copy_datasources,
)
copied_flow_id = response.get("id")
if self.project_id and copied_flow_id:
DataprepFlowLink.persist(
context=context,
task_instance=self,
project_id=self.project_id,
flow_id=int(copied_flow_id),
)
return response
class DataprepDeleteFlowOperator(GoogleCloudBaseOperator):
"""
Delete the flow with provided id.
:param dataprep_conn_id: The Dataprep connection ID
    :param flow_id: ID of the flow to be deleted
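    **Example** (a minimal usage sketch; the flow id below is a placeholder)::

        delete_flow = DataprepDeleteFlowOperator(
            task_id="delete_flow",
            dataprep_conn_id="dataprep_default",
            flow_id=1234567,
        )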
"""
template_fields: Sequence[str] = ("flow_id",)
def __init__(
self,
*,
dataprep_conn_id: str = "dataprep_default",
flow_id: int | str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataprep_conn_id = dataprep_conn_id
self.flow_id = flow_id
def execute(self, context: Context) -> None:
self.log.info("Start delete operation of the flow with id: %d...", self.flow_id)
hook = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
hook.delete_flow(flow_id=int(self.flow_id))
class DataprepRunFlowOperator(GoogleCloudBaseOperator):
"""
    Run the flow with the provided ID.
:param dataprep_conn_id: The Dataprep connection ID
    :param flow_id: ID of the flow to be run
:param body_request: Body of the POST request to be sent.
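    **Example** (a minimal usage sketch; the flow id below is a placeholder and the body is typically empty)::

        run_flow = DataprepRunFlowOperator(
            task_id="run_flow",
            dataprep_conn_id="dataprep_default",
            flow_id=1234567,
            body_request={},
        )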
"""
template_fields: Sequence[str] = (
"flow_id",
"project_id",
)
operator_extra_links = (DataprepJobGroupLink(),)
def __init__(
self,
*,
project_id: str | None = None,
flow_id: int | str,
body_request: dict,
dataprep_conn_id: str = "dataprep_default",
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.flow_id = flow_id
self.body_request = body_request
self.dataprep_conn_id = dataprep_conn_id
def execute(self, context: Context) -> dict:
self.log.info("Running the flow with id: %d...", self.flow_id)
hooks = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
response = hooks.run_flow(flow_id=int(self.flow_id), body_request=self.body_request)
if self.project_id:
job_group_id = response["data"][0]["id"]
DataprepJobGroupLink.persist(
context=context,
task_instance=self,
project_id=self.project_id,
job_group_id=int(job_group_id),
)
return response
| 10,367 | 32.553398 | 96 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/bigquery.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery operators."""
from __future__ import annotations
import enum
import json
import warnings
from typing import TYPE_CHECKING, Any, Iterable, Sequence, SupportsAbs
import attr
from google.api_core.exceptions import Conflict
from google.api_core.retry import Retry
from google.cloud.bigquery import DEFAULT_RETRY, CopyJob, ExtractJob, LoadJob, QueryJob
from google.cloud.bigquery.table import RowIterator
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning, AirflowSkipException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.xcom import XCom
from airflow.providers.common.sql.operators.sql import (
SQLCheckOperator,
SQLColumnCheckOperator,
SQLIntervalCheckOperator,
SQLTableCheckOperator,
SQLValueCheckOperator,
_parse_boolean,
)
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook, BigQueryJob
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url
from airflow.providers.google.cloud.links.bigquery import BigQueryDatasetLink, BigQueryTableLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.bigquery import (
BigQueryCheckTrigger,
BigQueryGetDataTrigger,
BigQueryInsertJobTrigger,
BigQueryIntervalCheckTrigger,
BigQueryValueCheckTrigger,
)
from airflow.providers.google.cloud.utils.bigquery import convert_job_id
if TYPE_CHECKING:
from google.cloud.bigquery import UnknownJob
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
BIGQUERY_JOB_DETAILS_LINK_FMT = "https://console.cloud.google.com/bigquery?j={job_id}"
class BigQueryUIColors(enum.Enum):
"""Hex colors for BigQuery operators."""
CHECK = "#C0D7FF"
QUERY = "#A1BBFF"
TABLE = "#81A0FF"
DATASET = "#5F86FF"
class IfExistAction(enum.Enum):
"""Action to take if the resource exist."""
IGNORE = "ignore"
LOG = "log"
FAIL = "fail"
SKIP = "skip"
class BigQueryConsoleLink(BaseOperatorLink):
"""Helper class for constructing BigQuery link."""
name = "BigQuery Console"
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
):
job_id_path = XCom.get_value(key="job_id_path", ti_key=ti_key)
return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id_path) if job_id_path else ""
@attr.s(auto_attribs=True)
class BigQueryConsoleIndexableLink(BaseOperatorLink):
"""Helper class for constructing BigQuery link."""
index: int = attr.ib()
@property
def name(self) -> str:
return f"BigQuery Console #{self.index + 1}"
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
):
job_ids = XCom.get_value(key="job_id_path", ti_key=ti_key)
if not job_ids:
return None
        if self.index >= len(job_ids):
return None
job_id = job_ids[self.index]
return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
class _BigQueryDbHookMixin:
def get_db_hook(self: BigQueryCheckOperator) -> BigQueryHook: # type:ignore[misc]
"""Get BigQuery DB Hook."""
return BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
impersonation_chain=self.impersonation_chain,
labels=self.labels,
)
class BigQueryCheckOperator(_BigQueryDbHookMixin, SQLCheckOperator):
"""Performs checks against BigQuery.
This operator expects a SQL query that returns a single row. Each value on
that row is evaluated using a Python ``bool`` cast. If any of the values
is falsy, the check errors out.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryCheckOperator`
    Note that Python bool casting evaluates the following as *False*:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
    Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count equals zero. You can craft a much more complex query that could,
    for instance, check that the table has the same number of rows as the source
    table upstream, or that the count of today's partition is greater than
    yesterday's partition, or that a set of metrics are less than three standard
    deviations from the 7-day average.
This operator can be used as a data quality check in your pipeline.
    Depending on where you put it in your DAG, you can either stop the
    critical path, preventing the publication of dubious data, or run it on the side
    and receive email alerts without stopping the progress of the DAG.
:param sql: SQL to execute.
:param gcp_conn_id: Connection ID for Google Cloud.
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:param location: The geographic location of the job. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using
short-term credentials, or chained list of accounts required to get the
access token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the
originating account the Service Account Token Creator IAM role. If set
as a sequence, the identities from the list must grant Service Account
Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account. (templated)
:param labels: a dictionary containing labels for the table, passed to BigQuery.
:param deferrable: Run operator in the deferrable mode.
:param poll_interval: (Deferrable mode only) polling period in seconds to
check for the status of job.
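    **Example** (a minimal usage sketch; the dataset and table names below are placeholders)::

        check_count = BigQueryCheckOperator(
            task_id="check_count",
            sql="SELECT COUNT(*) FROM my_dataset.my_table",
            use_legacy_sql=False,
            gcp_conn_id="google_cloud_default",
        )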
"""
template_fields: Sequence[str] = (
"sql",
"gcp_conn_id",
"impersonation_chain",
"labels",
)
template_ext: Sequence[str] = (".sql",)
ui_color = BigQueryUIColors.CHECK.value
def __init__(
self,
*,
sql: str,
gcp_conn_id: str = "google_cloud_default",
use_legacy_sql: bool = True,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
**kwargs,
) -> None:
super().__init__(sql=sql, **kwargs)
self.gcp_conn_id = gcp_conn_id
self.sql = sql
self.use_legacy_sql = use_legacy_sql
self.location = location
self.impersonation_chain = impersonation_chain
self.labels = labels
self.deferrable = deferrable
self.poll_interval = poll_interval
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
"""Submit a new job and get the job id for polling the status using Trigger."""
configuration = {"query": {"query": self.sql, "useLegacySql": self.use_legacy_sql}}
return hook.insert_job(
configuration=configuration,
project_id=hook.project_id,
location=self.location,
job_id=job_id,
nowait=True,
)
def execute(self, context: Context):
if not self.deferrable:
super().execute(context=context)
else:
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
)
job = self._submit_job(hook, job_id="")
context["ti"].xcom_push(key="job_id", value=job.job_id)
if job.running():
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryCheckTrigger(
conn_id=self.gcp_conn_id,
job_id=job.job_id,
project_id=hook.project_id,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
self.log.info("Current state of job %s is %s", job.job_id, job.state)
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""Callback for when the trigger fires.
        This returns immediately. It relies on the trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
records = event["records"]
if not records:
raise AirflowException("The query returned empty results")
elif not all(bool(r) for r in records):
self._raise_exception( # type: ignore[attr-defined]
f"Test failed.\nQuery:\n{self.sql}\nResults:\n{records!s}"
)
self.log.info("Record: %s", event["records"])
self.log.info("Success.")
class BigQueryValueCheckOperator(_BigQueryDbHookMixin, SQLValueCheckOperator):
"""Perform a simple value check using sql code.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryValueCheckOperator`
:param sql: SQL to execute.
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The geographic location of the job. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using
short-term credentials, or chained list of accounts required to get the
access token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the
originating account the Service Account Token Creator IAM role. If set
as a sequence, the identities from the list must grant Service Account
Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account. (templated)
:param labels: a dictionary containing labels for the table, passed to BigQuery.
:param deferrable: Run operator in the deferrable mode.
:param poll_interval: (Deferrable mode only) polling period in seconds to
check for the status of job.
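    **Example** (a minimal usage sketch; the dataset, table and expected value below are placeholders)::

        check_value = BigQueryValueCheckOperator(
            task_id="check_value",
            sql="SELECT COUNT(*) FROM my_dataset.my_table",
            pass_value=100,
            tolerance=0.1,
            use_legacy_sql=False,
        )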
"""
template_fields: Sequence[str] = (
"sql",
"gcp_conn_id",
"pass_value",
"impersonation_chain",
"labels",
)
template_ext: Sequence[str] = (".sql",)
ui_color = BigQueryUIColors.CHECK.value
def __init__(
self,
*,
sql: str,
pass_value: Any,
tolerance: Any = None,
gcp_conn_id: str = "google_cloud_default",
use_legacy_sql: bool = True,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
**kwargs,
) -> None:
super().__init__(sql=sql, pass_value=pass_value, tolerance=tolerance, **kwargs)
self.location = location
self.gcp_conn_id = gcp_conn_id
self.use_legacy_sql = use_legacy_sql
self.impersonation_chain = impersonation_chain
self.labels = labels
self.deferrable = deferrable
self.poll_interval = poll_interval
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
"""Submit a new job and get the job id for polling the status using Triggerer."""
configuration = {
"query": {
"query": self.sql,
"useLegacySql": self.use_legacy_sql,
},
}
return hook.insert_job(
configuration=configuration,
project_id=hook.project_id,
location=self.location,
job_id=job_id,
nowait=True,
)
def execute(self, context: Context) -> None: # type: ignore[override]
if not self.deferrable:
super().execute(context=context)
else:
hook = BigQueryHook(gcp_conn_id=self.gcp_conn_id)
job = self._submit_job(hook, job_id="")
context["ti"].xcom_push(key="job_id", value=job.job_id)
if job.running():
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryValueCheckTrigger(
conn_id=self.gcp_conn_id,
job_id=job.job_id,
project_id=hook.project_id,
sql=self.sql,
pass_value=self.pass_value,
tolerance=self.tol,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
self._handle_job_error(job)
self.log.info("Current state of job %s is %s", job.job_id, job.state)
@staticmethod
def _handle_job_error(job: BigQueryJob | UnknownJob) -> None:
if job.error_result:
raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""Callback for when the trigger fires.
        This returns immediately. It relies on the trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
class BigQueryIntervalCheckOperator(_BigQueryDbHookMixin, SQLIntervalCheckOperator):
"""
Check that the values of metrics given as SQL expressions are within a tolerance of the older ones.
This method constructs a query like so ::
SELECT {metrics_threshold_dict_key} FROM {table}
WHERE {date_filter_column}=<date>
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryIntervalCheckOperator`
:param table: the table name
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:param metrics_thresholds: a dictionary of ratios indexed by metrics, for
example 'COUNT(*)': 1.5 would require a 50 percent or less difference
        between the current day and the prior days_back.
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The geographic location of the job. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param labels: a dictionary containing labels for the table, passed to BigQuery
:param deferrable: Run operator in the deferrable mode
:param poll_interval: (Deferrable mode only) polling period in seconds to check for the status of job.
Defaults to 4 seconds.
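    **Example** (a minimal usage sketch; the table name and thresholds below are placeholders)::

        check_interval = BigQueryIntervalCheckOperator(
            task_id="check_interval",
            table="my_dataset.my_table",
            days_back=7,
            metrics_thresholds={"COUNT(*)": 1.5},
            use_legacy_sql=False,
        )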
"""
template_fields: Sequence[str] = (
"table",
"gcp_conn_id",
"sql1",
"sql2",
"impersonation_chain",
"labels",
)
ui_color = BigQueryUIColors.CHECK.value
def __init__(
self,
*,
table: str,
metrics_thresholds: dict,
date_filter_column: str = "ds",
days_back: SupportsAbs[int] = -7,
gcp_conn_id: str = "google_cloud_default",
use_legacy_sql: bool = True,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
**kwargs,
) -> None:
super().__init__(
table=table,
metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column,
days_back=days_back,
**kwargs,
)
self.gcp_conn_id = gcp_conn_id
self.use_legacy_sql = use_legacy_sql
self.location = location
self.impersonation_chain = impersonation_chain
self.labels = labels
self.deferrable = deferrable
self.poll_interval = poll_interval
def _submit_job(
self,
hook: BigQueryHook,
sql: str,
job_id: str,
) -> BigQueryJob:
"""Submit a new job and get the job id for polling the status using Triggerer."""
configuration = {"query": {"query": sql, "useLegacySql": self.use_legacy_sql}}
return hook.insert_job(
configuration=configuration,
project_id=hook.project_id,
location=self.location,
job_id=job_id,
nowait=True,
)
def execute(self, context: Context):
if not self.deferrable:
super().execute(context)
else:
hook = BigQueryHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Using ratio formula: %s", self.ratio_formula)
self.log.info("Executing SQL check: %s", self.sql1)
job_1 = self._submit_job(hook, sql=self.sql1, job_id="")
context["ti"].xcom_push(key="job_id", value=job_1.job_id)
self.log.info("Executing SQL check: %s", self.sql2)
job_2 = self._submit_job(hook, sql=self.sql2, job_id="")
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryIntervalCheckTrigger(
conn_id=self.gcp_conn_id,
first_job_id=job_1.job_id,
second_job_id=job_2.job_id,
project_id=hook.project_id,
table=self.table,
metrics_thresholds=self.metrics_thresholds,
date_filter_column=self.date_filter_column,
days_back=self.days_back,
ratio_formula=self.ratio_formula,
ignore_zero=self.ignore_zero,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> None:
"""Callback for when the trigger fires.
        This returns immediately. It relies on the trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
class BigQueryColumnCheckOperator(_BigQueryDbHookMixin, SQLColumnCheckOperator):
"""
Subclasses the SQLColumnCheckOperator in order to provide a job id for OpenLineage to parse.
See base class docstring for usage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryColumnCheckOperator`
:param table: the table name
:param column_mapping: a dictionary relating columns to their checks
:param partition_clause: a string SQL statement added to a WHERE clause
to partition data
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:param location: The geographic location of the job. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param labels: a dictionary containing labels for the table, passed to BigQuery
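    **Example** (a minimal usage sketch; the table, column and check bounds below are placeholders)::

        check_columns = BigQueryColumnCheckOperator(
            task_id="check_columns",
            table="my_dataset.my_table",
            column_mapping={"col_a": {"min": {"greater_than": 0}}},
            use_legacy_sql=False,
        )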
"""
def __init__(
self,
*,
table: str,
column_mapping: dict,
partition_clause: str | None = None,
database: str | None = None,
accept_none: bool = True,
gcp_conn_id: str = "google_cloud_default",
use_legacy_sql: bool = True,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
**kwargs,
) -> None:
super().__init__(
table=table,
column_mapping=column_mapping,
partition_clause=partition_clause,
database=database,
accept_none=accept_none,
**kwargs,
)
self.table = table
self.column_mapping = column_mapping
self.partition_clause = partition_clause
self.database = database
self.accept_none = accept_none
self.gcp_conn_id = gcp_conn_id
self.use_legacy_sql = use_legacy_sql
self.location = location
self.impersonation_chain = impersonation_chain
self.labels = labels
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
"""Submit a new job and get the job id for polling the status using Trigger."""
configuration = {"query": {"query": self.sql, "useLegacySql": self.use_legacy_sql}}
return hook.insert_job(
configuration=configuration,
project_id=hook.project_id,
location=self.location,
job_id=job_id,
nowait=False,
)
def execute(self, context=None):
"""Perform checks on the given columns."""
hook = self.get_db_hook()
failed_tests = []
job = self._submit_job(hook, job_id="")
context["ti"].xcom_push(key="job_id", value=job.job_id)
records = job.result().to_dataframe()
if records.empty:
raise AirflowException(f"The following query returned zero rows: {self.sql}")
records.columns = records.columns.str.lower()
self.log.info("Record: %s", records)
for row in records.iterrows():
column = row[1].get("col_name")
check = row[1].get("check_type")
result = row[1].get("check_result")
tolerance = self.column_mapping[column][check].get("tolerance")
self.column_mapping[column][check]["result"] = result
self.column_mapping[column][check]["success"] = self._get_match(
self.column_mapping[column][check], result, tolerance
)
failed_tests.extend(
f"Column: {col}\n\tCheck: {check},\n\tCheck Values: {check_values}\n"
for col, checks in self.column_mapping.items()
for check, check_values in checks.items()
if not check_values["success"]
)
if failed_tests:
exception_string = (
f"Test failed.\nResults:\n{records!s}\n"
f"The following tests have failed:"
f"\n{''.join(failed_tests)}"
)
self._raise_exception(exception_string)
self.log.info("All tests have passed")
class BigQueryTableCheckOperator(_BigQueryDbHookMixin, SQLTableCheckOperator):
"""
Subclasses the SQLTableCheckOperator in order to provide a job id for OpenLineage to parse.
See base class for usage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryTableCheckOperator`
:param table: the table name
:param checks: a dictionary of check names and boolean SQL statements
:param partition_clause: a string SQL statement added to a WHERE clause
to partition data
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:param location: The geographic location of the job. See details at:
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param labels: a dictionary containing labels for the table, passed to BigQuery
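    **Example** (a minimal usage sketch; the table name and check statement below are placeholders)::

        check_table = BigQueryTableCheckOperator(
            task_id="check_table",
            table="my_dataset.my_table",
            checks={"row_count_check": {"check_statement": "COUNT(*) > 0"}},
            use_legacy_sql=False,
        )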
"""
def __init__(
self,
*,
table: str,
checks: dict,
partition_clause: str | None = None,
gcp_conn_id: str = "google_cloud_default",
use_legacy_sql: bool = True,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
**kwargs,
) -> None:
super().__init__(table=table, checks=checks, partition_clause=partition_clause, **kwargs)
self.table = table
self.checks = checks
self.partition_clause = partition_clause
self.gcp_conn_id = gcp_conn_id
self.use_legacy_sql = use_legacy_sql
self.location = location
self.impersonation_chain = impersonation_chain
self.labels = labels
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
"""Submit a new job and get the job id for polling the status using Trigger."""
configuration = {"query": {"query": self.sql, "useLegacySql": self.use_legacy_sql}}
return hook.insert_job(
configuration=configuration,
project_id=hook.project_id,
location=self.location,
job_id=job_id,
nowait=False,
)
def execute(self, context=None):
"""Execute the given checks on the table."""
hook = self.get_db_hook()
job = self._submit_job(hook, job_id="")
context["ti"].xcom_push(key="job_id", value=job.job_id)
records = job.result().to_dataframe()
if records.empty:
raise AirflowException(f"The following query returned zero rows: {self.sql}")
records.columns = records.columns.str.lower()
self.log.info("Record:\n%s", records)
for row in records.iterrows():
check = row[1].get("check_name")
result = row[1].get("check_result")
self.checks[check]["success"] = _parse_boolean(str(result))
failed_tests = [
f"\tCheck: {check},\n\tCheck Values: {check_values}\n"
for check, check_values in self.checks.items()
if not check_values["success"]
]
if failed_tests:
exception_string = (
f"Test failed.\nQuery:\n{self.sql}\nResults:\n{records!s}\n"
f"The following tests have failed:\n{', '.join(failed_tests)}"
)
self._raise_exception(exception_string)
self.log.info("All tests have passed")
class BigQueryGetDataOperator(GoogleCloudBaseOperator):
"""
    Fetch data from a BigQuery table (alternatively fetch data for selected columns) and return it.
Data is returned in either of the following two formats, based on "as_dict" value:
1. False (Default) - A Python list of lists, with the number of nested lists equal to the number of rows
fetched. Each nested list represents a row, where the elements within it correspond to the column values
for that particular row.
    **Example Result**: ``[['Tony', 10], ['Mike', 20]]``
2. True - A Python list of dictionaries, where each dictionary represents a row. In each dictionary,
the keys are the column names and the values are the corresponding values for those columns.
**Example Result**: ``[{'name': 'Tony', 'age': 10}, {'name': 'Mike', 'age': 20}]``
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryGetDataOperator`
.. note::
        If you pass fields to ``selected_fields`` in a different order than the
        order of columns already in the BQ table, the data will still be in
        the order of the BQ table.
        For example, if the BQ table has 3 columns as ``[A,B,C]`` and you
        pass 'B,A' in ``selected_fields``, the data would still be of the
        form ``'A,B'``.
**Example**::
get_data = BigQueryGetDataOperator(
task_id='get_data_from_bq',
dataset_id='test_dataset',
table_id='Transaction_partitions',
project_id='internal-gcp-project',
max_results=100,
selected_fields='DATE',
gcp_conn_id='airflow-conn-id'
)
:param dataset_id: The dataset ID of the requested table. (templated)
:param table_id: The table ID of the requested table. (templated)
:param project_id: (Optional) The name of the project where the data
will be returned from. If None, it will be derived from the hook's project ID. (templated)
:param max_results: The maximum number of records (rows) to be fetched
from the table. (templated)
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode
:param poll_interval: (Deferrable mode only) polling period in seconds to check for the status of job.
Defaults to 4 seconds.
:param as_dict: if True returns the result as a list of dictionaries, otherwise as list of lists
(default: False).
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
"""
template_fields: Sequence[str] = (
"dataset_id",
"table_id",
"project_id",
"max_results",
"selected_fields",
"impersonation_chain",
)
ui_color = BigQueryUIColors.QUERY.value
def __init__(
self,
*,
dataset_id: str,
table_id: str,
project_id: str | None = None,
max_results: int = 100,
selected_fields: str | None = None,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
as_dict: bool = False,
use_legacy_sql: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.table_id = table_id
self.max_results = int(max_results)
self.selected_fields = selected_fields
self.gcp_conn_id = gcp_conn_id
self.location = location
self.impersonation_chain = impersonation_chain
self.project_id = project_id
self.deferrable = deferrable
self.poll_interval = poll_interval
self.as_dict = as_dict
self.use_legacy_sql = use_legacy_sql
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
        """Submit a new job and get the job id for polling the status using Triggerer."""
        get_query = self.generate_query(hook=hook)
        configuration = {"query": {"query": get_query, "useLegacySql": self.use_legacy_sql}}
return hook.insert_job(
configuration=configuration,
location=self.location,
project_id=hook.project_id,
job_id=job_id,
nowait=True,
)
def generate_query(self, hook: BigQueryHook) -> str:
"""Generate a SELECT query if for the given dataset and table ID."""
query = "select "
if self.selected_fields:
query += self.selected_fields
else:
query += "*"
query += (
f" from `{self.project_id or hook.project_id}.{self.dataset_id}"
f".{self.table_id}` limit {self.max_results}"
)
return query
def execute(self, context: Context):
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
use_legacy_sql=self.use_legacy_sql,
)
if not self.deferrable:
self.log.info(
"Fetching Data from %s.%s.%s max results: %s",
self.project_id or hook.project_id,
self.dataset_id,
self.table_id,
self.max_results,
)
if not self.selected_fields:
schema: dict[str, list] = hook.get_schema(
dataset_id=self.dataset_id,
table_id=self.table_id,
project_id=self.project_id,
)
if "fields" in schema:
self.selected_fields = ",".join([field["name"] for field in schema["fields"]])
rows = hook.list_rows(
dataset_id=self.dataset_id,
table_id=self.table_id,
max_results=self.max_results,
selected_fields=self.selected_fields,
location=self.location,
project_id=self.project_id,
)
if isinstance(rows, RowIterator):
raise TypeError(
"BigQueryHook.list_rows() returns iterator when return_iterator is False (default)"
)
self.log.info("Total extracted rows: %s", len(rows))
if self.as_dict:
table_data = [{k: v for k, v in row.items()} for row in rows]
else:
table_data = [row.values() for row in rows]
return table_data
job = self._submit_job(hook, job_id="")
context["ti"].xcom_push(key="job_id", value=job.job_id)
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryGetDataTrigger(
conn_id=self.gcp_conn_id,
job_id=job.job_id,
dataset_id=self.dataset_id,
table_id=self.table_id,
project_id=hook.project_id,
poll_interval=self.poll_interval,
as_dict=self.as_dict,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
"""Callback for when the trigger fires.
        This returns immediately. It relies on the trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info("Total extracted rows: %s", len(event["records"]))
return event["records"]
class BigQueryExecuteQueryOperator(GoogleCloudBaseOperator):
"""Executes BigQuery SQL queries in a specific BigQuery database.
This operator is deprecated. Please use
:class:`airflow.providers.google.cloud.operators.bigquery.BigQueryInsertJobOperator`
instead.
This operator does not assert idempotency.
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file.
Template references are recognized by str ending in '.sql'
:param destination_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that, if set, will store the results
of the query. (templated)
:param write_disposition: Specifies the action that occurs if the destination table
already exists. (default: 'WRITE_EMPTY')
:param create_disposition: Specifies whether the job is allowed to create new tables.
(default: 'CREATE_IF_NEEDED')
:param allow_large_results: Whether to allow large results.
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allow_large_results``
must be ``true`` if this is set to ``false``. For standard SQL queries, this
flag is ignored and results are never flattened.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:param maximum_billing_tier: Positive integer that serves as a multiplier
of the basic price.
Defaults to None, in which case it uses the value set in the project.
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:param api_resource_configs: a dictionary that contain params
'configuration' applied for Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. You could use it
if you need to provide some params that are not supported by BigQueryOperator
like args.
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:param query_params: a list of dictionary containing query parameter types and
values, passed to BigQuery. The structure of dictionary should look like
'queryParameters' in Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs.
For example, [{ 'name': 'corpus', 'parameterType': { 'type': 'STRING' },
'parameterValue': { 'value': 'romeoandjuliet' } }]. (templated)
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"sql",
"destination_dataset_table",
"labels",
"query_params",
"impersonation_chain",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = BigQueryUIColors.QUERY.value
@property
def operator_extra_links(self):
"""Return operator extra links."""
if isinstance(self.sql, str):
return (BigQueryConsoleLink(),)
return (BigQueryConsoleIndexableLink(i) for i, _ in enumerate(self.sql))
def __init__(
self,
*,
sql: str | Iterable[str],
destination_dataset_table: str | None = None,
write_disposition: str = "WRITE_EMPTY",
allow_large_results: bool = False,
flatten_results: bool | None = None,
gcp_conn_id: str = "google_cloud_default",
udf_config: list | None = None,
use_legacy_sql: bool = True,
maximum_billing_tier: int | None = None,
maximum_bytes_billed: float | None = None,
create_disposition: str = "CREATE_IF_NEEDED",
schema_update_options: list | tuple | set | None = None,
query_params: list | None = None,
labels: dict | None = None,
priority: str = "INTERACTIVE",
time_partitioning: dict | None = None,
api_resource_configs: dict | None = None,
cluster_fields: list[str] | None = None,
location: str | None = None,
encryption_configuration: dict | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
warnings.warn(
"This operator is deprecated. Please use `BigQueryInsertJobOperator`.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.sql = sql
self.destination_dataset_table = destination_dataset_table
self.write_disposition = write_disposition
self.create_disposition = create_disposition
self.allow_large_results = allow_large_results
self.flatten_results = flatten_results
self.gcp_conn_id = gcp_conn_id
self.udf_config = udf_config
self.use_legacy_sql = use_legacy_sql
self.maximum_billing_tier = maximum_billing_tier
self.maximum_bytes_billed = maximum_bytes_billed
self.schema_update_options = schema_update_options
self.query_params = query_params
self.labels = labels
self.priority = priority
self.time_partitioning = time_partitioning
self.api_resource_configs = api_resource_configs
self.cluster_fields = cluster_fields
self.location = location
self.encryption_configuration = encryption_configuration
self.hook: BigQueryHook | None = None
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
if self.hook is None:
self.log.info("Executing: %s", self.sql)
self.hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if isinstance(self.sql, str):
job_id: str | list[str] = self.hook.run_query(
sql=self.sql,
destination_dataset_table=self.destination_dataset_table,
write_disposition=self.write_disposition,
allow_large_results=self.allow_large_results,
flatten_results=self.flatten_results,
udf_config=self.udf_config,
maximum_billing_tier=self.maximum_billing_tier,
maximum_bytes_billed=self.maximum_bytes_billed,
create_disposition=self.create_disposition,
query_params=self.query_params,
labels=self.labels,
schema_update_options=self.schema_update_options,
priority=self.priority,
time_partitioning=self.time_partitioning,
api_resource_configs=self.api_resource_configs,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration,
)
elif isinstance(self.sql, Iterable):
job_id = [
self.hook.run_query(
sql=s,
destination_dataset_table=self.destination_dataset_table,
write_disposition=self.write_disposition,
allow_large_results=self.allow_large_results,
flatten_results=self.flatten_results,
udf_config=self.udf_config,
maximum_billing_tier=self.maximum_billing_tier,
maximum_bytes_billed=self.maximum_bytes_billed,
create_disposition=self.create_disposition,
query_params=self.query_params,
labels=self.labels,
schema_update_options=self.schema_update_options,
priority=self.priority,
time_partitioning=self.time_partitioning,
api_resource_configs=self.api_resource_configs,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration,
)
for s in self.sql
]
else:
raise AirflowException(f"argument 'sql' of type {type(str)} is neither a string nor an iterable")
project_id = self.hook.project_id
if project_id:
job_id_path = convert_job_id(job_id=job_id, project_id=project_id, location=self.location)
context["task_instance"].xcom_push(key="job_id_path", value=job_id_path)
return job_id
def on_kill(self) -> None:
super().on_kill()
if self.hook is not None:
self.log.info("Cancelling running query")
self.hook.cancel_job(self.hook.running_job_id)
class BigQueryCreateEmptyTableOperator(GoogleCloudBaseOperator):
"""Creates a new table in the specified BigQuery dataset, optionally with schema.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google Cloud Storage object name. The object in
Google Cloud Storage must be a JSON file with the schema fields in it.
You can also create a table without schema.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryCreateEmptyTableOperator`
:param project_id: The project to create the table into. (templated)
:param dataset_id: The dataset to create the table into. (templated)
:param table_id: The Name of the table to be created. (templated)
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
        If provided, all other parameters are ignored. (templated)
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
**Example**::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param gcs_schema_object: Full path to the JSON file containing
schema (templated). For
example: ``gs://test-bucket/dir1/dir2/employee_schema.json``
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:param gcp_conn_id: [Optional] The connection ID used to connect to Google Cloud and
interact with the Bigquery service.
    :param google_cloud_storage_conn_id: [Optional] The connection ID used to connect to Google Cloud
        and interact with the Google Cloud Storage service.
:param labels: a dictionary containing labels for the table, passed to BigQuery
**Example (with schema JSON in GCS)**::
CreateTable = BigQueryCreateEmptyTableOperator(
task_id='BigQueryCreateEmptyTableOperator_task',
dataset_id='ODS',
table_id='Employees',
project_id='internal-gcp-project',
gcs_schema_object='gs://schema-bucket/employee_schema.json',
gcp_conn_id='airflow-conn-id',
google_cloud_storage_conn_id='airflow-conn-id'
)
**Corresponding Schema file** (``employee_schema.json``)::
[
{
"mode": "NULLABLE",
"name": "emp_name",
"type": "STRING"
},
{
"mode": "REQUIRED",
"name": "salary",
"type": "INTEGER"
}
]
**Example (with schema in the DAG)**::
CreateTable = BigQueryCreateEmptyTableOperator(
task_id='BigQueryCreateEmptyTableOperator_task',
dataset_id='ODS',
table_id='Employees',
project_id='internal-gcp-project',
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}],
gcp_conn_id='airflow-conn-id-account',
google_cloud_storage_conn_id='airflow-conn-id'
)
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
:param materialized_view: [Optional] The materialized view definition.
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
:param location: The location used for the operation.
:param cluster_fields: [Optional] The fields used for clustering.
BigQuery supports clustering for both partitioned and
non-partitioned tables.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param if_exists: What should Airflow do if the table exists. If set to `log`, the TI will be marked as
        success and an error message will be logged. Set to `ignore` to ignore the error, set to `fail` to
        fail the TI, and set to `skip` to skip it.
:param exists_ok: Deprecated - use `if_exists="ignore"` instead.
"""
template_fields: Sequence[str] = (
"dataset_id",
"table_id",
"table_resource",
"project_id",
"gcs_schema_object",
"labels",
"view",
"materialized_view",
"impersonation_chain",
)
template_fields_renderers = {"table_resource": "json", "materialized_view": "json"}
ui_color = BigQueryUIColors.TABLE.value
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
dataset_id: str,
table_id: str,
table_resource: dict[str, Any] | None = None,
project_id: str | None = None,
schema_fields: list | None = None,
gcs_schema_object: str | None = None,
time_partitioning: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
google_cloud_storage_conn_id: str = "google_cloud_default",
labels: dict | None = None,
view: dict | None = None,
materialized_view: dict | None = None,
encryption_configuration: dict | None = None,
location: str | None = None,
cluster_fields: list[str] | None = None,
impersonation_chain: str | Sequence[str] | None = None,
if_exists: str = "log",
bigquery_conn_id: str | None = None,
exists_ok: bool | None = None,
**kwargs,
) -> None:
if bigquery_conn_id:
warnings.warn(
"The bigquery_conn_id parameter has been deprecated. Use the gcp_conn_id parameter instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
gcp_conn_id = bigquery_conn_id
super().__init__(**kwargs)
self.project_id = project_id
self.dataset_id = dataset_id
self.table_id = table_id
self.schema_fields = schema_fields
self.gcs_schema_object = gcs_schema_object
self.gcp_conn_id = gcp_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.time_partitioning = {} if time_partitioning is None else time_partitioning
self.labels = labels
self.view = view
self.materialized_view = materialized_view
self.encryption_configuration = encryption_configuration
self.location = location
self.cluster_fields = cluster_fields
self.table_resource = table_resource
self.impersonation_chain = impersonation_chain
if exists_ok is not None:
warnings.warn(
"`exists_ok` parameter is deprecated, please use `if_exists`",
AirflowProviderDeprecationWarning,
)
self.if_exists = IfExistAction.IGNORE if exists_ok else IfExistAction.LOG
else:
self.if_exists = IfExistAction(if_exists)
def execute(self, context: Context) -> None:
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if not self.schema_fields and self.gcs_schema_object:
gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
gcs_hook = GCSHook(
gcp_conn_id=self.google_cloud_storage_conn_id,
impersonation_chain=self.impersonation_chain,
)
schema_fields_string = gcs_hook.download_as_byte_array(gcs_bucket, gcs_object).decode("utf-8")
schema_fields = json.loads(schema_fields_string)
else:
schema_fields = self.schema_fields
try:
self.log.info("Creating table")
table = bq_hook.create_empty_table(
project_id=self.project_id,
dataset_id=self.dataset_id,
table_id=self.table_id,
schema_fields=schema_fields,
time_partitioning=self.time_partitioning,
cluster_fields=self.cluster_fields,
labels=self.labels,
view=self.view,
materialized_view=self.materialized_view,
encryption_configuration=self.encryption_configuration,
table_resource=self.table_resource,
exists_ok=self.if_exists == IfExistAction.IGNORE,
)
persist_kwargs = {
"context": context,
"task_instance": self,
"project_id": table.to_api_repr()["tableReference"]["projectId"],
"dataset_id": table.to_api_repr()["tableReference"]["datasetId"],
"table_id": table.to_api_repr()["tableReference"]["tableId"],
}
self.log.info(
"Table %s.%s.%s created successfully", table.project, table.dataset_id, table.table_id
)
except Conflict:
error_msg = f"Table {self.dataset_id}.{self.table_id} already exists."
if self.if_exists == IfExistAction.LOG:
self.log.info(error_msg)
persist_kwargs = {
"context": context,
"task_instance": self,
"project_id": self.project_id or bq_hook.project_id,
"dataset_id": self.dataset_id,
"table_id": self.table_id,
}
elif self.if_exists == IfExistAction.FAIL:
raise AirflowException(error_msg)
else:
raise AirflowSkipException(error_msg)
BigQueryTableLink.persist(**persist_kwargs)
class BigQueryCreateExternalTableOperator(GoogleCloudBaseOperator):
"""Create a new external table with data from Google Cloud Storage.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google Cloud Storage object name. The object in
Google Cloud Storage must be a JSON file with the schema fields in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryCreateExternalTableOperator`
:param bucket: The bucket to point the external table to. (templated)
:param source_objects: List of Google Cloud Storage URIs to point
table to. If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
:param destination_project_dataset_table: The dotted ``(<project>.)<dataset>.<table>``
BigQuery table to load data into (templated). If ``<project>`` is not included,
project will be the project defined in the connection json.
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
**Example**::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
Should not be set when source_format is 'DATASTORE_BACKUP'.
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
        If provided, all other parameters are ignored. External schema from object will be resolved.
:param schema_object: If set, a GCS object path pointing to a .json file that
contains the schema for the table. (templated)
:param gcs_schema_bucket: GCS bucket name where the schema JSON is stored (templated).
The default value is self.bucket.
:param source_format: File format of the data.
:param autodetect: Try to detect schema and format options automatically.
The schema_fields and schema_object options will be honored when specified explicitly.
https://cloud.google.com/bigquery/docs/schema-detect#schema_auto-detection_for_external_data_sources
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:param field_delimiter: The delimiter to use for the CSV.
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:param quote_character: The value that is used to quote data sections in a CSV file.
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result. Only applicable to CSV, ignored
for other formats.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the Bigquery service.
:param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
and interact with the Google Cloud Storage service.
:param src_fmt_configs: configure optional fields specific to the source format
:param labels: a dictionary containing labels for the table, passed to BigQuery
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
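    **Example** (a minimal usage sketch; the bucket, project, dataset and table names
    below are placeholders, not values from a real deployment)::

        create_external_table = BigQueryCreateExternalTableOperator(
            task_id="create_external_table",
            table_resource={
                "tableReference": {
                    "projectId": "my-project",
                    "datasetId": "my_dataset",
                    "tableId": "external_sales",
                },
                "externalDataConfiguration": {
                    "sourceFormat": "CSV",
                    "sourceUris": ["gs://my-source-bucket/data/sales_*.csv"],
                    "autodetect": True,
                },
            },
        )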
"""
template_fields: Sequence[str] = (
"bucket",
"source_objects",
"schema_object",
"gcs_schema_bucket",
"destination_project_dataset_table",
"labels",
"table_resource",
"impersonation_chain",
)
template_fields_renderers = {"table_resource": "json"}
ui_color = BigQueryUIColors.TABLE.value
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
bucket: str | None = None,
source_objects: list[str] | None = None,
destination_project_dataset_table: str | None = None,
table_resource: dict[str, Any] | None = None,
schema_fields: list | None = None,
schema_object: str | None = None,
gcs_schema_bucket: str | None = None,
source_format: str | None = None,
autodetect: bool = False,
compression: str | None = None,
skip_leading_rows: int | None = None,
field_delimiter: str | None = None,
max_bad_records: int = 0,
quote_character: str | None = None,
allow_quoted_newlines: bool = False,
allow_jagged_rows: bool = False,
gcp_conn_id: str = "google_cloud_default",
google_cloud_storage_conn_id: str = "google_cloud_default",
src_fmt_configs: dict | None = None,
labels: dict | None = None,
encryption_configuration: dict | None = None,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
bigquery_conn_id: str | None = None,
**kwargs,
) -> None:
if bigquery_conn_id:
warnings.warn(
"The bigquery_conn_id parameter has been deprecated. Use the gcp_conn_id parameter instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
gcp_conn_id = bigquery_conn_id
super().__init__(**kwargs)
# BQ config
kwargs_passed = any(
[
destination_project_dataset_table,
schema_fields,
source_format,
compression,
skip_leading_rows,
field_delimiter,
max_bad_records,
autodetect,
quote_character,
allow_quoted_newlines,
allow_jagged_rows,
src_fmt_configs,
labels,
encryption_configuration,
]
)
if not table_resource:
warnings.warn(
"Passing table parameters via keywords arguments will be deprecated. "
"Please provide table definition using `table_resource` parameter.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
if not bucket:
raise ValueError("`bucket` is required when not using `table_resource`.")
if not gcs_schema_bucket:
gcs_schema_bucket = bucket
if not source_objects:
raise ValueError("`source_objects` is required when not using `table_resource`.")
if not source_format:
source_format = "CSV"
if not compression:
compression = "NONE"
if not skip_leading_rows:
skip_leading_rows = 0
if not field_delimiter:
field_delimiter = ","
if not destination_project_dataset_table:
raise ValueError(
"`destination_project_dataset_table` is required when not using `table_resource`."
)
self.bucket = bucket
self.source_objects = source_objects
self.schema_object = schema_object
self.gcs_schema_bucket = gcs_schema_bucket
self.destination_project_dataset_table = destination_project_dataset_table
self.schema_fields = schema_fields
self.source_format = source_format
self.compression = compression
self.skip_leading_rows = skip_leading_rows
self.field_delimiter = field_delimiter
self.table_resource = None
else:
self.table_resource = table_resource
self.bucket = ""
self.source_objects = []
self.schema_object = None
self.gcs_schema_bucket = ""
self.destination_project_dataset_table = ""
if table_resource and kwargs_passed:
raise ValueError("You provided both `table_resource` and exclusive keywords arguments.")
self.max_bad_records = max_bad_records
self.quote_character = quote_character
self.allow_quoted_newlines = allow_quoted_newlines
self.allow_jagged_rows = allow_jagged_rows
self.gcp_conn_id = gcp_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.autodetect = autodetect
self.src_fmt_configs = src_fmt_configs or {}
self.labels = labels
self.encryption_configuration = encryption_configuration
self.location = location
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if self.table_resource:
table = bq_hook.create_empty_table(
table_resource=self.table_resource,
)
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=table.to_api_repr()["tableReference"]["datasetId"],
project_id=table.to_api_repr()["tableReference"]["projectId"],
table_id=table.to_api_repr()["tableReference"]["tableId"],
)
return
if not self.schema_fields and self.schema_object and self.source_format != "DATASTORE_BACKUP":
gcs_hook = GCSHook(
gcp_conn_id=self.google_cloud_storage_conn_id,
impersonation_chain=self.impersonation_chain,
)
schema_fields = json.loads(
gcs_hook.download(self.gcs_schema_bucket, self.schema_object).decode("utf-8")
)
else:
schema_fields = self.schema_fields
source_uris = [f"gs://{self.bucket}/{source_object}" for source_object in self.source_objects]
project_id, dataset_id, table_id = bq_hook.split_tablename(
table_input=self.destination_project_dataset_table,
default_project_id=bq_hook.project_id or "",
)
table_resource = {
"tableReference": {
"projectId": project_id,
"datasetId": dataset_id,
"tableId": table_id,
},
"labels": self.labels,
"schema": {"fields": schema_fields},
"externalDataConfiguration": {
"source_uris": source_uris,
"source_format": self.source_format,
"maxBadRecords": self.max_bad_records,
"autodetect": self.autodetect,
"compression": self.compression,
"csvOptions": {
"fieldDelimiter": self.field_delimiter,
"skipLeadingRows": self.skip_leading_rows,
"quote": self.quote_character,
"allowQuotedNewlines": self.allow_quoted_newlines,
"allowJaggedRows": self.allow_jagged_rows,
},
},
"location": self.location,
"encryptionConfiguration": self.encryption_configuration,
}
table = bq_hook.create_empty_table(
table_resource=table_resource,
)
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=table.to_api_repr()["tableReference"]["datasetId"],
project_id=table.to_api_repr()["tableReference"]["projectId"],
table_id=table.to_api_repr()["tableReference"]["tableId"],
)
class BigQueryDeleteDatasetOperator(GoogleCloudBaseOperator):
"""Delete an existing dataset from your Project in BigQuery.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDeleteDatasetOperator`
:param project_id: The project id of the dataset.
:param dataset_id: The dataset to be deleted.
:param delete_contents: (Optional) Whether to force the deletion even if the dataset is not empty.
Will delete all tables (if any) in the dataset if set to True.
Will raise HttpError 400: "{dataset_id} is still in use" if set to False and dataset is not empty.
The default value is False.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
**Example**::
delete_temp_data = BigQueryDeleteDatasetOperator(
dataset_id='temp-dataset',
project_id='temp-project',
delete_contents=True, # Force the deletion of the dataset as well as its tables (if any).
gcp_conn_id='_my_gcp_conn_',
task_id='Deletetemp',
dag=dag)
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"impersonation_chain",
)
ui_color = BigQueryUIColors.DATASET.value
def __init__(
self,
*,
dataset_id: str,
project_id: str | None = None,
delete_contents: bool = False,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.project_id = project_id
self.delete_contents = delete_contents
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
self.log.info("Dataset id: %s Project id: %s", self.dataset_id, self.project_id)
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
bq_hook.delete_dataset(
project_id=self.project_id, dataset_id=self.dataset_id, delete_contents=self.delete_contents
)
class BigQueryCreateEmptyDatasetOperator(GoogleCloudBaseOperator):
"""Create a new dataset for your Project in BigQuery.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryCreateEmptyDatasetOperator`
:param project_id: The name of the project where we want to create the dataset.
:param dataset_id: The id of dataset. Don't need to provide, if datasetId in dataset_reference.
:param location: The geographic location where the dataset should reside.
:param dataset_reference: Dataset reference that could be provided with request body.
More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param if_exists: What should Airflow do if the dataset exists. If set to `log`, the TI will be passed to
success and an error message will be logged. Set to `ignore` to ignore the error, set to `fail` to
fail the TI, and set to `skip` to skip it.
**Example**::
create_new_dataset = BigQueryCreateEmptyDatasetOperator(
dataset_id='new-dataset',
project_id='my-project',
dataset_reference={"friendlyName": "New Dataset"}
gcp_conn_id='_my_gcp_conn_',
task_id='newDatasetCreator',
dag=dag)
:param exists_ok: Deprecated - use `if_exists="ignore"` instead.
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"dataset_reference",
"impersonation_chain",
)
template_fields_renderers = {"dataset_reference": "json"}
ui_color = BigQueryUIColors.DATASET.value
operator_extra_links = (BigQueryDatasetLink(),)
def __init__(
self,
*,
dataset_id: str | None = None,
project_id: str | None = None,
dataset_reference: dict | None = None,
location: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
if_exists: str = "log",
exists_ok: bool | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.dataset_reference = dataset_reference if dataset_reference else {}
self.impersonation_chain = impersonation_chain
if exists_ok is not None:
warnings.warn(
"`exists_ok` parameter is deprecated, please use `if_exists`",
AirflowProviderDeprecationWarning,
)
self.if_exists = IfExistAction.IGNORE if exists_ok else IfExistAction.LOG
else:
self.if_exists = IfExistAction(if_exists)
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
try:
dataset = bq_hook.create_empty_dataset(
project_id=self.project_id,
dataset_id=self.dataset_id,
dataset_reference=self.dataset_reference,
location=self.location,
exists_ok=self.if_exists == IfExistAction.IGNORE,
)
persist_kwargs = {
"context": context,
"task_instance": self,
"project_id": dataset["datasetReference"]["projectId"],
"dataset_id": dataset["datasetReference"]["datasetId"],
}
except Conflict:
dataset_id = self.dataset_reference.get("datasetReference", {}).get("datasetId", self.dataset_id)
project_id = self.dataset_reference.get("datasetReference", {}).get(
"projectId", self.project_id or bq_hook.project_id
)
persist_kwargs = {
"context": context,
"task_instance": self,
"project_id": project_id,
"dataset_id": dataset_id,
}
error_msg = f"Dataset {dataset_id} already exists."
if self.if_exists == IfExistAction.LOG:
self.log.info(error_msg)
elif self.if_exists == IfExistAction.FAIL:
raise AirflowException(error_msg)
else:
raise AirflowSkipException(error_msg)
BigQueryDatasetLink.persist(**persist_kwargs)
class BigQueryGetDatasetOperator(GoogleCloudBaseOperator):
"""Get the dataset specified by ID.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryGetDatasetOperator`
    :param dataset_id: The id of the dataset to fetch. (Required.)
    :param project_id: (Optional) The name of the project where the dataset resides.
        If not provided, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
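    **Example** (illustrative only; the dataset and project IDs are placeholders)::

        get_dataset = BigQueryGetDatasetOperator(
            task_id="get_dataset",
            dataset_id="my_dataset",
            project_id="my-project",
        )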
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"impersonation_chain",
)
ui_color = BigQueryUIColors.DATASET.value
operator_extra_links = (BigQueryDatasetLink(),)
def __init__(
self,
*,
dataset_id: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start getting dataset: %s:%s", self.project_id, self.dataset_id)
dataset = bq_hook.get_dataset(dataset_id=self.dataset_id, project_id=self.project_id)
dataset_api_repr = dataset.to_api_repr()
BigQueryDatasetLink.persist(
context=context,
task_instance=self,
dataset_id=dataset_api_repr["datasetReference"]["datasetId"],
project_id=dataset_api_repr["datasetReference"]["projectId"],
)
return dataset_api_repr
class BigQueryGetDatasetTablesOperator(GoogleCloudBaseOperator):
"""Retrieve the list of tables in the specified dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryGetDatasetTablesOperator`
:param dataset_id: the dataset ID of the requested dataset.
:param project_id: (Optional) the project of the requested dataset. If None,
self.project_id will be used.
:param max_results: (Optional) the maximum number of tables to return.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
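    **Example** (illustrative only; the dataset ID and result limit are placeholders)::

        get_dataset_tables = BigQueryGetDatasetTablesOperator(
            task_id="get_dataset_tables",
            dataset_id="my_dataset",
            max_results=50,
        )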
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"impersonation_chain",
)
ui_color = BigQueryUIColors.DATASET.value
def __init__(
self,
*,
dataset_id: str,
project_id: str | None = None,
max_results: int | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.project_id = project_id
self.max_results = max_results
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return bq_hook.get_dataset_tables(
dataset_id=self.dataset_id,
project_id=self.project_id,
max_results=self.max_results,
)
class BigQueryPatchDatasetOperator(GoogleCloudBaseOperator):
"""Patch a dataset for your Project in BigQuery.
This operator is deprecated. Please use
    :class:`airflow.providers.google.cloud.operators.bigquery.BigQueryUpdateDatasetOperator`
instead.
Only replaces fields that are provided in the submitted dataset resource.
:param dataset_id: The id of dataset. Don't need to provide,
if datasetId in dataset_reference.
:param dataset_resource: Dataset resource that will be provided with request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param project_id: The name of the project where we want to create the dataset.
Don't need to provide, if projectId in dataset_reference.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"impersonation_chain",
)
template_fields_renderers = {"dataset_resource": "json"}
ui_color = BigQueryUIColors.DATASET.value
def __init__(
self,
*,
dataset_id: str,
dataset_resource: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
warnings.warn(
"This operator is deprecated. Please use BigQueryUpdateDatasetOperator.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.dataset_id = dataset_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.dataset_resource = dataset_resource
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return bq_hook.patch_dataset(
dataset_id=self.dataset_id,
dataset_resource=self.dataset_resource,
project_id=self.project_id,
)
class BigQueryUpdateTableOperator(GoogleCloudBaseOperator):
"""Update a table for your Project in BigQuery.
Use ``fields`` to specify which fields of table to update. If a field
is listed in ``fields`` and is ``None`` in table, it will be deleted.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryUpdateTableOperator`
:param dataset_id: The id of dataset. Don't need to provide,
if datasetId in table_reference.
:param table_id: The id of table. Don't need to provide,
if tableId in table_reference.
    :param table_resource: Table resource that will be provided with request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:param fields: The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
    :param project_id: The name of the project where we want to update the table.
Don't need to provide, if projectId in table_reference.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
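    **Example** (a minimal sketch; the dataset, table and field values below are placeholders)::

        update_table = BigQueryUpdateTableOperator(
            task_id="update_table",
            dataset_id="my_dataset",
            table_id="my_table",
            fields=["friendly_name", "description"],
            table_resource={
                "friendlyName": "Sales table",
                "description": "Table maintained by Airflow",
            },
        )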
"""
template_fields: Sequence[str] = (
"dataset_id",
"table_id",
"project_id",
"impersonation_chain",
)
template_fields_renderers = {"table_resource": "json"}
ui_color = BigQueryUIColors.TABLE.value
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
table_resource: dict[str, Any],
fields: list[str] | None = None,
dataset_id: str | None = None,
table_id: str | None = None,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.table_id = table_id
self.project_id = project_id
self.fields = fields
self.gcp_conn_id = gcp_conn_id
self.table_resource = table_resource
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
table = bq_hook.update_table(
table_resource=self.table_resource,
fields=self.fields,
dataset_id=self.dataset_id,
table_id=self.table_id,
project_id=self.project_id,
)
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=table["tableReference"]["datasetId"],
project_id=table["tableReference"]["projectId"],
table_id=table["tableReference"]["tableId"],
)
return table
class BigQueryUpdateDatasetOperator(GoogleCloudBaseOperator):
"""Update a dataset for your Project in BigQuery.
Use ``fields`` to specify which fields of dataset to update. If a field
is listed in ``fields`` and is ``None`` in dataset, it will be deleted.
If no ``fields`` are provided then all fields of provided ``dataset_resource``
will be used.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryUpdateDatasetOperator`
:param dataset_id: The id of dataset. Don't need to provide,
if datasetId in dataset_reference.
:param dataset_resource: Dataset resource that will be provided with request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param fields: The properties of dataset to change (e.g. "friendly_name").
    :param project_id: The name of the project where we want to update the dataset.
Don't need to provide, if projectId in dataset_reference.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
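    **Example** (a minimal sketch; the dataset ID and description are placeholders)::

        update_dataset = BigQueryUpdateDatasetOperator(
            task_id="update_dataset",
            dataset_id="my_dataset",
            dataset_resource={"description": "Dataset maintained by Airflow"},
        )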
"""
template_fields: Sequence[str] = (
"dataset_id",
"project_id",
"impersonation_chain",
)
template_fields_renderers = {"dataset_resource": "json"}
ui_color = BigQueryUIColors.DATASET.value
operator_extra_links = (BigQueryDatasetLink(),)
def __init__(
self,
*,
dataset_resource: dict[str, Any],
fields: list[str] | None = None,
dataset_id: str | None = None,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.dataset_id = dataset_id
self.project_id = project_id
self.fields = fields
self.gcp_conn_id = gcp_conn_id
self.dataset_resource = dataset_resource
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
fields = self.fields or list(self.dataset_resource.keys())
dataset = bq_hook.update_dataset(
dataset_resource=self.dataset_resource,
project_id=self.project_id,
dataset_id=self.dataset_id,
fields=fields,
)
dataset_api_repr = dataset.to_api_repr()
BigQueryDatasetLink.persist(
context=context,
task_instance=self,
dataset_id=dataset_api_repr["datasetReference"]["datasetId"],
project_id=dataset_api_repr["datasetReference"]["projectId"],
)
return dataset_api_repr
class BigQueryDeleteTableOperator(GoogleCloudBaseOperator):
"""Delete a BigQuery table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDeleteTableOperator`
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted. (templated)
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
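    **Example** (illustrative only; the table path is a placeholder)::

        delete_table = BigQueryDeleteTableOperator(
            task_id="delete_table",
            deletion_dataset_table="my-project.my_dataset.my_table",
            ignore_if_missing=True,
        )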
"""
template_fields: Sequence[str] = (
"deletion_dataset_table",
"impersonation_chain",
)
ui_color = BigQueryUIColors.TABLE.value
def __init__(
self,
*,
deletion_dataset_table: str,
gcp_conn_id: str = "google_cloud_default",
ignore_if_missing: bool = False,
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.deletion_dataset_table = deletion_dataset_table
self.gcp_conn_id = gcp_conn_id
self.ignore_if_missing = ignore_if_missing
self.location = location
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
self.log.info("Deleting: %s", self.deletion_dataset_table)
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
hook.delete_table(table_id=self.deletion_dataset_table, not_found_ok=self.ignore_if_missing)
class BigQueryUpsertTableOperator(GoogleCloudBaseOperator):
"""Upsert to a BigQuery table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryUpsertTableOperator`
:param dataset_id: A dotted
``(<project>.|<project>:)<dataset>`` that indicates which dataset
will be updated. (templated)
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
    :param project_id: The name of the project where we want to upsert the table.
        If None, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
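    **Example** (a minimal sketch; the dataset, table ID and expiration value are placeholders)::

        upsert_table = BigQueryUpsertTableOperator(
            task_id="upsert_table",
            dataset_id="my_dataset",
            table_resource={
                "tableReference": {"tableId": "my_table"},
                "expirationTime": 1704067200000,
            },
        )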
"""
template_fields: Sequence[str] = (
"dataset_id",
"table_resource",
"impersonation_chain",
"project_id",
)
template_fields_renderers = {"table_resource": "json"}
ui_color = BigQueryUIColors.TABLE.value
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
dataset_id: str,
table_resource: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.table_resource = table_resource
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
self.log.info("Upserting Dataset: %s with table_resource: %s", self.dataset_id, self.table_resource)
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
table = hook.run_table_upsert(
dataset_id=self.dataset_id,
table_resource=self.table_resource,
project_id=self.project_id,
)
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=table["tableReference"]["datasetId"],
project_id=table["tableReference"]["projectId"],
table_id=table["tableReference"]["tableId"],
)
class BigQueryUpdateTableSchemaOperator(GoogleCloudBaseOperator):
"""Update BigQuery Table Schema.
Updates fields on a table schema based on contents of the supplied schema_fields_updates
    parameter. The supplied schema does not need to be complete; if a field
    already exists in the schema, you only need to supply the keys and values for the
    items you want to patch, making sure the "name" key is set.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryUpdateTableSchemaOperator`
:param schema_fields_updates: a partial schema resource. see
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema
.. code-block:: python
schema_fields_updates = [
{"name": "emp_name", "description": "Some New Description"},
{
"name": "salary",
"policyTags": {"names": ["some_new_policy_tag"]},
},
{
"name": "departments",
"fields": [
{"name": "name", "description": "Some New Description"},
{"name": "type", "description": "Some New Description"},
],
},
]
:param include_policy_tags: (Optional) If set to True policy tags will be included in
the update request which requires special permissions even if unchanged (default False)
see https://cloud.google.com/bigquery/docs/column-level-security#roles
:param dataset_id: A dotted
``(<project>.|<project>:)<dataset>`` that indicates which dataset
will be updated. (templated)
:param table_id: The table ID of the requested table. (templated)
    :param project_id: The name of the project where we want to update the table.
        If None, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
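    **Example** (a minimal sketch; the dataset, table and descriptions are placeholders)::

        update_table_schema = BigQueryUpdateTableSchemaOperator(
            task_id="update_table_schema",
            dataset_id="my_dataset",
            table_id="my_table",
            schema_fields_updates=[
                {"name": "emp_name", "description": "Employee name"},
                {"name": "salary", "description": "Monthly salary, in USD"},
            ],
        )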
"""
template_fields: Sequence[str] = (
"schema_fields_updates",
"dataset_id",
"table_id",
"project_id",
"impersonation_chain",
)
template_fields_renderers = {"schema_fields_updates": "json"}
ui_color = BigQueryUIColors.TABLE.value
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
*,
schema_fields_updates: list[dict[str, Any]],
dataset_id: str,
table_id: str,
include_policy_tags: bool = False,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.schema_fields_updates = schema_fields_updates
self.include_policy_tags = include_policy_tags
self.table_id = table_id
self.dataset_id = dataset_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context):
bq_hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
table = bq_hook.update_table_schema(
schema_fields_updates=self.schema_fields_updates,
include_policy_tags=self.include_policy_tags,
dataset_id=self.dataset_id,
table_id=self.table_id,
project_id=self.project_id,
)
BigQueryTableLink.persist(
context=context,
task_instance=self,
dataset_id=table["tableReference"]["datasetId"],
project_id=table["tableReference"]["projectId"],
table_id=table["tableReference"]["tableId"],
)
return table
class BigQueryInsertJobOperator(GoogleCloudBaseOperator):
"""Execute a BigQuery job.
Waits for the job to complete and returns job id.
    This operator works in the following way:
- it calculates a unique hash of the job using job's configuration or uuid if ``force_rerun`` is True
- creates ``job_id`` in form of
``[provided_job_id | airflow_{dag_id}_{task_id}_{exec_date}]_{uniqueness_suffix}``
- submits a BigQuery job using the ``job_id``
    - if a job with the given id already exists then it tries to reattach to the job if it is not done and its
      state is in ``reattach_states``. If the job is done the operator will raise ``AirflowException``.
Using ``force_rerun`` will submit a new job every time without attaching to already existing ones.
For job definition see here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryInsertJobOperator`
:param configuration: The configuration parameter maps directly to BigQuery's
configuration field in the job object. For more details see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfiguration
:param job_id: The ID of the job. It will be suffixed with hash of job configuration
unless ``force_rerun`` is True.
The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
dashes (-). The maximum length is 1,024 characters. If not provided then uuid will
be generated.
:param force_rerun: If True then operator will use hash of uuid as job id suffix
:param reattach_states: Set of BigQuery job's states in case of which we should reattach
to the job. Should be other than final states.
:param project_id: Google Cloud Project where the job is running
:param location: location the job is running
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param cancel_on_kill: Flag which indicates whether to cancel the hook's job when on_kill is called
:param result_retry: How to retry the `result` call that retrieves rows
:param result_timeout: The number of seconds to wait for `result` method before using `result_retry`
:param deferrable: Run operator in the deferrable mode
:param poll_interval: (Deferrable mode only) polling period in seconds to check for the status of job.
Defaults to 4 seconds.
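    **Example** (a minimal query-job sketch; the SQL statement, project, dataset and
    table names are placeholders)::

        insert_query_job = BigQueryInsertJobOperator(
            task_id="insert_query_job",
            configuration={
                "query": {
                    "query": "SELECT COUNT(*) FROM `my-project.my_dataset.my_table`",
                    "useLegacySql": False,
                }
            },
            location="US",
        )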
"""
template_fields: Sequence[str] = (
"configuration",
"job_id",
"impersonation_chain",
"project_id",
)
template_ext: Sequence[str] = (
".json",
".sql",
)
template_fields_renderers = {"configuration": "json", "configuration.query.query": "sql"}
ui_color = BigQueryUIColors.QUERY.value
operator_extra_links = (BigQueryTableLink(),)
def __init__(
self,
configuration: dict[str, Any],
project_id: str | None = None,
location: str | None = None,
job_id: str | None = None,
force_rerun: bool = True,
reattach_states: set[str] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
cancel_on_kill: bool = True,
result_retry: Retry = DEFAULT_RETRY,
result_timeout: float | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.configuration = configuration
self.location = location
self.job_id = job_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.force_rerun = force_rerun
self.reattach_states: set[str] = reattach_states or set()
self.impersonation_chain = impersonation_chain
self.cancel_on_kill = cancel_on_kill
self.result_retry = result_retry
self.result_timeout = result_timeout
self.hook: BigQueryHook | None = None
self.deferrable = deferrable
self.poll_interval = poll_interval
def prepare_template(self) -> None:
# If .json is passed then we have to read the file
if isinstance(self.configuration, str) and self.configuration.endswith(".json"):
with open(self.configuration) as file:
self.configuration = json.loads(file.read())
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
# Submit a new job without waiting for it to complete.
return hook.insert_job(
configuration=self.configuration,
project_id=self.project_id,
location=self.location,
job_id=job_id,
timeout=self.result_timeout,
retry=self.result_retry,
nowait=True,
)
@staticmethod
def _handle_job_error(job: BigQueryJob | UnknownJob) -> None:
if job.error_result:
raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
def execute(self, context: Any):
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook = hook
job_id = hook.generate_job_id(
job_id=self.job_id,
dag_id=self.dag_id,
task_id=self.task_id,
logical_date=context["logical_date"],
configuration=self.configuration,
force_rerun=self.force_rerun,
)
try:
self.log.info("Executing: %s'", self.configuration)
job: BigQueryJob | UnknownJob = self._submit_job(hook, job_id)
except Conflict:
# If the job already exists retrieve it
job = hook.get_job(
project_id=self.project_id,
location=self.location,
job_id=job_id,
)
if job.state in self.reattach_states:
# We are reattaching to a job
job._begin()
self._handle_job_error(job)
else:
# Same job configuration so we need force_rerun
raise AirflowException(
f"Job with id: {job_id} already exists and is in {job.state} state. If you "
f"want to force rerun it consider setting `force_rerun=True`."
f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
)
job_types = {
LoadJob._JOB_TYPE: ["sourceTable", "destinationTable"],
CopyJob._JOB_TYPE: ["sourceTable", "destinationTable"],
ExtractJob._JOB_TYPE: ["sourceTable"],
QueryJob._JOB_TYPE: ["destinationTable"],
}
project_id = self.project_id or hook.project_id
if project_id:
for job_type, tables_prop in job_types.items():
job_configuration = job.to_api_repr()["configuration"]
if job_type in job_configuration:
for table_prop in tables_prop:
if table_prop in job_configuration[job_type]:
table = job_configuration[job_type][table_prop]
persist_kwargs = {
"context": context,
"task_instance": self,
"project_id": project_id,
"table_id": table,
}
if not isinstance(table, str):
persist_kwargs["table_id"] = table["tableId"]
persist_kwargs["dataset_id"] = table["datasetId"]
persist_kwargs["project_id"] = table["projectId"]
BigQueryTableLink.persist(**persist_kwargs)
self.job_id = job.job_id
project_id = self.project_id or self.hook.project_id
if project_id:
job_id_path = convert_job_id(job_id=job_id, project_id=project_id, location=self.location)
context["ti"].xcom_push(key="job_id_path", value=job_id_path)
# Wait for the job to complete
if not self.deferrable:
job.result(timeout=self.result_timeout, retry=self.result_retry)
self._handle_job_error(job)
return self.job_id
else:
if job.running():
self.defer(
timeout=self.execution_timeout,
trigger=BigQueryInsertJobTrigger(
conn_id=self.gcp_conn_id,
job_id=self.job_id,
project_id=self.project_id,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
self.log.info("Current state of job %s is %s", job.job_id, job.state)
self._handle_job_error(job)
def execute_complete(self, context: Context, event: dict[str, Any]):
"""Callback for when the trigger fires.
        This returns immediately. It relies on the trigger to throw an exception;
        otherwise it assumes the execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
return self.job_id
def on_kill(self) -> None:
if self.job_id and self.cancel_on_kill:
self.hook.cancel_job( # type: ignore[union-attr]
job_id=self.job_id, project_id=self.project_id, location=self.location
)
else:
self.log.info("Skipping to cancel job: %s:%s.%s", self.project_id, self.location, self.job_id)
airflow | airflow-main/airflow/providers/google/cloud/operators/looker.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Looker operators."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.looker import LookerHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class LookerStartPdtBuildOperator(GoogleCloudBaseOperator):
"""
Submits a PDT materialization job to Looker.
:param looker_conn_id: Required. The connection ID to use connecting to Looker.
:param model: Required. The model of the PDT to start building.
:param view: Required. The view of the PDT to start building.
:param query_params: Optional. Additional materialization parameters.
:param asynchronous: Optional. Flag indicating whether to wait for the job
to finish or return immediately.
        This is useful for submitting long-running jobs and
        waiting on them asynchronously using the LookerCheckPdtBuildSensor.
    :param cancel_on_kill: Optional. Flag which indicates whether to cancel the
        hook's job when on_kill is called.
    :param wait_time: Optional. Number of seconds between checks for the job to be
        ready. Used only if ``asynchronous`` is False.
    :param wait_timeout: Optional. How many seconds to wait for the job to be ready.
        Used only if ``asynchronous`` is False.
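    **Example** (illustrative only; the connection ID, model and view names are placeholders)::

        start_pdt_build = LookerStartPdtBuildOperator(
            task_id="start_pdt_build",
            looker_conn_id="my_looker_conn",
            model="sales_model",
            view="orders_pdt",
            asynchronous=True,
        )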
"""
def __init__(
self,
looker_conn_id: str,
model: str,
view: str,
query_params: dict | None = None,
asynchronous: bool = False,
cancel_on_kill: bool = True,
wait_time: int = 10,
wait_timeout: int | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model = model
self.view = view
self.query_params = query_params
self.looker_conn_id = looker_conn_id
self.asynchronous = asynchronous
self.cancel_on_kill = cancel_on_kill
self.wait_time = wait_time
self.wait_timeout = wait_timeout
self.hook: LookerHook | None = None
self.materialization_id: str | None = None
def execute(self, context: Context) -> str:
self.hook = LookerHook(looker_conn_id=self.looker_conn_id)
resp = self.hook.start_pdt_build(
model=self.model,
view=self.view,
query_params=self.query_params,
)
self.materialization_id = resp.materialization_id
if not self.materialization_id:
raise AirflowException(
f"No `materialization_id` was returned for model: {self.model}, view: {self.view}."
)
self.log.info("PDT materialization job submitted successfully. Job id: %s.", self.materialization_id)
if not self.asynchronous:
self.hook.wait_for_job(
materialization_id=self.materialization_id,
wait_time=self.wait_time,
timeout=self.wait_timeout,
)
return self.materialization_id
def on_kill(self):
if self.materialization_id and self.cancel_on_kill:
self.hook.stop_pdt_build(materialization_id=self.materialization_id)
airflow | airflow-main/airflow/providers/google/cloud/operators/spanner.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Spanner operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.spanner import SpannerHook
from airflow.providers.google.cloud.links.spanner import SpannerDatabaseLink, SpannerInstanceLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class SpannerDeployInstanceOperator(GoogleCloudBaseOperator):
"""
Create or update a Cloud Spanner instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeployInstanceOperator`
:param instance_id: Cloud Spanner instance ID.
:param configuration_name: The name of the Cloud Spanner instance configuration
defining how the instance will be created. Required for
instances that do not yet exist.
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:param display_name: (Optional) The display name for the Cloud Spanner instance in
the Google Cloud Console. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, the name is the same as the instance ID.
:param project_id: Optional, the ID of the project which owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
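    **Example** (a minimal sketch; the instance ID, configuration name and display name
    below are placeholders)::

        spanner_instance_deploy = SpannerDeployInstanceOperator(
            task_id="spanner_instance_deploy",
            instance_id="my-instance",
            configuration_name="projects/my-project/instanceConfigs/regional-europe-west3",
            node_count=1,
            display_name="My Spanner instance",
        )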
"""
# [START gcp_spanner_deploy_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"configuration_name",
"display_name",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_spanner_deploy_template_fields]
operator_extra_links = (SpannerInstanceLink(),)
def __init__(
self,
*,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.configuration_name = configuration_name
self.node_count = node_count
self.display_name = display_name
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
def execute(self, context: Context) -> None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not hook.get_instance(project_id=self.project_id, instance_id=self.instance_id):
self.log.info("Creating Cloud Spanner instance '%s'", self.instance_id)
func = hook.create_instance
else:
self.log.info("Updating Cloud Spanner instance '%s'", self.instance_id)
func = hook.update_instance
func(
project_id=self.project_id,
instance_id=self.instance_id,
configuration_name=self.configuration_name,
node_count=self.node_count,
display_name=self.display_name,
)
SpannerInstanceLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
project_id=self.project_id or hook.project_id,
)
class SpannerDeleteInstanceOperator(GoogleCloudBaseOperator):
"""
Delete a Cloud Spanner instance; if an instance does not exist, no action is taken and the task succeeds.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
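    **Example** (illustrative only; the instance ID is a placeholder)::

        spanner_instance_delete = SpannerDeleteInstanceOperator(
            task_id="spanner_instance_delete",
            instance_id="my-instance",
        )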
"""
# [START gcp_spanner_delete_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_spanner_delete_template_fields]
def __init__(
self,
*,
instance_id: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
def execute(self, context: Context) -> bool | None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if hook.get_instance(project_id=self.project_id, instance_id=self.instance_id):
return hook.delete_instance(project_id=self.project_id, instance_id=self.instance_id)
else:
self.log.info(
"Instance '%s' does not exist in project '%s'. Aborting delete.",
self.instance_id,
self.project_id,
)
return True
class SpannerQueryDatabaseInstanceOperator(GoogleCloudBaseOperator):
"""
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerQueryDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param query: The query or list of queries to be executed. Can be a path to a SQL
file.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_query_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"database_id",
"query",
"gcp_conn_id",
"impersonation_chain",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"query": "sql"}
# [END gcp_spanner_query_template_fields]
operator_extra_links = (SpannerDatabaseLink(),)
def __init__(
self,
*,
instance_id: str,
database_id: str,
query: str | list[str],
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.query = query
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
if not self.query:
raise AirflowException("The required parameter 'query' is empty")
def execute(self, context: Context):
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if isinstance(self.query, str):
queries = [x.strip() for x in self.query.split(";")]
self.sanitize_queries(queries)
else:
queries = self.query
self.log.info(
"Executing DML query(-ies) on projects/%s/instances/%s/databases/%s",
self.project_id,
self.instance_id,
self.database_id,
)
self.log.info(queries)
hook.execute_dml(
project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
queries=queries,
)
SpannerDatabaseLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
database_id=self.database_id,
project_id=self.project_id or hook.project_id,
)
@staticmethod
def sanitize_queries(queries: list[str]) -> None:
"""
        Drop the trailing empty query that a terminating semicolon leaves behind after splitting.
        :param queries: list of queries to sanitize; modified in place
"""
if queries and queries[-1] == "":
del queries[-1]
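# Illustrative sketch of how a semicolon-separated SQL string is split and sanitized before
# being handed to the hook. Pure Python, no Airflow context required; the statements below
# are hypothetical.
def _example_split_and_sanitize() -> list[str]:
    sql = "DELETE FROM users WHERE id = 1; UPDATE users SET active = TRUE WHERE id = 2;"
    queries = [x.strip() for x in sql.split(";")]
    # The trailing semicolon leaves an empty string at the end of the list;
    # sanitize_queries drops it, leaving exactly two DML statements.
    SpannerQueryDatabaseInstanceOperator.sanitize_queries(queries)
    return queries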
class SpannerDeployDatabaseInstanceOperator(GoogleCloudBaseOperator):
"""
    Creates a new Cloud Spanner database; if the database already exists, the operator does nothing.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeployDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param ddl_statements: The string list containing DDL for the new database.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_deploy_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"database_id",
"ddl_statements",
"gcp_conn_id",
"impersonation_chain",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"ddl_statements": "sql"}
# [END gcp_spanner_database_deploy_template_fields]
operator_extra_links = (SpannerDatabaseLink(),)
def __init__(
self,
*,
instance_id: str,
database_id: str,
ddl_statements: list[str],
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.ddl_statements = ddl_statements
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
def execute(self, context: Context) -> bool | None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
SpannerDatabaseLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
database_id=self.database_id,
project_id=self.project_id or hook.project_id,
)
if not hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
):
self.log.info(
"Creating Cloud Spanner database '%s' in project '%s' and instance '%s'",
self.database_id,
self.project_id,
self.instance_id,
)
return hook.create_database(
project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
ddl_statements=self.ddl_statements,
)
else:
self.log.info(
"The database '%s' in project '%s' and instance '%s'"
" already exists. Nothing to do. Exiting.",
self.database_id,
self.project_id,
self.instance_id,
)
return True
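# Illustrative usage sketch: creating a database with initial DDL. All identifiers and the
# DDL statement below are hypothetical placeholders.
def _example_spanner_deploy_database_dag():  # illustration only
    from datetime import datetime

    from airflow import DAG

    with DAG(dag_id="example_spanner_deploy_database", schedule=None, start_date=datetime(2023, 1, 1)) as dag:
        SpannerDeployDatabaseInstanceOperator(
            task_id="spanner_database_deploy",
            project_id="my-project",  # hypothetical
            instance_id="my-spanner-instance",  # hypothetical
            database_id="my-database",  # hypothetical
            ddl_statements=[
                "CREATE TABLE users (id INT64, name STRING(100)) PRIMARY KEY (id)",
            ],
        )
    return dag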
class SpannerUpdateDatabaseInstanceOperator(GoogleCloudBaseOperator):
"""
    Updates a Cloud Spanner database with the specified DDL statements.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerUpdateDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param ddl_statements: The string list containing DDL to apply to the database.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param operation_id: (Optional) Unique per database operation id that can
be specified to implement idempotency check.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_update_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"database_id",
"ddl_statements",
"gcp_conn_id",
"impersonation_chain",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"ddl_statements": "sql"}
# [END gcp_spanner_database_update_template_fields]
operator_extra_links = (SpannerDatabaseLink(),)
def __init__(
self,
*,
instance_id: str,
database_id: str,
ddl_statements: list[str],
project_id: str | None = None,
operation_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.ddl_statements = ddl_statements
self.operation_id = operation_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
if not self.ddl_statements:
raise AirflowException("The required parameter 'ddl_statements' is empty or None")
def execute(self, context: Context) -> None:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
):
raise AirflowException(
f"The Cloud Spanner database '{self.database_id}' in project '{self.project_id}' "
f"and instance '{self.instance_id}' is missing. "
f"Create the database first before you can update it."
)
else:
SpannerDatabaseLink.persist(
context=context,
task_instance=self,
instance_id=self.instance_id,
database_id=self.database_id,
project_id=self.project_id or hook.project_id,
)
return hook.update_database(
project_id=self.project_id,
instance_id=self.instance_id,
database_id=self.database_id,
ddl_statements=self.ddl_statements,
operation_id=self.operation_id,
)
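# Illustrative usage sketch: applying additional DDL to an existing database. The
# operation_id makes retries idempotent; all values below are hypothetical placeholders.
def _example_spanner_update_database_dag():  # illustration only
    from datetime import datetime

    from airflow import DAG

    with DAG(dag_id="example_spanner_update_database", schedule=None, start_date=datetime(2023, 1, 1)) as dag:
        SpannerUpdateDatabaseInstanceOperator(
            task_id="spanner_database_update",
            project_id="my-project",  # hypothetical
            instance_id="my-spanner-instance",  # hypothetical
            database_id="my-database",  # hypothetical
            ddl_statements=["CREATE INDEX users_by_name ON users (name)"],
            operation_id="add-users-by-name-index-v1",  # hypothetical, used for idempotency
        )
    return dag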
class SpannerDeleteDatabaseInstanceOperator(GoogleCloudBaseOperator):
"""
Deletes a Cloud Spanner database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerDeleteDatabaseInstanceOperator`
:param instance_id: Cloud Spanner instance ID.
:param database_id: Cloud Spanner database ID.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_database_delete_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"database_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END gcp_spanner_database_delete_template_fields]
def __init__(
self,
*,
instance_id: str,
database_id: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance_id:
raise AirflowException("The required parameter 'instance_id' is empty or None")
if not self.database_id:
raise AirflowException("The required parameter 'database_id' is empty or None")
def execute(self, context: Context) -> bool:
hook = SpannerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
database = hook.get_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
)
if not database:
self.log.info(
"The Cloud Spanner database was missing: "
"'%s' in project '%s' and instance '%s'. Assuming success.",
self.database_id,
self.project_id,
self.instance_id,
)
return True
else:
return hook.delete_database(
project_id=self.project_id, instance_id=self.instance_id, database_id=self.database_id
)
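# Illustrative usage sketch: a hypothetical teardown chain that removes the database before
# the instance. Both operators succeed even if the resource is already gone; all IDs are
# placeholders.
def _example_spanner_teardown_dag():  # illustration only
    from datetime import datetime

    from airflow import DAG

    with DAG(dag_id="example_spanner_teardown", schedule=None, start_date=datetime(2023, 1, 1)) as dag:
        delete_database = SpannerDeleteDatabaseInstanceOperator(
            task_id="spanner_database_delete",
            project_id="my-project",  # hypothetical
            instance_id="my-spanner-instance",  # hypothetical
            database_id="my-database",  # hypothetical
        )
        delete_instance = SpannerDeleteInstanceOperator(
            task_id="spanner_instance_delete",
            project_id="my-project",
            instance_id="my-spanner-instance",
        )
        delete_database >> delete_instance
    return dag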
| 24,817 | 40.781145 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/dataform.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.google.cloud.links.dataform import (
DataformRepositoryLink,
DataformWorkflowInvocationLink,
DataformWorkspaceLink,
)
if TYPE_CHECKING:
from airflow.utils.context import Context
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.dataform_v1beta1.types import (
CompilationResult,
InstallNpmPackagesResponse,
MakeDirectoryResponse,
Repository,
WorkflowInvocation,
Workspace,
WriteFileResponse,
)
from airflow.providers.google.cloud.hooks.dataform import DataformHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
class DataformCreateCompilationResultOperator(GoogleCloudBaseOperator):
"""
Creates a new CompilationResult in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param compilation_result: Required. The compilation result to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
compilation_result: CompilationResult | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.compilation_result = compilation_result
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.create_compilation_result(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
compilation_result=self.compilation_result,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return CompilationResult.to_dict(result)
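# Illustrative usage sketch: the compilation_result argument can be passed as a plain dict
# mirroring the google.cloud.dataform_v1beta1 CompilationResult message. All IDs and the
# workspace resource name below are hypothetical placeholders.
def _example_create_compilation_result():  # illustration only
    return DataformCreateCompilationResultOperator(
        task_id="create_compilation_result",
        project_id="my-project",  # hypothetical
        region="us-central1",  # hypothetical
        repository_id="my-dataform-repository",  # hypothetical
        compilation_result={
            "git_commitish": "main",
            "workspace": (
                "projects/my-project/locations/us-central1/"
                "repositories/my-dataform-repository/workspaces/my-workspace"
            ),
        },
    )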
class DataformGetCompilationResultOperator(GoogleCloudBaseOperator):
"""
Fetches a single CompilationResult.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
    :param compilation_result_id: The ID of the Dataform compilation result.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("repository_id", "compilation_result_id", "impersonation_chain")
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
compilation_result_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.compilation_result_id = compilation_result_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.get_compilation_result(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
compilation_result_id=self.compilation_result_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return CompilationResult.to_dict(result)
class DataformCreateWorkflowInvocationOperator(GoogleCloudBaseOperator):
"""
Creates a new WorkflowInvocation in a given Repository.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation: Required. The workflow invocation resource to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param asynchronous: Flag to submit the workflow invocation without waiting for it to
        finish. The workflow_invocation_id returned by the Dataform API can then be awaited
        asynchronously with the DataformWorkflowInvocationStateSensor.
    :param wait_time: Number of seconds between status checks while waiting for the workflow
        invocation to finish.
"""
template_fields = ("workflow_invocation", "impersonation_chain")
operator_extra_links = (DataformWorkflowInvocationLink(),)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation: WorkflowInvocation | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: int | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
wait_time: int = 10,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workflow_invocation = workflow_invocation
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
self.wait_time = wait_time
def execute(self, context: Context):
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.create_workflow_invocation(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation=self.workflow_invocation,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
workflow_invocation_id = result.name.split("/")[-1]
DataformWorkflowInvocationLink.persist(
operator_instance=self,
context=context,
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=workflow_invocation_id,
)
if not self.asynchronous:
hook.wait_for_workflow_invocation(
workflow_invocation_id=workflow_invocation_id,
repository_id=self.repository_id,
project_id=self.project_id,
region=self.region,
timeout=self.timeout,
wait_time=self.wait_time,
)
return WorkflowInvocation.to_dict(result)
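# Illustrative usage sketch: invoking a workflow from a previously created compilation
# result. The upstream task id and all resource IDs are hypothetical; the Jinja expression
# works because "workflow_invocation" is a templated field.
def _example_create_workflow_invocation():  # illustration only
    return DataformCreateWorkflowInvocationOperator(
        task_id="create_workflow_invocation",
        project_id="my-project",  # hypothetical
        region="us-central1",  # hypothetical
        repository_id="my-dataform-repository",  # hypothetical
        workflow_invocation={
            "compilation_result": "{{ task_instance.xcom_pull('create_compilation_result')['name'] }}"
        },
        # asynchronous=True would return right after submission; pair it with
        # DataformWorkflowInvocationStateSensor to wait for completion elsewhere.
    )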
class DataformGetWorkflowInvocationOperator(GoogleCloudBaseOperator):
"""
Fetches a single WorkflowInvocation.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
    :param workflow_invocation_id: The ID of the workflow invocation resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("repository_id", "workflow_invocation_id", "impersonation_chain")
operator_extra_links = (DataformWorkflowInvocationLink(),)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workflow_invocation_id = workflow_invocation_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.get_workflow_invocation(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=self.workflow_invocation_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return WorkflowInvocation.to_dict(result)
class DataformCancelWorkflowInvocationOperator(GoogleCloudBaseOperator):
"""
Requests cancellation of a running WorkflowInvocation.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
    :param workflow_invocation_id: The ID of the workflow invocation resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("repository_id", "workflow_invocation_id", "impersonation_chain")
operator_extra_links = (DataformWorkflowInvocationLink(),)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workflow_invocation_id = workflow_invocation_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.cancel_workflow_invocation(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=self.workflow_invocation_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class DataformCreateRepositoryOperator(GoogleCloudBaseOperator):
"""
    Creates a Dataform repository.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (DataformRepositoryLink(),)
template_fields = (
"project_id",
"repository_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
repository = hook.create_repository(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataformRepositoryLink.persist(
operator_instance=self,
context=context,
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
)
return Repository.to_dict(repository)
class DataformDeleteRepositoryOperator(GoogleCloudBaseOperator):
"""
    Deletes a Dataform repository.
    :param project_id: Required. The ID of the Google Cloud project where the repository is located.
    :param region: Required. The ID of the Google Cloud region where the repository is located.
:param repository_id: Required. The ID of the Dataform repository that should be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"repository_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
force: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.repository_id = repository_id
self.project_id = project_id
self.region = region
self.force = force
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_repository(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
force=self.force,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class DataformCreateWorkspaceOperator(GoogleCloudBaseOperator):
"""
    Creates a Dataform workspace.
    :param project_id: Required. The ID of the Google Cloud project in which the workspace should be created.
    :param region: Required. The ID of the Google Cloud region in which the workspace should be created.
:param repository_id: Required. The ID of the Dataform repository that the workspace belongs to.
:param workspace_id: Required. The ID of the new workspace that will be created.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (DataformWorkspaceLink(),)
template_fields = (
"project_id",
"repository_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.workspace_id = workspace_id
self.repository_id = repository_id
self.region = region
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
workspace = hook.create_workspace(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataformWorkspaceLink.persist(
operator_instance=self,
context=context,
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
)
return Workspace.to_dict(workspace)
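# Illustrative usage sketch: a repository must exist before a workspace can be created in
# it, so the two operators are typically chained. All IDs below are hypothetical
# placeholders.
def _example_repository_and_workspace_dag():  # illustration only
    from datetime import datetime

    from airflow import DAG

    with DAG(dag_id="example_dataform_setup", schedule=None, start_date=datetime(2023, 1, 1)) as dag:
        create_repository = DataformCreateRepositoryOperator(
            task_id="create_repository",
            project_id="my-project",  # hypothetical
            region="us-central1",  # hypothetical
            repository_id="my-dataform-repository",  # hypothetical
        )
        create_workspace = DataformCreateWorkspaceOperator(
            task_id="create_workspace",
            project_id="my-project",
            region="us-central1",
            repository_id="my-dataform-repository",
            workspace_id="my-workspace",  # hypothetical
        )
        create_repository >> create_workspace
    return dag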
class DataformDeleteWorkspaceOperator(GoogleCloudBaseOperator):
"""
    Deletes a Dataform workspace.
    :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
    :param region: Required. The ID of the Google Cloud region where the workspace is located.
    :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
:param workspace_id: Required. The ID of the Dataform workspace that should be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_workspace(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class DataformWriteFileOperator(GoogleCloudBaseOperator):
"""
    Writes a new file to the specified workspace.
    :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
    :param region: Required. The ID of the Google Cloud region where the workspace is located.
    :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
    :param workspace_id: Required. The ID of the Dataform workspace where the file should be created.
    :param filepath: Required. Path to the file, including the file name, relative to the workspace root.
:param contents: Required. Content of the file to be written.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
contents: bytes,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.filepath = filepath
self.contents = contents
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
write_file_response = hook.write_file(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
filepath=self.filepath,
contents=self.contents,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return WriteFileResponse.to_dict(write_file_response)
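# Illustrative usage sketch: contents must be bytes and the file path is relative to the
# workspace root. The path and contents below are hypothetical placeholders.
def _example_write_file():  # illustration only
    return DataformWriteFileOperator(
        task_id="write_definition_file",
        project_id="my-project",  # hypothetical
        region="us-central1",  # hypothetical
        repository_id="my-dataform-repository",  # hypothetical
        workspace_id="my-workspace",  # hypothetical
        filepath="definitions/my_table.sqlx",  # hypothetical
        contents=b"config { type: 'view' }\nSELECT 1 AS one",
    )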
class DataformMakeDirectoryOperator(GoogleCloudBaseOperator):
"""
    Makes a new directory in the specified workspace.
    :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
    :param region: Required. The ID of the Google Cloud region where the workspace is located.
    :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
    :param workspace_id: Required. The ID of the Dataform workspace where the directory should be created.
    :param directory_path: Required. The directory's full path, including the directory name, relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
directory_path: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.directory_path = directory_path
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
make_directory_response = hook.make_directory(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
path=self.directory_path,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MakeDirectoryResponse.to_dict(make_directory_response)
class DataformRemoveFileOperator(GoogleCloudBaseOperator):
"""
    Removes a file from the specified workspace.
    :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
    :param region: Required. The ID of the Google Cloud region where the workspace is located.
    :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
    :param workspace_id: Required. The ID of the Dataform workspace where the file is located.
    :param filepath: Required. The file's full path, including the file name, relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.filepath = filepath
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.remove_file(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
filepath=self.filepath,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class DataformRemoveDirectoryOperator(GoogleCloudBaseOperator):
"""
    Removes a directory from the specified workspace.
    :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
    :param region: Required. The ID of the Google Cloud region where the workspace is located.
    :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
    :param workspace_id: Required. The ID of the Dataform workspace where the directory is located.
    :param directory_path: Required. The directory's full path, including the directory name, relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
directory_path: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.directory_path = directory_path
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.remove_directory(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
path=self.directory_path,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class DataformInstallNpmPackagesOperator(GoogleCloudBaseOperator):
"""Install NPM dependencies in the provided workspace.
Requires "package.json" to be created in the workspace.
    :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
    :param region: Required. The ID of the Google Cloud region where the workspace is located.
    :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
:param workspace_id: Required. The ID of the Dataform workspace.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.install_npm_packages(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return InstallNpmPackagesResponse.to_dict(response)
| 44,616 | 39.340868 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/bigquery_dts.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery Data Transfer Service operators."""
from __future__ import annotations
import time
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.bigquery_datatransfer_v1 import (
StartManualTransferRunsResponse,
TransferConfig,
TransferRun,
TransferState,
)
from airflow import AirflowException
from airflow.configuration import conf
from airflow.providers.google.cloud.hooks.bigquery_dts import BiqQueryDataTransferServiceHook, get_object_id
from airflow.providers.google.cloud.links.bigquery_dts import BigQueryDataTransferConfigLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.bigquery_dts import BigQueryDataTransferRunTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
def _get_transfer_config_details(config_transfer_name: str):
config_details = config_transfer_name.split("/")
return {"project_id": config_details[1], "region": config_details[3], "config_id": config_details[5]}
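# Illustrative sketch: the helper above expects the fully qualified transfer config
# resource name returned by the API. The name below is a hypothetical placeholder; it is
# split into {"project_id": "my-project", "region": "us", "config_id": "abc123"}.
def _example_parse_transfer_config_name() -> dict:
    name = "projects/my-project/locations/us/transferConfigs/abc123"  # hypothetical
    return _get_transfer_config_details(name)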
class BigQueryCreateDataTransferOperator(GoogleCloudBaseOperator):
"""
Creates a new data transfer configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryCreateDataTransferOperator`
:param transfer_config: Data transfer configuration to create.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param location: BigQuery Transfer Service location for regional transfers.
:param authorization_code: authorization code to use with this transfer configuration.
This is required if new credentials are needed.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"transfer_config",
"project_id",
"authorization_code",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (BigQueryDataTransferConfigLink(),)
def __init__(
self,
*,
transfer_config: dict,
project_id: str | None = None,
location: str | None = None,
authorization_code: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id="google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transfer_config = transfer_config
self.authorization_code = authorization_code
self.project_id = project_id
self.location = location
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, location=self.location
)
self.log.info("Creating DTS transfer config")
response = hook.create_transfer_config(
project_id=self.project_id,
transfer_config=self.transfer_config,
authorization_code=self.authorization_code,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
transfer_config = _get_transfer_config_details(response.name)
BigQueryDataTransferConfigLink.persist(
context=context,
task_instance=self,
region=transfer_config["region"],
config_id=transfer_config["config_id"],
project_id=transfer_config["project_id"],
)
result = TransferConfig.to_dict(response)
self.log.info("Created DTS transfer config %s", get_object_id(result))
self.xcom_push(context, key="transfer_config_id", value=get_object_id(result))
# don't push AWS secret in XCOM
result.get("params", {}).pop("secret_access_key", None)
result.get("params", {}).pop("access_key_id", None)
return result
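# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# A minimal example of wiring the operator above into a DAG. Everything below
# (project id, dataset, display name, schedule, query) is a hypothetical
# placeholder, and the function is never called; it only shows the call shape.
def _example_create_transfer_config_dag():
    from datetime import datetime

    from airflow import DAG

    transfer_config = {
        "destination_dataset_id": "example_dataset",  # hypothetical
        "display_name": "example scheduled query",  # hypothetical
        "data_source_id": "scheduled_query",  # assumed data source for a scheduled query
        "schedule": "every 24 hours",
        "params": {"query": "SELECT 1"},
    }
    with DAG(
        dag_id="example_bq_dts_create",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        BigQueryCreateDataTransferOperator(
            task_id="create_transfer_config",
            transfer_config=transfer_config,
            project_id="example-project",  # hypothetical
        )
    return dag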
class BigQueryDeleteDataTransferConfigOperator(GoogleCloudBaseOperator):
"""
Deletes transfer configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDeleteDataTransferConfigOperator`
:param transfer_config_id: Id of transfer config to be used.
    :param project_id: The BigQuery project id where the transfer configuration is stored.
        If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: BigQuery Transfer Service location for regional transfers.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"transfer_config_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
transfer_config_id: str,
project_id: str | None = None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id="google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.transfer_config_id = transfer_config_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, location=self.location
)
hook.delete_transfer_config(
transfer_config_id=self.transfer_config_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
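# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# A minimal sketch of deleting a transfer config whose id was pushed to XCom by a
# task named "create_transfer_config" (assumed to exist in the same DAG). Because
# transfer_config_id is a templated field, a Jinja xcom_pull expression can be used.
def _example_delete_transfer_config_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_bq_dts_delete",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        BigQueryDeleteDataTransferConfigOperator(
            task_id="delete_transfer_config",
            transfer_config_id=(
                "{{ task_instance.xcom_pull('create_transfer_config', key='transfer_config_id') }}"
            ),
            project_id="example-project",  # hypothetical
        )
    return dag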
class BigQueryDataTransferServiceStartTransferRunsOperator(GoogleCloudBaseOperator):
"""
Start manual transfer runs to be executed now with schedule_time equal to current time.
The transfer runs can be created for a time range where the run_time is between
start_time (inclusive) and end_time (exclusive), or for a specific run_time.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDataTransferServiceStartTransferRunsOperator`
:param transfer_config_id: Id of transfer config to be used.
:param requested_time_range: Time range for the transfer runs that should be started.
If a dict is provided, it must be of the same form as the protobuf
message `~google.cloud.bigquery_datatransfer_v1.types.TimeRange`
:param requested_run_time: Specific run_time for a transfer run to be started. The
requested_run_time must not be in the future. If a dict is provided, it
must be of the same form as the protobuf message
`~google.cloud.bigquery_datatransfer_v1.types.Timestamp`
    :param project_id: The BigQuery project id where the transfer configuration is stored.
        If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: BigQuery Transfer Service location for regional transfers.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
"""
template_fields: Sequence[str] = (
"transfer_config_id",
"project_id",
"requested_time_range",
"requested_run_time",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (BigQueryDataTransferConfigLink(),)
def __init__(
self,
*,
transfer_config_id: str,
project_id: str | None = None,
location: str | None = None,
requested_time_range: dict | None = None,
requested_run_time: dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id="google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.transfer_config_id = transfer_config_id
self.requested_time_range = requested_time_range
self.requested_run_time = requested_run_time
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
@cached_property
def hook(self) -> BiqQueryDataTransferServiceHook:
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
location=self.location,
)
return hook
def execute(self, context: Context):
self.log.info("Submitting manual transfer for %s", self.transfer_config_id)
if self.requested_run_time and isinstance(self.requested_run_time.get("seconds"), str):
self.requested_run_time["seconds"] = int(self.requested_run_time["seconds"])
response = self.hook.start_manual_transfer_runs(
transfer_config_id=self.transfer_config_id,
requested_time_range=self.requested_time_range,
requested_run_time=self.requested_run_time,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
transfer_config = _get_transfer_config_details(response.runs[0].name)
BigQueryDataTransferConfigLink.persist(
context=context,
task_instance=self,
region=transfer_config["region"],
config_id=transfer_config["config_id"],
project_id=transfer_config["project_id"],
)
result = StartManualTransferRunsResponse.to_dict(response)
run_id = get_object_id(result["runs"][0])
self.xcom_push(context, key="run_id", value=run_id)
if not self.deferrable:
result = self._wait_for_transfer_to_be_done(
run_id=run_id,
transfer_config_id=transfer_config["config_id"],
)
self.log.info("Transfer run %s submitted successfully.", run_id)
return result
self.defer(
trigger=BigQueryDataTransferRunTrigger(
project_id=self.project_id,
config_id=transfer_config["config_id"],
run_id=run_id,
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
),
method_name="execute_completed",
)
def _wait_for_transfer_to_be_done(self, run_id: str, transfer_config_id: str, interval: int = 10):
        if interval <= 0:
            raise ValueError("Interval must be > 0")
while True:
transfer_run: TransferRun = self.hook.get_transfer_run(
run_id=run_id,
transfer_config_id=transfer_config_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
state = transfer_run.state
if self._job_is_done(state):
                if state in (TransferState.FAILED, TransferState.CANCELLED):
raise AirflowException(f"Transfer run was finished with {state} status.")
result = TransferRun.to_dict(transfer_run)
return result
self.log.info("Transfer run is still working, waiting for %s seconds...", interval)
self.log.info("Transfer run status: %s", state)
time.sleep(interval)
@staticmethod
def _job_is_done(state: TransferState) -> bool:
finished_job_statuses = [
state.SUCCEEDED,
state.CANCELLED,
state.FAILED,
]
return state in finished_job_statuses
def execute_completed(self, context: Context, event: dict):
"""Method to be executed after invoked trigger in defer method finishes its job."""
if event["status"] == "failed" or event["status"] == "cancelled":
self.log.error("Trigger finished its work with status: %s.", event["status"])
raise AirflowException(event["message"])
transfer_run: TransferRun = self.hook.get_transfer_run(
project_id=self.project_id,
run_id=event["run_id"],
transfer_config_id=event["config_id"],
)
self.log.info(
"%s finished with message: %s",
event["run_id"],
event["message"],
)
return TransferRun.to_dict(transfer_run)
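# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# Starting a manual run for "now" and deferring until the run reaches a terminal
# state. The config id and project are hypothetical; requested_run_time uses the
# Timestamp-style dict accepted by the operator above.
def _example_start_transfer_runs_dag():
    import time as _time
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_bq_dts_start_runs",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        BigQueryDataTransferServiceStartTransferRunsOperator(
            task_id="start_transfer_runs",
            transfer_config_id="00000000-0000-0000-0000-000000000000",  # hypothetical
            requested_run_time={"seconds": int(_time.time())},
            project_id="example-project",  # hypothetical
            deferrable=True,
        )
    return dag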
airflow
airflow-main/airflow/providers/google/cloud/operators/datafusion.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google DataFusion operators."""
from __future__ import annotations
from time import sleep
from typing import TYPE_CHECKING, Any, Sequence
from google.api_core.retry import exponential_sleep_generator
from googleapiclient.errors import HttpError
from airflow import AirflowException
from airflow.configuration import conf
from airflow.providers.google.cloud.hooks.datafusion import SUCCESS_STATES, DataFusionHook, PipelineStates
from airflow.providers.google.cloud.links.datafusion import (
DataFusionInstanceLink,
DataFusionPipelineLink,
DataFusionPipelinesLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.datafusion import DataFusionStartPipelineTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataFusionPipelineLinkHelper:
"""Helper class for Pipeline links."""
@staticmethod
def get_project_id(instance):
instance = instance["name"]
project_id = [x for x in instance.split("/") if x.startswith("airflow")][0]
return project_id
class CloudDataFusionRestartInstanceOperator(GoogleCloudBaseOperator):
"""
Restart a single Data Fusion instance.
    At the end of the operation the instance is fully restarted.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionRestartInstanceOperator`
:param instance_name: The name of the instance to restart.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
location: str,
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Restarting Data Fusion instance: %s", self.instance_name)
operation = hook.restart_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
instance = hook.wait_for_operation(operation)
self.log.info("Instance %s restarted successfully", self.instance_name)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(
context=context,
task_instance=self,
project_id=project_id,
instance_name=self.instance_name,
location=self.location,
)
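# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# Restarting an instance from a DAG; instance name, location and project below are
# hypothetical placeholders, and the function is never called.
def _example_restart_instance_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_datafusion_restart",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        CloudDataFusionRestartInstanceOperator(
            task_id="restart_instance",
            instance_name="example-instance",  # hypothetical
            location="europe-west1",  # hypothetical
            project_id="example-project",  # hypothetical
        )
    return dag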
class CloudDataFusionDeleteInstanceOperator(GoogleCloudBaseOperator):
"""
    Deletes a single Data Fusion instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionDeleteInstanceOperator`
    :param instance_name: The name of the instance to delete.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"impersonation_chain",
)
def __init__(
self,
*,
instance_name: str,
location: str,
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Data Fusion instance: %s", self.instance_name)
operation = hook.delete_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
hook.wait_for_operation(operation)
self.log.info("Instance %s deleted successfully", self.instance_name)
class CloudDataFusionCreateInstanceOperator(GoogleCloudBaseOperator):
"""
Creates a new Data Fusion instance in the specified project and location.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionCreateInstanceOperator`
:param instance_name: The name of the instance to create.
:param instance: An instance of Instance.
https://cloud.google.com/data-fusion/docs/reference/rest/v1beta1/projects.locations.instances#Instance
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"instance",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
instance: dict[str, Any],
location: str,
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.instance = instance
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Data Fusion instance: %s", self.instance_name)
try:
operation = hook.create_instance(
instance_name=self.instance_name,
instance=self.instance,
location=self.location,
project_id=self.project_id,
)
instance = hook.wait_for_operation(operation)
self.log.info("Instance %s created successfully", self.instance_name)
except HttpError as err:
if err.resp.status not in (409, "409"):
raise
self.log.info("Instance %s already exists", self.instance_name)
instance = hook.get_instance(
instance_name=self.instance_name, location=self.location, project_id=self.project_id
)
# Wait for instance to be ready
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
if instance["state"] != "CREATING":
break
sleep(time_to_wait)
instance = hook.get_instance(
instance_name=self.instance_name, location=self.location, project_id=self.project_id
)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(
context=context,
task_instance=self,
project_id=project_id,
instance_name=self.instance_name,
location=self.location,
)
return instance
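# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# Creating a BASIC edition instance. The "type" and "displayName" keys follow the
# Instance REST resource linked in the docstring above; all names and the location
# are hypothetical placeholders.
def _example_create_datafusion_instance_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_datafusion_create",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        CloudDataFusionCreateInstanceOperator(
            task_id="create_instance",
            instance_name="example-instance",  # hypothetical
            instance={"type": "BASIC", "displayName": "example-instance"},
            location="europe-west1",  # hypothetical
            project_id="example-project",  # hypothetical
        )
    return dag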
class CloudDataFusionUpdateInstanceOperator(GoogleCloudBaseOperator):
"""
Updates a single Data Fusion instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionUpdateInstanceOperator`
    :param instance_name: The name of the instance to update.
:param instance: An instance of Instance.
https://cloud.google.com/data-fusion/docs/reference/rest/v1beta1/projects.locations.instances#Instance
:param update_mask: Field mask is used to specify the fields that the update will overwrite
in an instance resource. The fields specified in the updateMask are relative to the resource,
not the full request. A field will be overwritten if it is in the mask. If the user does not
provide a mask, all the supported fields (labels and options currently) will be overwritten.
A comma-separated list of fully qualified names of fields. Example: "user.displayName,photo".
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?_ga=2.205612571.-968688242.1573564810#google.protobuf.FieldMask
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"instance",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
instance: dict[str, Any],
update_mask: str,
location: str,
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.update_mask = update_mask
self.instance_name = instance_name
self.instance = instance
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Updating Data Fusion instance: %s", self.instance_name)
operation = hook.patch_instance(
instance_name=self.instance_name,
instance=self.instance,
update_mask=self.update_mask,
location=self.location,
project_id=self.project_id,
)
instance = hook.wait_for_operation(operation)
self.log.info("Instance %s updated successfully", self.instance_name)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(
context=context,
task_instance=self,
project_id=project_id,
instance_name=self.instance_name,
location=self.location,
)
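# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# Overwriting only the labels of an existing instance by restricting the update
# with update_mask; all names are hypothetical placeholders.
def _example_update_datafusion_instance_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_datafusion_update",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        CloudDataFusionUpdateInstanceOperator(
            task_id="update_instance",
            instance_name="example-instance",  # hypothetical
            instance={"labels": {"env": "dev"}},
            update_mask="labels",
            location="europe-west1",  # hypothetical
        )
    return dag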
class CloudDataFusionGetInstanceOperator(GoogleCloudBaseOperator):
"""
Gets details of a single Data Fusion instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionGetInstanceOperator`
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
location: str,
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Retrieving Data Fusion instance: %s", self.instance_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(
context=context,
task_instance=self,
project_id=project_id,
instance_name=self.instance_name,
location=self.location,
)
return instance
class CloudDataFusionCreatePipelineOperator(GoogleCloudBaseOperator):
"""
Creates a Cloud Data Fusion pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionCreatePipelineOperator`
:param pipeline_name: Your pipeline name.
:param pipeline: The pipeline definition. For more information check:
https://docs.cdap.io/cdap/current/en/developer-manual/pipelines/developing-pipelines.html#pipeline-configuration-file-format
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (DataFusionPipelineLink(),)
template_fields: Sequence[str] = (
"instance_name",
"pipeline_name",
"impersonation_chain",
)
def __init__(
self,
*,
pipeline_name: str,
pipeline: dict[str, Any],
instance_name: str,
location: str,
namespace: str = "default",
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.pipeline = pipeline
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Data Fusion pipeline: %s", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
hook.create_pipeline(
pipeline_name=self.pipeline_name,
pipeline=self.pipeline,
instance_url=api_url,
namespace=self.namespace,
)
DataFusionPipelineLink.persist(
context=context,
task_instance=self,
uri=instance["serviceEndpoint"],
pipeline_name=self.pipeline_name,
)
self.log.info("Pipeline %s created", self.pipeline_name)
class CloudDataFusionDeletePipelineOperator(GoogleCloudBaseOperator):
"""
Deletes a Cloud Data Fusion pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionDeletePipelineOperator`
:param pipeline_name: Your pipeline name.
:param version_id: Version of pipeline to delete
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"version_id",
"pipeline_name",
"impersonation_chain",
)
def __init__(
self,
*,
pipeline_name: str,
instance_name: str,
location: str,
version_id: str | None = None,
namespace: str = "default",
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.version_id = version_id
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Data Fusion pipeline: %s", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
hook.delete_pipeline(
pipeline_name=self.pipeline_name,
version_id=self.version_id,
instance_url=api_url,
namespace=self.namespace,
)
self.log.info("Pipeline deleted")
class CloudDataFusionListPipelinesOperator(GoogleCloudBaseOperator):
"""
Lists Cloud Data Fusion pipelines.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionListPipelinesOperator`
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
    :param artifact_version: Artifact version to filter pipelines
    :param artifact_name: Artifact name to filter pipelines
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"artifact_name",
"artifact_version",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelinesLink(),)
def __init__(
self,
*,
instance_name: str,
location: str,
artifact_name: str | None = None,
artifact_version: str | None = None,
namespace: str = "default",
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.artifact_version = artifact_version
self.artifact_name = artifact_name
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Listing Data Fusion pipelines")
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
service_endpoint = instance["serviceEndpoint"]
pipelines = hook.list_pipelines(
instance_url=api_url,
namespace=self.namespace,
artifact_version=self.artifact_version,
artifact_name=self.artifact_name,
)
self.log.info("Pipelines: %s", pipelines)
DataFusionPipelinesLink.persist(context=context, task_instance=self, uri=service_endpoint)
return pipelines
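# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# Listing only batch pipelines by filtering on the artifact name; the artifact name
# below is an assumption about the CDAP batch-pipeline artifact, and the instance
# details are hypothetical.
def _example_list_pipelines_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_datafusion_list_pipelines",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        CloudDataFusionListPipelinesOperator(
            task_id="list_pipelines",
            instance_name="example-instance",  # hypothetical
            location="europe-west1",  # hypothetical
            artifact_name="cdap-data-pipeline",  # assumed batch-pipeline artifact
        )
    return dag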
class CloudDataFusionStartPipelineOperator(GoogleCloudBaseOperator):
"""
Starts a Cloud Data Fusion pipeline. Works for both batch and stream pipelines.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionStartPipelineOperator`
:param pipeline_name: Your pipeline name.
:param instance_name: The name of the instance.
    :param success_states: If provided, the operator will wait for the pipeline to be in one of
        the provided states.
:param pipeline_timeout: How long (in seconds) operator should wait for the pipeline to be in one of
``success_states``. Works only if ``success_states`` are provided.
:param location: The Cloud Data Fusion location in which to handle the request.
:param runtime_args: Optional runtime args to be passed to the pipeline
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return after submitting the pipeline ID to the Data Fusion API.
This is useful for submitting long-running pipelines and
waiting on them asynchronously using the CloudDataFusionPipelineStateSensor
:param deferrable: Run operator in the deferrable mode. Is not related to asynchronous parameter. While
asynchronous parameter gives a possibility to wait until pipeline reaches terminate state using
sleep() method, deferrable mode checks for the state using asynchronous calls. It is not possible to
use both asynchronous and deferrable parameters at the same time.
:param poll_interval: Polling period in seconds to check for the status. Used only in deferrable mode.
"""
template_fields: Sequence[str] = (
"instance_name",
"pipeline_name",
"runtime_args",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelineLink(),)
def __init__(
self,
*,
pipeline_name: str,
instance_name: str,
location: str,
runtime_args: dict[str, Any] | None = None,
success_states: list[str] | None = None,
namespace: str = "default",
pipeline_timeout: int = 5 * 60,
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous=False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval=3.0,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.runtime_args = runtime_args
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
self.pipeline_timeout = pipeline_timeout
self.deferrable = deferrable
self.poll_interval = poll_interval
if success_states:
self.success_states = success_states
else:
self.success_states = SUCCESS_STATES + [PipelineStates.RUNNING]
def execute(self, context: Context) -> str:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Starting Data Fusion pipeline: %s", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
pipeline_id = hook.start_pipeline(
pipeline_name=self.pipeline_name,
instance_url=api_url,
namespace=self.namespace,
runtime_args=self.runtime_args,
)
self.log.info("Pipeline %s submitted successfully.", pipeline_id)
DataFusionPipelineLink.persist(
context=context,
task_instance=self,
uri=instance["serviceEndpoint"],
pipeline_name=self.pipeline_name,
)
if self.deferrable:
if self.asynchronous:
raise AirflowException(
"Both asynchronous and deferrable parameters were passed. Please, provide only one."
)
self.defer(
trigger=DataFusionStartPipelineTrigger(
success_states=self.success_states,
instance_url=api_url,
namespace=self.namespace,
pipeline_name=self.pipeline_name,
pipeline_id=pipeline_id,
poll_interval=self.poll_interval,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
),
method_name="execute_complete",
)
else:
if not self.asynchronous:
                # when NOT using asynchronous mode, wait for the pipeline to finish and log the result
                self.log.info("Waiting for pipeline %s to reach one of the success states", pipeline_id)
hook.wait_for_pipeline_state(
success_states=self.success_states,
pipeline_id=pipeline_id,
pipeline_name=self.pipeline_name,
namespace=self.namespace,
instance_url=api_url,
timeout=self.pipeline_timeout,
)
self.log.info("Pipeline %s discovered success state.", pipeline_id)
# otherwise, return pipeline_id so that sensor can use it later to check the pipeline state
return pipeline_id
def execute_complete(self, context: Context, event: dict[str, Any]):
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
return event["pipeline_id"]
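# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# Starting a deployed pipeline in deferrable mode with custom runtime arguments.
# Pipeline, instance and argument names are hypothetical; deferrable=True means the
# task frees its worker slot while the trigger polls every poll_interval seconds.
def _example_start_pipeline_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_datafusion_start_pipeline",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        CloudDataFusionStartPipelineOperator(
            task_id="start_pipeline",
            pipeline_name="example_pipeline",  # hypothetical
            instance_name="example-instance",  # hypothetical
            location="europe-west1",  # hypothetical
            runtime_args={"input.path": "gs://example-bucket/input"},  # hypothetical
            deferrable=True,
            poll_interval=30,
        )
    return dag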
class CloudDataFusionStopPipelineOperator(GoogleCloudBaseOperator):
"""
Stops a Cloud Data Fusion pipeline. Works for both batch and stream pipelines.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionStopPipelineOperator`
:param pipeline_name: Your pipeline name.
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"pipeline_name",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelineLink(),)
def __init__(
self,
*,
pipeline_name: str,
instance_name: str,
location: str,
namespace: str = "default",
project_id: str | None = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Data Fusion pipeline: %s is going to be stopped", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
DataFusionPipelineLink.persist(
context=context,
task_instance=self,
uri=instance["serviceEndpoint"],
pipeline_name=self.pipeline_name,
)
hook.stop_pipeline(
pipeline_name=self.pipeline_name,
instance_url=api_url,
namespace=self.namespace,
)
self.log.info("Pipeline stopped")
airflow
airflow-main/airflow/providers/google/cloud/operators/cloud_base.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google API base operator."""
from __future__ import annotations
from google.api_core.gapic_v1.method import DEFAULT
from airflow.models import BaseOperator
class GoogleCloudBaseOperator(BaseOperator):
"""Abstract base class for operators using Google API client libraries."""
def __deepcopy__(self, memo):
"""
Updating the memo to fix the non-copyable global constant.
This constant can be specified in operator parameters as a retry configuration to indicate a default.
See https://github.com/apache/airflow/issues/28751 for details.
"""
memo[id(DEFAULT)] = DEFAULT
return super().__deepcopy__(memo)
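# --- Illustrative sketch (added for clarity, not part of the upstream file) ---
# Why the __deepcopy__ override matters: a copied operator must keep the very same
# DEFAULT sentinel object, otherwise "retry is DEFAULT" checks inside the client
# libraries stop matching. _DemoOperator below is a hypothetical subclass used only
# to illustrate this; the function is never called.
def _example_deepcopy_preserves_default_sentinel():
    import copy

    class _DemoOperator(GoogleCloudBaseOperator):
        def __init__(self, *, retry=DEFAULT, **kwargs):
            super().__init__(**kwargs)
            self.retry = retry

    op = _DemoOperator(task_id="demo")
    copied = copy.deepcopy(op)
    assert copied.retry is DEFAULT  # the sentinel identity survives the deep copy
    return copied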
airflow
airflow-main/airflow/providers/google/cloud/operators/bigtable.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Bigtable operators."""
from __future__ import annotations
import enum
from typing import TYPE_CHECKING, Iterable, Sequence
import google.api_core.exceptions
from google.cloud.bigtable import enums
from google.cloud.bigtable.column_family import GarbageCollectionRule
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.bigtable import BigtableHook
from airflow.providers.google.cloud.links.bigtable import (
BigtableClusterLink,
BigtableInstanceLink,
BigtableTablesLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigtableValidationMixin:
"""Common class for Cloud Bigtable operators for validating required fields."""
REQUIRED_ATTRIBUTES = [] # type: Iterable[str]
def _validate_inputs(self):
for attr_name in self.REQUIRED_ATTRIBUTES:
if not getattr(self, attr_name):
raise AirflowException(f"Empty parameter: {attr_name}")
class BigtableCreateInstanceOperator(GoogleCloudBaseOperator, BigtableValidationMixin):
"""
Creates a new Cloud Bigtable instance.
If the Cloud Bigtable instance with the given ID exists, the operator does not
compare its configuration and immediately succeeds. No changes are made to the
existing instance.
For more details about instance creation have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/instance.html#google.cloud.bigtable.instance.Instance.create
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableCreateInstanceOperator`
:param instance_id: The ID of the Cloud Bigtable instance to create.
:param main_cluster_id: The ID for main cluster for the new instance.
:param main_cluster_zone: The zone for main cluster
See https://cloud.google.com/bigtable/docs/locations for more details.
:param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param replica_clusters: (optional) A list of replica clusters for the new
instance. Each cluster dictionary contains an id and a zone.
Example: [{"id": "replica-1", "zone": "us-west1-a"}]
:param instance_type: (optional) The type of the instance.
:param instance_display_name: (optional) Human-readable name of the instance. Defaults
to ``instance_id``.
:param instance_labels: (optional) Dictionary of labels to associate
with the instance.
:param cluster_nodes: (optional) Number of nodes for cluster.
:param cluster_storage_type: (optional) The type of storage.
:param timeout: (optional) timeout (in seconds) for instance creation.
        If not specified, the operator will wait indefinitely.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES: Iterable[str] = ("instance_id", "main_cluster_id", "main_cluster_zone")
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"main_cluster_id",
"main_cluster_zone",
"impersonation_chain",
)
operator_extra_links = (BigtableInstanceLink(),)
def __init__(
self,
*,
instance_id: str,
main_cluster_id: str,
main_cluster_zone: str,
project_id: str | None = None,
replica_clusters: list[dict[str, str]] | None = None,
instance_display_name: str | None = None,
instance_type: enums.Instance.Type | None = None,
instance_labels: dict | None = None,
cluster_nodes: int | None = None,
cluster_storage_type: enums.StorageType | None = None,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.main_cluster_id = main_cluster_id
self.main_cluster_zone = main_cluster_zone
self.replica_clusters = replica_clusters
self.instance_display_name = instance_display_name
self.instance_type = instance_type
self.instance_labels = instance_labels
self.cluster_nodes = cluster_nodes
self.cluster_storage_type = cluster_storage_type
self.timeout = timeout
self._validate_inputs()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if instance:
# Based on Instance.__eq__ instance with the same ID and client is
# considered as equal.
self.log.info(
"The instance '%s' already exists in this project. Consider it as created",
self.instance_id,
)
BigtableInstanceLink.persist(context=context, task_instance=self)
return
try:
hook.create_instance(
project_id=self.project_id,
instance_id=self.instance_id,
main_cluster_id=self.main_cluster_id,
main_cluster_zone=self.main_cluster_zone,
replica_clusters=self.replica_clusters,
instance_display_name=self.instance_display_name,
instance_type=self.instance_type,
instance_labels=self.instance_labels,
cluster_nodes=self.cluster_nodes,
cluster_storage_type=self.cluster_storage_type,
timeout=self.timeout,
)
BigtableInstanceLink.persist(context=context, task_instance=self)
except google.api_core.exceptions.GoogleAPICallError as e:
self.log.error("An error occurred. Exiting.")
raise e
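# --- Illustrative usage sketch (added for clarity, not part of the upstream file) ---
# Creating a PRODUCTION instance with a single three-node cluster; ids, zone and
# project are hypothetical placeholders, and the function is never called.
def _example_create_bigtable_instance_dag():
    from datetime import datetime

    from airflow import DAG

    with DAG(
        dag_id="example_bigtable_create",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        BigtableCreateInstanceOperator(
            task_id="create_instance",
            instance_id="example-instance",  # hypothetical
            main_cluster_id="example-instance-c1",  # hypothetical
            main_cluster_zone="europe-west1-b",  # hypothetical
            instance_type=enums.Instance.Type.PRODUCTION,
            cluster_nodes=3,
            project_id="example-project",  # hypothetical
        )
    return dag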
class BigtableUpdateInstanceOperator(GoogleCloudBaseOperator, BigtableValidationMixin):
"""
Updates an existing Cloud Bigtable instance.
For more details about instance creation have a look at the reference:
https://googleapis.dev/python/bigtable/latest/instance.html#google.cloud.bigtable.instance.Instance.update
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableUpdateInstanceOperator`
:param instance_id: The ID of the Cloud Bigtable instance to update.
:param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param instance_display_name: (optional) Human-readable name of the instance.
:param instance_type: (optional) The type of the instance.
:param instance_labels: (optional) Dictionary of labels to associate
with the instance.
:param timeout: (optional) timeout (in seconds) for instance update.
        If not specified, the operator will wait indefinitely.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES: Iterable[str] = ["instance_id"]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"impersonation_chain",
)
operator_extra_links = (BigtableInstanceLink(),)
def __init__(
self,
*,
instance_id: str,
project_id: str | None = None,
instance_display_name: str | None = None,
instance_type: enums.Instance.Type | enum.IntEnum | None = None,
instance_labels: dict | None = None,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.instance_display_name = instance_display_name
self.instance_type = instance_type
self.instance_labels = instance_labels
self.timeout = timeout
self._validate_inputs()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if not instance:
raise AirflowException(f"Dependency: instance '{self.instance_id}' does not exist.")
try:
hook.update_instance(
project_id=self.project_id,
instance_id=self.instance_id,
instance_display_name=self.instance_display_name,
instance_type=self.instance_type,
instance_labels=self.instance_labels,
timeout=self.timeout,
)
BigtableInstanceLink.persist(context=context, task_instance=self)
except google.api_core.exceptions.GoogleAPICallError as e:
self.log.error("An error occurred. Exiting.")
raise e
class BigtableDeleteInstanceOperator(GoogleCloudBaseOperator, BigtableValidationMixin):
"""
Deletes the Cloud Bigtable instance, including its clusters and all related tables.
For more details about deleting instance have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/instance.html#google.cloud.bigtable.instance.Instance.delete
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableDeleteInstanceOperator`
:param instance_id: The ID of the Cloud Bigtable instance to delete.
:param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES = ("instance_id",) # type: Iterable[str]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"impersonation_chain",
)
def __init__(
self,
*,
instance_id: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self._validate_inputs()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
hook.delete_instance(project_id=self.project_id, instance_id=self.instance_id)
except google.api_core.exceptions.NotFound:
self.log.info(
"The instance '%s' does not exist in project '%s'. Consider it as deleted",
self.instance_id,
self.project_id,
)
except google.api_core.exceptions.GoogleAPICallError as e:
self.log.error("An error occurred. Exiting.")
raise e
class BigtableCreateTableOperator(GoogleCloudBaseOperator, BigtableValidationMixin):
"""
Creates the table in the Cloud Bigtable instance.
    For more details about creating a table, have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/table.html#google.cloud.bigtable.table.Table.create
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableCreateTableOperator`
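    **Example**:
    A minimal usage sketch; the IDs below are placeholders and ``MaxVersionsGCRule``
    is just one of the garbage-collection rules offered by the Bigtable client library:
    .. code-block:: python
        from google.cloud.bigtable.column_family import MaxVersionsGCRule
        create_table_task = BigtableCreateTableOperator(
            task_id="create_bigtable_table",
            instance_id="my-bigtable-instance",
            table_id="my-table",
            column_families={"cf1": MaxVersionsGCRule(2)},
        )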
:param instance_id: The ID of the Cloud Bigtable instance that will
hold the new table.
:param table_id: The ID of the table to be created.
:param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param initial_split_keys: (Optional) list of row keys in bytes that will be used to
initially split the table into several tablets.
    :param column_families: (Optional) A map of columns to create.
The key is the column_id str and the value is a
:class:`google.cloud.bigtable.column_family.GarbageCollectionRule`
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES = ("instance_id", "table_id") # type: Iterable[str]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"table_id",
"impersonation_chain",
)
operator_extra_links = (BigtableTablesLink(),)
def __init__(
self,
*,
instance_id: str,
table_id: str,
project_id: str | None = None,
initial_split_keys: list | None = None,
column_families: dict[str, GarbageCollectionRule] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.table_id = table_id
self.initial_split_keys = initial_split_keys or []
self.column_families = column_families or {}
self._validate_inputs()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _compare_column_families(self, hook, instance) -> bool:
table_column_families = hook.get_column_families_for_table(instance, self.table_id)
if set(table_column_families.keys()) != set(self.column_families.keys()):
self.log.error("Table '%s' has different set of Column Families", self.table_id)
self.log.error("Expected: %s", self.column_families.keys())
self.log.error("Actual: %s", table_column_families.keys())
return False
for key in table_column_families:
# There is difference in structure between local Column Families
# and remote ones
# Local `self.column_families` is dict with column_id as key
# and GarbageCollectionRule as value.
# Remote `table_column_families` is list of ColumnFamily objects.
# For more information about ColumnFamily please refer to the documentation:
# https://googleapis.github.io/google-cloud-python/latest/bigtable/column-family.html#google.cloud.bigtable.column_family.ColumnFamily
if table_column_families[key].gc_rule != self.column_families[key]:
self.log.error("Column Family '%s' differs for table '%s'.", key, self.table_id)
return False
return True
def execute(self, context: Context) -> None:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if not instance:
raise AirflowException(
f"Dependency: instance '{self.instance_id}' does not exist in project '{self.project_id}'."
)
try:
hook.create_table(
instance=instance,
table_id=self.table_id,
initial_split_keys=self.initial_split_keys,
column_families=self.column_families,
)
BigtableTablesLink.persist(context=context, task_instance=self)
except google.api_core.exceptions.AlreadyExists:
if not self._compare_column_families(hook, instance):
raise AirflowException(
f"Table '{self.table_id}' already exists with different Column Families."
)
self.log.info("The table '%s' already exists. Consider it as created", self.table_id)
class BigtableDeleteTableOperator(GoogleCloudBaseOperator, BigtableValidationMixin):
"""
Deletes the Cloud Bigtable table.
    For more details about deleting a table, have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/table.html#google.cloud.bigtable.table.Table.delete
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableDeleteTableOperator`
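    **Example**:
    A minimal usage sketch; the IDs below are placeholders:
    .. code-block:: python
        delete_table_task = BigtableDeleteTableOperator(
            task_id="delete_bigtable_table",
            instance_id="my-bigtable-instance",
            table_id="my-table",
        )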
:param instance_id: The ID of the Cloud Bigtable instance.
:param table_id: The ID of the table to be deleted.
:param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param app_profile_id: Application profile.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES = ("instance_id", "table_id") # type: Iterable[str]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"table_id",
"impersonation_chain",
)
def __init__(
self,
*,
instance_id: str,
table_id: str,
project_id: str | None = None,
app_profile_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.table_id = table_id
self.app_profile_id = app_profile_id
self._validate_inputs()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if not instance:
raise AirflowException(f"Dependency: instance '{self.instance_id}' does not exist.")
try:
hook.delete_table(
project_id=self.project_id,
instance_id=self.instance_id,
table_id=self.table_id,
)
except google.api_core.exceptions.NotFound:
            # It's OK if the table doesn't exist.
self.log.info("The table '%s' no longer exists. Consider it as deleted", self.table_id)
except google.api_core.exceptions.GoogleAPICallError as e:
self.log.error("An error occurred. Exiting.")
raise e
class BigtableUpdateClusterOperator(GoogleCloudBaseOperator, BigtableValidationMixin):
"""
Updates a Cloud Bigtable cluster.
For more details about updating a Cloud Bigtable cluster,
have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/cluster.html#google.cloud.bigtable.cluster.Cluster.update
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableUpdateClusterOperator`
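    **Example**:
    A minimal usage sketch; the IDs and node count below are placeholders:
    .. code-block:: python
        update_cluster_task = BigtableUpdateClusterOperator(
            task_id="update_bigtable_cluster",
            instance_id="my-bigtable-instance",
            cluster_id="my-bigtable-instance-c1",
            nodes=3,
        )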
:param instance_id: The ID of the Cloud Bigtable instance.
:param cluster_id: The ID of the Cloud Bigtable cluster to update.
:param nodes: The desired number of nodes for the Cloud Bigtable cluster.
:param project_id: Optional, the ID of the Google Cloud project.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES = ("instance_id", "cluster_id", "nodes") # type: Iterable[str]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"cluster_id",
"nodes",
"impersonation_chain",
)
operator_extra_links = (BigtableClusterLink(),)
def __init__(
self,
*,
instance_id: str,
cluster_id: str,
nodes: int,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.cluster_id = cluster_id
self.nodes = nodes
self._validate_inputs()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if not instance:
raise AirflowException(f"Dependency: instance '{self.instance_id}' does not exist.")
try:
hook.update_cluster(instance=instance, cluster_id=self.cluster_id, nodes=self.nodes)
BigtableClusterLink.persist(context=context, task_instance=self)
except google.api_core.exceptions.NotFound:
raise AirflowException(
f"Dependency: cluster '{self.cluster_id}' does not exist for instance '{self.instance_id}'."
)
except google.api_core.exceptions.GoogleAPICallError as e:
self.log.error("An error occurred. Exiting.")
raise e
| 26,783 | 44.243243 | 146 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Transfer operators."""
from __future__ import annotations
import warnings
from copy import deepcopy
from datetime import date, time
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import (
ACCESS_KEY_ID,
AWS_ACCESS_KEY,
AWS_S3_DATA_SOURCE,
BUCKET_NAME,
DAY,
DESCRIPTION,
GCS_DATA_SINK,
GCS_DATA_SOURCE,
HOURS,
HTTP_DATA_SOURCE,
MINUTES,
MONTH,
NAME,
OBJECT_CONDITIONS,
PATH,
PROJECT_ID,
SCHEDULE,
SCHEDULE_END_DATE,
SCHEDULE_START_DATE,
SECONDS,
SECRET_ACCESS_KEY,
START_TIME_OF_DAY,
STATUS,
TRANSFER_OPTIONS,
TRANSFER_SPEC,
YEAR,
CloudDataTransferServiceHook,
GcpTransferJobsStatus,
)
from airflow.providers.google.cloud.links.cloud_storage_transfer import (
CloudStorageTransferDetailsLink,
CloudStorageTransferJobLink,
CloudStorageTransferListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.utils.helpers import normalize_directory_path
if TYPE_CHECKING:
from airflow.utils.context import Context
class TransferJobPreprocessor:
"""Helper class for preprocess of transfer job body."""
def __init__(self, body: dict, aws_conn_id: str = "aws_default", default_schedule: bool = False) -> None:
self.body = body
self.aws_conn_id = aws_conn_id
self.default_schedule = default_schedule
def _inject_aws_credentials(self) -> None:
if TRANSFER_SPEC in self.body and AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]:
aws_hook = AwsBaseHook(self.aws_conn_id, resource_type="s3")
aws_credentials = aws_hook.get_credentials()
aws_access_key_id = aws_credentials.access_key # type: ignore[attr-defined]
aws_secret_access_key = aws_credentials.secret_key # type: ignore[attr-defined]
self.body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE][AWS_ACCESS_KEY] = {
ACCESS_KEY_ID: aws_access_key_id,
SECRET_ACCESS_KEY: aws_secret_access_key,
}
def _reformat_date(self, field_key: str) -> None:
schedule = self.body[SCHEDULE]
if field_key not in schedule:
return
if isinstance(schedule[field_key], date):
schedule[field_key] = self._convert_date_to_dict(schedule[field_key])
def _reformat_time(self, field_key: str) -> None:
schedule = self.body[SCHEDULE]
if field_key not in schedule:
return
if isinstance(schedule[field_key], time):
schedule[field_key] = self._convert_time_to_dict(schedule[field_key])
def _reformat_schedule(self) -> None:
if SCHEDULE not in self.body:
if self.default_schedule:
self.body[SCHEDULE] = {SCHEDULE_START_DATE: date.today(), SCHEDULE_END_DATE: date.today()}
else:
return
self._reformat_date(SCHEDULE_START_DATE)
self._reformat_date(SCHEDULE_END_DATE)
self._reformat_time(START_TIME_OF_DAY)
def process_body(self) -> dict:
"""
Injects AWS credentials into body if needed and reformats schedule information.
:return: Preprocessed body
"""
self._inject_aws_credentials()
self._reformat_schedule()
return self.body
@staticmethod
def _convert_date_to_dict(field_date: date) -> dict:
"""Convert native python ``datetime.date`` object to a format supported by the API."""
return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
@staticmethod
def _convert_time_to_dict(time_object: time) -> dict:
"""Convert native python ``datetime.time`` object to a format supported by the API."""
return {HOURS: time_object.hour, MINUTES: time_object.minute, SECONDS: time_object.second}
class TransferJobValidator:
"""Helper class for validating transfer job body."""
def __init__(self, body: dict) -> None:
if not body:
raise AirflowException("The required parameter 'body' is empty or None")
self.body = body
def _verify_data_source(self) -> None:
is_gcs = GCS_DATA_SOURCE in self.body[TRANSFER_SPEC]
is_aws_s3 = AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]
is_http = HTTP_DATA_SOURCE in self.body[TRANSFER_SPEC]
sources_count = sum([is_gcs, is_aws_s3, is_http])
if sources_count > 1:
raise AirflowException(
"More than one data source detected. Please choose exactly one data source from: "
"gcsDataSource, awsS3DataSource and httpDataSource."
)
def _restrict_aws_credentials(self) -> None:
aws_transfer = AWS_S3_DATA_SOURCE in self.body[TRANSFER_SPEC]
if aws_transfer and AWS_ACCESS_KEY in self.body[TRANSFER_SPEC][AWS_S3_DATA_SOURCE]:
raise AirflowException(
"AWS credentials detected inside the body parameter (awsAccessKey). This is not allowed, "
"please use Airflow connections to store credentials."
)
def validate_body(self) -> None:
"""
Validates the body.
        Checks if the body specifies `transferSpec`; if so, verifies that AWS credentials
        are not embedded in the body and that no more than one data source is selected.
:raises: AirflowException
"""
if TRANSFER_SPEC in self.body:
self._restrict_aws_credentials()
self._verify_data_source()
class CloudDataTransferServiceCreateJobOperator(GoogleCloudBaseOperator):
"""
Creates a transfer job that runs periodically.
.. warning::
This operator is NOT idempotent in the following cases:
* `name` is not passed in body param
* transfer job `name` has been soft deleted. In this case,
each new task will receive a unique suffix
If you run it many times, many transfer jobs will be created in the Google Cloud.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceCreateJobOperator`
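    **Example**:
    A minimal body sketch; the bucket names, project ID and date are placeholders,
    and ``scheduleStartDate`` is given as a plain :class:`datetime.date`:
    .. code-block:: python
        from datetime import date
        create_transfer_job = CloudDataTransferServiceCreateJobOperator(
            task_id="create_transfer_job",
            body={
                "description": "Example GCS to GCS transfer",
                "status": "ENABLED",
                "projectId": "my-gcp-project",
                "schedule": {"scheduleStartDate": date(2023, 1, 1)},
                "transferSpec": {
                    "gcsDataSource": {"bucketName": "my-source-bucket"},
                    "gcsDataSink": {"bucketName": "my-destination-bucket"},
                },
            },
        )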
:param body: (Required) The request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
With three additional improvements:
* dates can be given in the form :class:`datetime.date`
* times can be given in the form :class:`datetime.time`
* credentials to Amazon Web Service should be stored in the connection and indicated by the
aws_conn_id parameter
:param aws_conn_id: The connection ID used to retrieve credentials to
Amazon Web Service.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_job_create_template_fields]
template_fields: Sequence[str] = (
"body",
"gcp_conn_id",
"aws_conn_id",
"google_impersonation_chain",
)
# [END gcp_transfer_job_create_template_fields]
operator_extra_links = (CloudStorageTransferJobLink(),)
def __init__(
self,
*,
body: dict,
aws_conn_id: str = "aws_default",
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
project_id: str | None = None,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = deepcopy(body)
self.aws_conn_id = aws_conn_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.project_id = project_id
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
TransferJobValidator(body=self.body).validate_body()
def execute(self, context: Context) -> dict:
TransferJobPreprocessor(body=self.body, aws_conn_id=self.aws_conn_id).process_body()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
result = hook.create_transfer_job(body=self.body)
project_id = self.project_id or hook.project_id
if project_id:
CloudStorageTransferJobLink.persist(
context=context,
task_instance=self,
project_id=project_id,
job_name=result[NAME],
)
return result
class CloudDataTransferServiceUpdateJobOperator(GoogleCloudBaseOperator):
"""
Updates a transfer job that runs periodically.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceUpdateJobOperator`
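    **Example**:
    A minimal body sketch for the patch request; the job name and field values are
    placeholders and the field names follow the REST reference linked below:
    .. code-block:: python
        update_transfer_job = CloudDataTransferServiceUpdateJobOperator(
            task_id="update_transfer_job",
            job_name="transferJobs/123456789",
            body={
                "projectId": "my-gcp-project",
                "transferJob": {"description": "Updated description"},
                "updateTransferJobFieldMask": "description",
            },
        )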
:param job_name: (Required) Name of the job to be updated
:param body: (Required) The request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
With three additional improvements:
* dates can be given in the form :class:`datetime.date`
* times can be given in the form :class:`datetime.time`
* credentials to Amazon Web Service should be stored in the connection and indicated by the
aws_conn_id parameter
:param aws_conn_id: The connection ID used to retrieve credentials to
Amazon Web Service.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_job_update_template_fields]
template_fields: Sequence[str] = (
"job_name",
"body",
"gcp_conn_id",
"aws_conn_id",
"google_impersonation_chain",
)
# [END gcp_transfer_job_update_template_fields]
operator_extra_links = (CloudStorageTransferJobLink(),)
def __init__(
self,
*,
job_name: str,
body: dict,
aws_conn_id: str = "aws_default",
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
project_id: str | None = None,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_name = job_name
self.body = body
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.aws_conn_id = aws_conn_id
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
TransferJobValidator(body=self.body).validate_body()
if not self.job_name:
raise AirflowException("The required parameter 'job_name' is empty or None")
def execute(self, context: Context) -> dict:
TransferJobPreprocessor(body=self.body, aws_conn_id=self.aws_conn_id).process_body()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudStorageTransferJobLink.persist(
context=context,
task_instance=self,
project_id=project_id,
job_name=self.job_name,
)
return hook.update_transfer_job(job_name=self.job_name, body=self.body)
class CloudDataTransferServiceDeleteJobOperator(GoogleCloudBaseOperator):
"""
Delete a transfer job.
This is a soft delete. After a transfer job is deleted, the job and all the transfer
executions are subject to garbage collection. Transfer jobs become eligible for garbage
collection 30 days after soft delete.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceDeleteJobOperator`
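    **Example**:
    A minimal usage sketch; the job name and project ID below are placeholders:
    .. code-block:: python
        delete_transfer_job = CloudDataTransferServiceDeleteJobOperator(
            task_id="delete_transfer_job",
            job_name="transferJobs/123456789",
            project_id="my-gcp-project",
        )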
    :param job_name: (Required) Name of the transfer job to delete.
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_job_delete_template_fields]
template_fields: Sequence[str] = (
"job_name",
"project_id",
"gcp_conn_id",
"api_version",
"google_impersonation_chain",
)
# [END gcp_transfer_job_delete_template_fields]
def __init__(
self,
*,
job_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
project_id: str | None = None,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_name = job_name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.job_name:
raise AirflowException("The required parameter 'job_name' is empty or None")
def execute(self, context: Context) -> None:
self._validate_inputs()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.delete_transfer_job(job_name=self.job_name, project_id=self.project_id)
class CloudDataTransferServiceGetOperationOperator(GoogleCloudBaseOperator):
"""
Gets the latest state of a long-running operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceGetOperationOperator`
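    **Example**:
    A minimal usage sketch; the operation name below is a placeholder:
    .. code-block:: python
        get_transfer_operation = CloudDataTransferServiceGetOperationOperator(
            task_id="get_transfer_operation",
            operation_name="transferOperations/123456789",
        )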
:param operation_name: (Required) Name of the transfer operation.
:param gcp_conn_id: The connection ID used to connect to Google
Cloud Platform.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_operation_get_template_fields]
template_fields: Sequence[str] = (
"operation_name",
"gcp_conn_id",
"google_impersonation_chain",
)
# [END gcp_transfer_operation_get_template_fields]
operator_extra_links = (CloudStorageTransferDetailsLink(),)
def __init__(
self,
*,
project_id: str | None = None,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context: Context) -> dict:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
operation = hook.get_transfer_operation(operation_name=self.operation_name)
project_id = self.project_id or hook.project_id
if project_id:
CloudStorageTransferDetailsLink.persist(
context=context,
task_instance=self,
project_id=project_id,
operation_name=self.operation_name,
)
return operation
class CloudDataTransferServiceListOperationsOperator(GoogleCloudBaseOperator):
"""
Lists long-running operations in Google Storage Transfer Service that match the specified filter.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceListOperationsOperator`
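    **Example**:
    A minimal usage sketch; the project ID and job name inside the filter are placeholders:
    .. code-block:: python
        list_transfer_operations = CloudDataTransferServiceListOperationsOperator(
            task_id="list_transfer_operations",
            request_filter={
                "project_id": "my-gcp-project",
                "job_names": ["transferJobs/123456789"],
            },
        )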
:param request_filter: (Required) A request filter, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter
:param gcp_conn_id: The connection ID used to connect to Google
Cloud Platform.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_operations_list_template_fields]
template_fields: Sequence[str] = (
"filter",
"gcp_conn_id",
"google_impersonation_chain",
)
# [END gcp_transfer_operations_list_template_fields]
operator_extra_links = (CloudStorageTransferListLink(),)
def __init__(
self,
request_filter: dict | None = None,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
        if request_filter is None:
            if "filter" in kwargs:
                request_filter = kwargs["filter"]
                warnings.warn(
                    "Use 'request_filter' instead of 'filter' to pass the argument.",
                    AirflowProviderDeprecationWarning,
                    stacklevel=2,
                )
            else:
                raise TypeError("__init__() missing 1 required positional argument: 'request_filter'")
super().__init__(**kwargs)
self.filter = request_filter
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.filter:
raise AirflowException("The required parameter 'filter' is empty or None")
def execute(self, context: Context) -> list[dict]:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
operations_list = hook.list_transfer_operations(request_filter=self.filter)
self.log.info(operations_list)
project_id = self.project_id or hook.project_id
if project_id:
CloudStorageTransferListLink.persist(
context=context,
task_instance=self,
project_id=project_id,
)
return operations_list
class CloudDataTransferServicePauseOperationOperator(GoogleCloudBaseOperator):
"""
Pauses a transfer operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServicePauseOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_operation_pause_template_fields]
template_fields: Sequence[str] = (
"operation_name",
"gcp_conn_id",
"api_version",
"google_impersonation_chain",
)
# [END gcp_transfer_operation_pause_template_fields]
def __init__(
self,
*,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context: Context) -> None:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.pause_transfer_operation(operation_name=self.operation_name)
class CloudDataTransferServiceResumeOperationOperator(GoogleCloudBaseOperator):
"""
Resumes a transfer operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceResumeOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_operation_resume_template_fields]
template_fields: Sequence[str] = (
"operation_name",
"gcp_conn_id",
"api_version",
"google_impersonation_chain",
)
# [END gcp_transfer_operation_resume_template_fields]
def __init__(
self,
*,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.operation_name = operation_name
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context: Context) -> None:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.resume_transfer_operation(operation_name=self.operation_name)
class CloudDataTransferServiceCancelOperationOperator(GoogleCloudBaseOperator):
"""
Cancels a transfer operation in Google Storage Transfer Service.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceCancelOperationOperator`
:param operation_name: (Required) Name of the transfer operation.
:param api_version: API version used (e.g. v1).
:param gcp_conn_id: The connection ID used to connect to Google
Cloud Platform.
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_operation_cancel_template_fields]
template_fields: Sequence[str] = (
"operation_name",
"gcp_conn_id",
"api_version",
"google_impersonation_chain",
)
# [END gcp_transfer_operation_cancel_template_fields]
def __init__(
self,
*,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.operation_name:
raise AirflowException("The required parameter 'operation_name' is empty or None")
def execute(self, context: Context) -> None:
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.cancel_transfer_operation(operation_name=self.operation_name)
class CloudDataTransferServiceS3ToGCSOperator(GoogleCloudBaseOperator):
"""
Sync an S3 bucket with a Google Cloud Storage bucket using the Google Cloud Storage Transfer Service.
.. warning::
This operator is NOT idempotent. If you run it many times, many transfer
jobs will be created in the Google Cloud.
**Example**:
.. code-block:: python
s3_to_gcs_transfer_op = S3ToGoogleCloudStorageTransferOperator(
task_id="s3_to_gcs_transfer_example",
s3_bucket="my-s3-bucket",
project_id="my-gcp-project",
gcs_bucket="my-gcs-bucket",
dag=my_dag,
)
:param s3_bucket: The S3 bucket where to find the objects. (templated)
:param gcs_bucket: The destination Google Cloud Storage bucket
where you want to store the files. (templated)
:param s3_path: Optional root path where the source objects are. (templated)
:param gcs_path: Optional root path for transferred objects. (templated)
:param project_id: Optional ID of the Google Cloud Console project that
owns the job
:param aws_conn_id: The source S3 connection
:param gcp_conn_id: The destination connection ID to use
when connecting to Google Cloud Storage.
:param description: Optional transfer service job description
:param schedule: Optional transfer service schedule;
If not set, run transfer job once as soon as the operator runs
The format is described
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs.
With two additional improvements:
        * dates can be passed as :class:`datetime.date`
        * times can be passed as :class:`datetime.time`
:param object_conditions: Optional transfer service object conditions; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec
:param transfer_options: Optional transfer service transfer options; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec
:param wait: Wait for transfer to finish. It must be set to True, if
'delete_job_after_completion' is set to True.
:param timeout: Time to wait for the operation to end in seconds. Defaults to 60 seconds if not specified.
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param delete_job_after_completion: If True, delete the job after complete.
If set to True, 'wait' must be set to True.
"""
template_fields: Sequence[str] = (
"gcp_conn_id",
"s3_bucket",
"gcs_bucket",
"s3_path",
"gcs_path",
"description",
"object_conditions",
"google_impersonation_chain",
)
ui_color = "#e09411"
def __init__(
self,
*,
s3_bucket: str,
gcs_bucket: str,
s3_path: str | None = None,
gcs_path: str | None = None,
project_id: str | None = None,
aws_conn_id: str = "aws_default",
gcp_conn_id: str = "google_cloud_default",
description: str | None = None,
schedule: dict | None = None,
object_conditions: dict | None = None,
transfer_options: dict | None = None,
wait: bool = True,
timeout: float | None = None,
google_impersonation_chain: str | Sequence[str] | None = None,
delete_job_after_completion: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.gcs_bucket = gcs_bucket
self.s3_path = s3_path
self.gcs_path = gcs_path
self.project_id = project_id
self.aws_conn_id = aws_conn_id
self.gcp_conn_id = gcp_conn_id
self.description = description
self.schedule = schedule
self.object_conditions = object_conditions
self.transfer_options = transfer_options
self.wait = wait
self.timeout = timeout
self.google_impersonation_chain = google_impersonation_chain
self.delete_job_after_completion = delete_job_after_completion
self._validate_inputs()
def _validate_inputs(self) -> None:
if self.delete_job_after_completion and not self.wait:
raise AirflowException("If 'delete_job_after_completion' is True, then 'wait' must also be True.")
def execute(self, context: Context) -> None:
hook = CloudDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
body = self._create_body()
TransferJobPreprocessor(body=body, aws_conn_id=self.aws_conn_id, default_schedule=True).process_body()
job = hook.create_transfer_job(body=body)
if self.wait:
hook.wait_for_transfer_job(job, timeout=self.timeout)
if self.delete_job_after_completion:
hook.delete_transfer_job(job_name=job[NAME], project_id=self.project_id)
def _create_body(self) -> dict:
body = {
DESCRIPTION: self.description,
STATUS: GcpTransferJobsStatus.ENABLED,
TRANSFER_SPEC: {
AWS_S3_DATA_SOURCE: {
BUCKET_NAME: self.s3_bucket,
PATH: normalize_directory_path(self.s3_path),
},
GCS_DATA_SINK: {
BUCKET_NAME: self.gcs_bucket,
PATH: normalize_directory_path(self.gcs_path),
},
},
}
if self.project_id is not None:
body[PROJECT_ID] = self.project_id
if self.schedule is not None:
body[SCHEDULE] = self.schedule
if self.object_conditions is not None:
body[TRANSFER_SPEC][OBJECT_CONDITIONS] = self.object_conditions # type: ignore[index]
if self.transfer_options is not None:
body[TRANSFER_SPEC][TRANSFER_OPTIONS] = self.transfer_options # type: ignore[index]
return body
class CloudDataTransferServiceGCSToGCSOperator(GoogleCloudBaseOperator):
"""
Copies objects from a bucket to another using the Google Cloud Storage Transfer Service.
.. warning::
This operator is NOT idempotent. If you run it many times, many transfer
jobs will be created in the Google Cloud.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToGCSOperator`
**Example**:
.. code-block:: python
gcs_to_gcs_transfer_op = GoogleCloudStorageToGoogleCloudStorageTransferOperator(
task_id="gcs_to_gcs_transfer_example",
source_bucket="my-source-bucket",
destination_bucket="my-destination-bucket",
project_id="my-gcp-project",
dag=my_dag,
)
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:param destination_bucket: The destination Google Cloud Storage bucket
where the object should be. (templated)
:param source_path: Optional root path where the source objects are. (templated)
:param destination_path: Optional root path for transferred objects. (templated)
:param project_id: The ID of the Google Cloud Console project that
owns the job
:param gcp_conn_id: Optional connection ID to use when connecting to Google Cloud
Storage.
:param description: Optional transfer service job description
:param schedule: Optional transfer service schedule;
If not set, run transfer job once as soon as the operator runs
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs.
With two additional improvements:
        * dates can be passed as :class:`datetime.date`
        * times can be passed as :class:`datetime.time`
:param object_conditions: Optional transfer service object conditions; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#ObjectConditions
:param transfer_options: Optional transfer service transfer options; see
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#TransferOptions
:param wait: Wait for transfer to finish. It must be set to True, if
'delete_job_after_completion' is set to True.
:param timeout: Time to wait for the operation to end in seconds. Defaults to 60 seconds if not specified.
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param delete_job_after_completion: If True, delete the job after complete.
If set to True, 'wait' must be set to True.
"""
template_fields: Sequence[str] = (
"gcp_conn_id",
"source_bucket",
"destination_bucket",
"source_path",
"destination_path",
"description",
"object_conditions",
"google_impersonation_chain",
)
ui_color = "#e09411"
def __init__(
self,
*,
source_bucket: str,
destination_bucket: str,
source_path: str | None = None,
destination_path: str | None = None,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
description: str | None = None,
schedule: dict | None = None,
object_conditions: dict | None = None,
transfer_options: dict | None = None,
wait: bool = True,
timeout: float | None = None,
google_impersonation_chain: str | Sequence[str] | None = None,
delete_job_after_completion: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.destination_bucket = destination_bucket
self.source_path = source_path
self.destination_path = destination_path
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.description = description
self.schedule = schedule
self.object_conditions = object_conditions
self.transfer_options = transfer_options
self.wait = wait
self.timeout = timeout
self.google_impersonation_chain = google_impersonation_chain
self.delete_job_after_completion = delete_job_after_completion
self._validate_inputs()
def _validate_inputs(self) -> None:
if self.delete_job_after_completion and not self.wait:
raise AirflowException("If 'delete_job_after_completion' is True, then 'wait' must also be True.")
def execute(self, context: Context) -> None:
hook = CloudDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
body = self._create_body()
TransferJobPreprocessor(body=body, default_schedule=True).process_body()
job = hook.create_transfer_job(body=body)
if self.wait:
hook.wait_for_transfer_job(job, timeout=self.timeout)
if self.delete_job_after_completion:
hook.delete_transfer_job(job_name=job[NAME], project_id=self.project_id)
def _create_body(self) -> dict:
body = {
DESCRIPTION: self.description,
STATUS: GcpTransferJobsStatus.ENABLED,
TRANSFER_SPEC: {
GCS_DATA_SOURCE: {
BUCKET_NAME: self.source_bucket,
PATH: normalize_directory_path(self.source_path),
},
GCS_DATA_SINK: {
BUCKET_NAME: self.destination_bucket,
PATH: normalize_directory_path(self.destination_path),
},
},
}
if self.project_id is not None:
body[PROJECT_ID] = self.project_id
if self.schedule is not None:
body[SCHEDULE] = self.schedule
if self.object_conditions is not None:
body[TRANSFER_SPEC][OBJECT_CONDITIONS] = self.object_conditions # type: ignore[index]
if self.transfer_options is not None:
body[TRANSFER_SPEC][TRANSFER_OPTIONS] = self.transfer_options # type: ignore[index]
return body
| 44,407 | 39.703941 | 123 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/dataproc.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataproc operators."""
from __future__ import annotations
import inspect
import ntpath
import os
import re
import time
import uuid
import warnings
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Sequence
from google.api_core import operation # type: ignore
from google.api_core.exceptions import AlreadyExists, NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry, exponential_sleep_generator
from google.cloud.dataproc_v1 import Batch, Cluster, ClusterStatus, JobStatus
from google.protobuf.duration_pb2 import Duration
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.hooks.dataproc import DataprocHook, DataProcJobBuilder
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.links.dataproc import (
DATAPROC_BATCH_LINK,
DATAPROC_BATCHES_LINK,
DATAPROC_CLUSTER_LINK,
DATAPROC_JOB_LOG_LINK,
DATAPROC_WORKFLOW_LINK,
DATAPROC_WORKFLOW_TEMPLATE_LINK,
DataprocLink,
DataprocListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.dataproc import (
DataprocBatchTrigger,
DataprocClusterTrigger,
DataprocDeleteClusterTrigger,
DataprocSubmitTrigger,
DataprocWorkflowTrigger,
)
from airflow.utils import timezone
if TYPE_CHECKING:
from airflow.utils.context import Context
class ClusterGenerator:
"""Create a new Dataproc Cluster.
:param cluster_name: The name of the DataProc cluster to create. (templated)
:param project_id: The ID of the google cloud project in which
to create the cluster. (templated)
    :param num_workers: The # of workers to spin up. If set to zero, the
        cluster will be spun up in single node mode
:param storage_bucket: The storage bucket to use, setting to None lets dataproc
generate a custom one for you
    :param init_actions_uris: List of GCS URIs containing
dataproc initialization scripts
:param init_action_timeout: Amount of time executable scripts in
        init_actions_uris have to complete
:param metadata: dict of key-value google compute engine metadata entries
to add to all instances
:param image_version: the version of software inside the Dataproc cluster
:param custom_image: custom Dataproc image for more info see
https://cloud.google.com/dataproc/docs/guides/dataproc-images
:param custom_image_project_id: project id for the custom Dataproc image, for more info see
https://cloud.google.com/dataproc/docs/guides/dataproc-images
:param custom_image_family: family for the custom Dataproc image,
        family name can be provided using the --family flag when creating the custom image, for more info see
https://cloud.google.com/dataproc/docs/guides/dataproc-images
:param autoscaling_policy: The autoscaling policy used by the cluster. Only resource names
including projectid and location (region) are valid. Example:
``projects/[projectId]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]``
:param properties: dict of properties to set on
config files (e.g. spark-defaults.conf), see
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#SoftwareConfig
:param optional_components: List of optional cluster components, for more info see
https://cloud.google.com/dataproc/docs/reference/rest/v1/ClusterConfig#Component
:param num_masters: The # of master nodes to spin up
:param master_machine_type: Compute engine machine type to use for the primary node
:param master_disk_type: Type of the boot disk for the primary node
(default is ``pd-standard``).
Valid values: ``pd-ssd`` (Persistent Disk Solid State Drive) or
``pd-standard`` (Persistent Disk Hard Disk Drive).
:param master_disk_size: Disk size for the primary node
:param worker_machine_type: Compute engine machine type to use for the worker nodes
:param worker_disk_type: Type of the boot disk for the worker node
(default is ``pd-standard``).
Valid values: ``pd-ssd`` (Persistent Disk Solid State Drive) or
``pd-standard`` (Persistent Disk Hard Disk Drive).
:param worker_disk_size: Disk size for the worker nodes
:param num_preemptible_workers: The # of preemptible worker nodes to spin up
:param zone: The zone where the cluster will be located. Set to None to auto-zone. (templated)
:param network_uri: The network uri to be used for machine communication, cannot be
specified with subnetwork_uri
:param subnetwork_uri: The subnetwork uri to be used for machine communication,
cannot be specified with network_uri
:param internal_ip_only: If true, all instances in the cluster will only
have internal IP addresses. This can only be enabled for subnetwork
enabled networks
:param tags: The GCE tags to add to all instances
:param region: The specified region where the dataproc cluster is created.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param service_account: The service account of the dataproc instances.
:param service_account_scopes: The URIs of service account scopes to be included.
    :param idle_delete_ttl: The longest duration that the cluster can stay alive
        while idle. Exceeding this threshold causes the cluster to be auto-deleted.
        A duration in seconds.
:param auto_delete_time: The time when cluster will be auto-deleted.
:param auto_delete_ttl: The life duration of cluster, the cluster will be
auto-deleted at the end of this duration.
A duration in seconds. (If auto_delete_time is set this parameter will be ignored)
:param customer_managed_key: The customer-managed key used for disk encryption
``projects/[PROJECT_STORING_KEYS]/locations/[LOCATION]/keyRings/[KEY_RING_NAME]/cryptoKeys/[KEY_NAME]`` # noqa
:param enable_component_gateway: Provides access to the web interfaces of default and selected optional
components on the cluster.
""" # noqa: E501
def __init__(
self,
project_id: str,
num_workers: int | None = None,
zone: str | None = None,
network_uri: str | None = None,
subnetwork_uri: str | None = None,
internal_ip_only: bool | None = None,
tags: list[str] | None = None,
storage_bucket: str | None = None,
init_actions_uris: list[str] | None = None,
init_action_timeout: str = "10m",
metadata: dict | None = None,
custom_image: str | None = None,
custom_image_project_id: str | None = None,
custom_image_family: str | None = None,
image_version: str | None = None,
autoscaling_policy: str | None = None,
properties: dict | None = None,
optional_components: list[str] | None = None,
num_masters: int = 1,
master_machine_type: str = "n1-standard-4",
master_disk_type: str = "pd-standard",
master_disk_size: int = 1024,
worker_machine_type: str = "n1-standard-4",
worker_disk_type: str = "pd-standard",
worker_disk_size: int = 1024,
num_preemptible_workers: int = 0,
service_account: str | None = None,
service_account_scopes: list[str] | None = None,
idle_delete_ttl: int | None = None,
auto_delete_time: datetime | None = None,
auto_delete_ttl: int | None = None,
customer_managed_key: str | None = None,
enable_component_gateway: bool | None = False,
**kwargs,
) -> None:
self.project_id = project_id
self.num_masters = num_masters
self.num_workers = num_workers
self.num_preemptible_workers = num_preemptible_workers
self.storage_bucket = storage_bucket
self.init_actions_uris = init_actions_uris
self.init_action_timeout = init_action_timeout
self.metadata = metadata
self.custom_image = custom_image
self.custom_image_project_id = custom_image_project_id
self.custom_image_family = custom_image_family
self.image_version = image_version
self.properties = properties or {}
self.optional_components = optional_components
self.master_machine_type = master_machine_type
self.master_disk_type = master_disk_type
self.master_disk_size = master_disk_size
self.autoscaling_policy = autoscaling_policy
self.worker_machine_type = worker_machine_type
self.worker_disk_type = worker_disk_type
self.worker_disk_size = worker_disk_size
self.zone = zone
self.network_uri = network_uri
self.subnetwork_uri = subnetwork_uri
self.internal_ip_only = internal_ip_only
self.tags = tags
self.service_account = service_account
self.service_account_scopes = service_account_scopes
self.idle_delete_ttl = idle_delete_ttl
self.auto_delete_time = auto_delete_time
self.auto_delete_ttl = auto_delete_ttl
self.customer_managed_key = customer_managed_key
self.enable_component_gateway = enable_component_gateway
self.single_node = num_workers == 0
if self.custom_image and self.image_version:
raise ValueError("The custom_image and image_version can't be both set")
if self.custom_image_family and self.image_version:
raise ValueError("The image_version and custom_image_family can't be both set")
if self.custom_image_family and self.custom_image:
raise ValueError("The custom_image and custom_image_family can't be both set")
if self.single_node and self.num_preemptible_workers > 0:
raise ValueError("Single node cannot have preemptible workers.")
def _get_init_action_timeout(self) -> dict:
match = re.match(r"^(\d+)([sm])$", self.init_action_timeout)
if match:
val = float(match.group(1))
if match.group(2) == "s":
return {"seconds": int(val)}
elif match.group(2) == "m":
return {"seconds": int(timedelta(minutes=val).total_seconds())}
        raise AirflowException(
            "ClusterGenerator init_action_timeout"
            " should be expressed in minutes or seconds, e.g. 10m, 30s"
        )
def _build_gce_cluster_config(self, cluster_data):
# This variable is created since same string was being used multiple times
config = "gce_cluster_config"
if self.zone:
zone_uri = f"https://www.googleapis.com/compute/v1/projects/{self.project_id}/zones/{self.zone}"
cluster_data[config]["zone_uri"] = zone_uri
if self.metadata:
cluster_data[config]["metadata"] = self.metadata
if self.network_uri:
cluster_data[config]["network_uri"] = self.network_uri
if self.subnetwork_uri:
cluster_data[config]["subnetwork_uri"] = self.subnetwork_uri
if self.internal_ip_only:
if not self.subnetwork_uri:
raise AirflowException("Set internal_ip_only to true only when you pass a subnetwork_uri.")
cluster_data[config]["internal_ip_only"] = True
if self.tags:
cluster_data[config]["tags"] = self.tags
if self.service_account:
cluster_data[config]["service_account"] = self.service_account
if self.service_account_scopes:
cluster_data[config]["service_account_scopes"] = self.service_account_scopes
return cluster_data
def _build_lifecycle_config(self, cluster_data):
# This variable is created since same string was being used multiple times
lifecycle_config = "lifecycle_config"
if self.idle_delete_ttl:
cluster_data[lifecycle_config]["idle_delete_ttl"] = {"seconds": self.idle_delete_ttl}
if self.auto_delete_time:
utc_auto_delete_time = timezone.convert_to_utc(self.auto_delete_time)
cluster_data[lifecycle_config]["auto_delete_time"] = utc_auto_delete_time.strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
elif self.auto_delete_ttl:
cluster_data[lifecycle_config]["auto_delete_ttl"] = {"seconds": int(self.auto_delete_ttl)}
return cluster_data
def _build_cluster_data(self):
if self.zone:
master_type_uri = (
f"projects/{self.project_id}/zones/{self.zone}/machineTypes/{self.master_machine_type}"
)
worker_type_uri = (
f"projects/{self.project_id}/zones/{self.zone}/machineTypes/{self.worker_machine_type}"
)
else:
master_type_uri = self.master_machine_type
worker_type_uri = self.worker_machine_type
cluster_data = {
"gce_cluster_config": {},
"master_config": {
"num_instances": self.num_masters,
"machine_type_uri": master_type_uri,
"disk_config": {
"boot_disk_type": self.master_disk_type,
"boot_disk_size_gb": self.master_disk_size,
},
},
"worker_config": {
"num_instances": self.num_workers,
"machine_type_uri": worker_type_uri,
"disk_config": {
"boot_disk_type": self.worker_disk_type,
"boot_disk_size_gb": self.worker_disk_size,
},
},
"secondary_worker_config": {},
"software_config": {},
"lifecycle_config": {},
"encryption_config": {},
"autoscaling_config": {},
"endpoint_config": {},
}
if self.num_preemptible_workers > 0:
cluster_data["secondary_worker_config"] = {
"num_instances": self.num_preemptible_workers,
"machine_type_uri": worker_type_uri,
"disk_config": {
"boot_disk_type": self.worker_disk_type,
"boot_disk_size_gb": self.worker_disk_size,
},
"is_preemptible": True,
}
if self.storage_bucket:
cluster_data["config_bucket"] = self.storage_bucket
if self.image_version:
cluster_data["software_config"]["image_version"] = self.image_version
elif self.custom_image:
project_id = self.custom_image_project_id or self.project_id
custom_image_url = (
f"https://www.googleapis.com/compute/beta/projects/{project_id}"
f"/global/images/{self.custom_image}"
)
cluster_data["master_config"]["image_uri"] = custom_image_url
if not self.single_node:
cluster_data["worker_config"]["image_uri"] = custom_image_url
elif self.custom_image_family:
project_id = self.custom_image_project_id or self.project_id
custom_image_url = (
"https://www.googleapis.com/compute/beta/projects/"
f"{project_id}/global/images/family/{self.custom_image_family}"
)
cluster_data["master_config"]["image_uri"] = custom_image_url
if not self.single_node:
cluster_data["worker_config"]["image_uri"] = custom_image_url
cluster_data = self._build_gce_cluster_config(cluster_data)
if self.single_node:
self.properties["dataproc:dataproc.allow.zero.workers"] = "true"
if self.properties:
cluster_data["software_config"]["properties"] = self.properties
if self.optional_components:
cluster_data["software_config"]["optional_components"] = self.optional_components
cluster_data = self._build_lifecycle_config(cluster_data)
if self.init_actions_uris:
init_actions_dict = [
{"executable_file": uri, "execution_timeout": self._get_init_action_timeout()}
for uri in self.init_actions_uris
]
cluster_data["initialization_actions"] = init_actions_dict
if self.customer_managed_key:
cluster_data["encryption_config"] = {"gce_pd_kms_key_name": self.customer_managed_key}
if self.autoscaling_policy:
cluster_data["autoscaling_config"] = {"policy_uri": self.autoscaling_policy}
if self.enable_component_gateway:
cluster_data["endpoint_config"] = {"enable_http_port_access": self.enable_component_gateway}
return cluster_data
def make(self):
"""
Helper method for easier migration.
:return: Dict representing Dataproc cluster.
"""
return self._build_cluster_data()
class DataprocCreateClusterOperator(GoogleCloudBaseOperator):
"""Create a new cluster on Google Cloud Dataproc.
The operator will wait until the creation is successful or an error occurs
in the creation process.
    If the cluster already exists and ``use_if_exists`` is True, the operator will:
- if cluster state is ERROR then delete it if specified and raise error
- if cluster state is CREATING wait for it and then check for ERROR state
- if cluster state is DELETING wait for it and then create new cluster
Please refer to
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters
for a detailed explanation on the different parameters. Most of the configuration
parameters detailed in the link are available as a parameter to this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataprocCreateClusterOperator`
:param project_id: The ID of the Google cloud project in which
to create the cluster. (templated)
:param cluster_name: Name of the cluster to create
    :param labels: Labels that will be assigned to the created cluster. Note that adding
        labels to the ``ClusterConfig`` object passed in ``cluster_config`` will not label the
        cluster; cluster labels can only be set by passing values to this parameter of the
        DataprocCreateClusterOperator.
:param cluster_config: Required. The cluster config to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.ClusterConfig`
:param virtual_cluster_config: Optional. The virtual cluster config, used when creating a Dataproc
cluster that does not directly control the underlying compute resources, for example, when creating a
`Dataproc-on-GKE cluster
        <https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster>`_
:param region: The specified region where the dataproc cluster is created.
:param delete_on_error: If true the cluster will be deleted if created with ERROR state. Default
value is true.
:param use_if_exists: If true use existing cluster
:param request_id: Optional. A unique id used to identify the request. If the server receives two
        ``CreateClusterRequest`` requests with the same id, then the second request will be ignored and the
first ``google.longrunning.Operation`` created and stored in the backend is returned.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param polling_interval_seconds: Time (seconds) to wait between calls to check the run status.
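    A minimal usage sketch (the project, region, and cluster names are illustrative
    placeholders; ``cluster_config`` follows the ``ClusterConfig`` protobuf shape):
    .. code-block:: python
        CLUSTER_CONFIG = {
            "master_config": {
                "num_instances": 1,
                "machine_type_uri": "n1-standard-4",
                "disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
            },
            "worker_config": {
                "num_instances": 2,
                "machine_type_uri": "n1-standard-4",
                "disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
            },
        }
        create_cluster = DataprocCreateClusterOperator(
            task_id="create_cluster",
            project_id="my-project",
            region="us-central1",
            cluster_name="cluster-1",
            cluster_config=CLUSTER_CONFIG,
        )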
"""
template_fields: Sequence[str] = (
"project_id",
"region",
"cluster_config",
"virtual_cluster_config",
"cluster_name",
"labels",
"impersonation_chain",
)
template_fields_renderers = {"cluster_config": "json", "virtual_cluster_config": "json"}
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
cluster_name: str,
region: str,
project_id: str | None = None,
cluster_config: dict | Cluster | None = None,
virtual_cluster_config: dict | None = None,
labels: dict | None = None,
request_id: str | None = None,
delete_on_error: bool = True,
use_if_exists: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float = 1 * 60 * 60,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
**kwargs,
) -> None:
# TODO: remove one day
if cluster_config is None and virtual_cluster_config is None:
warnings.warn(
f"Passing cluster parameters by keywords to `{type(self).__name__}` will be deprecated. "
"Please provide cluster_config object using `cluster_config` parameter. "
"You can use `airflow.dataproc.ClusterGenerator.generate_cluster` "
"method to obtain cluster object.",
AirflowProviderDeprecationWarning,
stacklevel=1,
)
# Remove result of apply defaults
if "params" in kwargs:
del kwargs["params"]
# Create cluster object from kwargs
if project_id is None:
raise AirflowException(
"project_id argument is required when building cluster from keywords parameters"
)
kwargs["project_id"] = project_id
cluster_config = ClusterGenerator(**kwargs).make()
# Remove from kwargs cluster params passed for backward compatibility
cluster_params = inspect.signature(ClusterGenerator.__init__).parameters
for arg in cluster_params:
if arg in kwargs:
del kwargs[arg]
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.cluster_config = cluster_config
self.cluster_name = cluster_name
self.labels = labels
self.project_id = project_id
self.region = region
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.delete_on_error = delete_on_error
self.use_if_exists = use_if_exists
self.impersonation_chain = impersonation_chain
self.virtual_cluster_config = virtual_cluster_config
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def _create_cluster(self, hook: DataprocHook):
return hook.create_cluster(
project_id=self.project_id,
region=self.region,
cluster_name=self.cluster_name,
labels=self.labels,
cluster_config=self.cluster_config,
virtual_cluster_config=self.virtual_cluster_config,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
def _delete_cluster(self, hook):
self.log.info("Deleting the cluster")
hook.delete_cluster(region=self.region, cluster_name=self.cluster_name, project_id=self.project_id)
def _get_cluster(self, hook: DataprocHook) -> Cluster:
return hook.get_cluster(
project_id=self.project_id,
region=self.region,
cluster_name=self.cluster_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
def _handle_error_state(self, hook: DataprocHook, cluster: Cluster) -> None:
if cluster.status.state != cluster.status.State.ERROR:
return
self.log.info("Cluster is in ERROR state")
gcs_uri = hook.diagnose_cluster(
region=self.region, cluster_name=self.cluster_name, project_id=self.project_id
)
self.log.info("Diagnostic information for cluster %s available at: %s", self.cluster_name, gcs_uri)
if self.delete_on_error:
self._delete_cluster(hook)
raise AirflowException("Cluster was created but was in ERROR state.")
raise AirflowException("Cluster was created but is in ERROR state")
def _wait_for_cluster_in_deleting_state(self, hook: DataprocHook) -> None:
time_left = self.timeout
for time_to_sleep in exponential_sleep_generator(initial=10, maximum=120):
if time_left < 0:
raise AirflowException(f"Cluster {self.cluster_name} is still DELETING state, aborting")
time.sleep(time_to_sleep)
time_left = time_left - time_to_sleep
try:
self._get_cluster(hook)
except NotFound:
break
def _wait_for_cluster_in_creating_state(self, hook: DataprocHook) -> Cluster:
time_left = self.timeout
cluster = self._get_cluster(hook)
for time_to_sleep in exponential_sleep_generator(initial=10, maximum=120):
if cluster.status.state != cluster.status.State.CREATING:
break
if time_left < 0:
raise AirflowException(f"Cluster {self.cluster_name} is still CREATING state, aborting")
time.sleep(time_to_sleep)
time_left = time_left - time_to_sleep
cluster = self._get_cluster(hook)
return cluster
def execute(self, context: Context) -> dict:
self.log.info("Creating cluster: %s", self.cluster_name)
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
# Save data required to display extra link no matter what the cluster status will be
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_CLUSTER_LINK, resource=self.cluster_name
)
try:
# First try to create a new cluster
operation = self._create_cluster(hook)
if not self.deferrable:
cluster = hook.wait_for_operation(
timeout=self.timeout, result_retry=self.retry, operation=operation
)
self.log.info("Cluster created.")
return Cluster.to_dict(cluster)
else:
self.defer(
trigger=DataprocClusterTrigger(
cluster_name=self.cluster_name,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
except AlreadyExists:
if not self.use_if_exists:
raise
self.log.info("Cluster already exists.")
cluster = self._get_cluster(hook)
# Check if cluster is not in ERROR state
self._handle_error_state(hook, cluster)
if cluster.status.state == cluster.status.State.CREATING:
# Wait for cluster to be created
cluster = self._wait_for_cluster_in_creating_state(hook)
self._handle_error_state(hook, cluster)
elif cluster.status.state == cluster.status.State.DELETING:
# Wait for cluster to be deleted
self._wait_for_cluster_in_deleting_state(hook)
# Create new cluster
cluster = self._create_cluster(hook)
self._handle_error_state(hook, cluster)
return Cluster.to_dict(cluster)
def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
cluster_state = event["cluster_state"]
cluster_name = event["cluster_name"]
if cluster_state == ClusterStatus.State.ERROR:
raise AirflowException(f"Cluster is in ERROR state:\n{cluster_name}")
self.log.info("%s completed successfully.", self.task_id)
return event["cluster"]
class DataprocScaleClusterOperator(GoogleCloudBaseOperator):
"""Scale, up or down, a cluster on Google Cloud Dataproc.
The operator will wait until the cluster is re-scaled.
Example usage:
.. code-block:: python
        t1 = DataprocScaleClusterOperator(
task_id="dataproc_scale",
project_id="my-project",
cluster_name="cluster-1",
num_workers=10,
num_preemptible_workers=10,
graceful_decommission_timeout="1h",
)
.. seealso::
        For more detail about scaling clusters, have a look at the reference:
https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters
:param cluster_name: The name of the cluster to scale. (templated)
:param project_id: The ID of the google cloud project in which
the cluster runs. (templated)
:param region: The region for the dataproc cluster. (templated)
:param num_workers: The new number of workers
:param num_preemptible_workers: The new number of preemptible workers
:param graceful_decommission_timeout: Timeout for graceful YARN decommissioning.
Maximum value is 1d
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("cluster_name", "project_id", "region", "impersonation_chain")
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
cluster_name: str,
project_id: str | None = None,
region: str = "global",
num_workers: int = 2,
num_preemptible_workers: int = 0,
graceful_decommission_timeout: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.cluster_name = cluster_name
self.num_workers = num_workers
self.num_preemptible_workers = num_preemptible_workers
self.graceful_decommission_timeout = graceful_decommission_timeout
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
# TODO: Remove one day
warnings.warn(
f"The `{type(self).__name__}` operator is deprecated, "
"please use `DataprocUpdateClusterOperator` instead.",
AirflowProviderDeprecationWarning,
stacklevel=1,
)
def _build_scale_cluster_data(self) -> dict:
scale_data = {
"config": {
"worker_config": {"num_instances": self.num_workers},
"secondary_worker_config": {"num_instances": self.num_preemptible_workers},
}
}
return scale_data
@property
def _graceful_decommission_timeout_object(self) -> dict[str, int] | None:
if not self.graceful_decommission_timeout:
return None
timeout = None
match = re.match(r"^(\d+)([smdh])$", self.graceful_decommission_timeout)
if match:
if match.group(2) == "s":
timeout = int(match.group(1))
elif match.group(2) == "m":
val = float(match.group(1))
timeout = int(timedelta(minutes=val).total_seconds())
elif match.group(2) == "h":
val = float(match.group(1))
timeout = int(timedelta(hours=val).total_seconds())
elif match.group(2) == "d":
val = float(match.group(1))
timeout = int(timedelta(days=val).total_seconds())
if not timeout:
            raise AirflowException(
                "DataprocScaleClusterOperator graceful_decommission_timeout"
                " should be expressed in days, hours, minutes or seconds,"
                " e.g. 1d, 4h, 10m, 30s"
            )
return {"seconds": timeout}
def execute(self, context: Context) -> None:
"""Scale, up or down, a cluster on Google Cloud Dataproc."""
self.log.info("Scaling cluster: %s", self.cluster_name)
scaling_cluster_data = self._build_scale_cluster_data()
update_mask = ["config.worker_config.num_instances", "config.secondary_worker_config.num_instances"]
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
# Save data required to display extra link no matter what the cluster status will be
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_CLUSTER_LINK, resource=self.cluster_name
)
operation = hook.update_cluster(
project_id=self.project_id,
region=self.region,
cluster_name=self.cluster_name,
cluster=scaling_cluster_data,
graceful_decommission_timeout=self._graceful_decommission_timeout_object,
update_mask={"paths": update_mask},
)
operation.result()
self.log.info("Cluster scaling finished")
class DataprocDeleteClusterOperator(GoogleCloudBaseOperator):
"""Delete a cluster in a project.
:param region: Required. The Cloud Dataproc region in which to handle the request (templated).
:param cluster_name: Required. The cluster name (templated).
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to (templated).
:param cluster_uuid: Optional. Specifying the ``cluster_uuid`` means the RPC should fail
if cluster with specified UUID does not exist.
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``DeleteClusterRequest`` requests with the same id, then the second request will be ignored and the
first ``google.longrunning.Operation`` created and stored in the backend is returned.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param polling_interval_seconds: Time (seconds) to wait between calls to check the cluster status.
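    A minimal usage sketch (project, region, and cluster names are placeholders):
    .. code-block:: python
        delete_cluster = DataprocDeleteClusterOperator(
            task_id="delete_cluster",
            project_id="my-project",
            region="us-central1",
            cluster_name="cluster-1",
            trigger_rule="all_done",  # a common pattern: clean up even if upstream tasks failed
        )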
"""
template_fields: Sequence[str] = ("project_id", "region", "cluster_name", "impersonation_chain")
def __init__(
self,
*,
region: str,
cluster_name: str,
project_id: str | None = None,
cluster_uuid: str | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float = 1 * 60 * 60,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
**kwargs,
):
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.project_id = project_id
self.region = region
self.cluster_name = cluster_name
self.cluster_uuid = cluster_uuid
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def execute(self, context: Context) -> None:
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
operation = self._delete_cluster(hook)
if not self.deferrable:
hook.wait_for_operation(timeout=self.timeout, result_retry=self.retry, operation=operation)
self.log.info("Cluster deleted.")
else:
end_time: float = time.time() + self.timeout
self.defer(
trigger=DataprocDeleteClusterTrigger(
gcp_conn_id=self.gcp_conn_id,
project_id=self.project_id,
region=self.region,
cluster_name=self.cluster_name,
end_time=end_time,
metadata=self.metadata,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> Any:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event and event["status"] == "error":
raise AirflowException(event["message"])
elif event is None:
raise AirflowException("No event received in trigger callback")
self.log.info("Cluster deleted.")
def _delete_cluster(self, hook: DataprocHook):
self.log.info("Deleting cluster: %s", self.cluster_name)
return hook.delete_cluster(
project_id=self.project_id,
region=self.region,
cluster_name=self.cluster_name,
cluster_uuid=self.cluster_uuid,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class DataprocJobBaseOperator(GoogleCloudBaseOperator):
"""Base class for operators that launch job on DataProc.
:param region: The specified region where the dataproc cluster is created.
    :param job_name: The job name used in the DataProc cluster. This name by default
        is the task_id appended with the execution date, but can be templated. The
        name will always be appended with a random number to avoid name clashes.
:param cluster_name: The name of the DataProc cluster.
:param project_id: The ID of the Google Cloud project the cluster belongs to,
if not specified the project will be inferred from the provided GCP connection.
:param dataproc_properties: Map for the Hive properties. Ideal to put in
default arguments (templated)
:param dataproc_jars: HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop
MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. (templated)
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param labels: The labels to associate with this job. Label keys must contain 1 to 63 characters,
and must conform to RFC 1035. Label values may be empty, but, if present, must contain 1 to 63
characters, and must conform to RFC 1035. No more than 32 labels can be associated with a job.
:param job_error_states: Job states that should be considered error states.
Any states in this set will result in an error being raised and failure of the
task. Eg, if the ``CANCELLED`` state should also be considered a task failure,
pass in ``{'ERROR', 'CANCELLED'}``. Possible values are currently only
``'ERROR'`` and ``'CANCELLED'``, but could change in the future. Defaults to
``{'ERROR'}``.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return after submitting the job to the Dataproc API.
This is useful for submitting long running jobs and
waiting on them asynchronously using the DataprocJobSensor
:param deferrable: Run operator in the deferrable mode
:param polling_interval_seconds: time in seconds between polling for job completion.
The value is considered only when running in deferrable mode. Must be greater than 0.
:var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
This is useful for identifying or linking to the job in the Google Cloud Console
Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
an 8 character random string.
:vartype dataproc_job_id: str
"""
job_type = ""
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
region: str,
job_name: str = "{{task.task_id}}_{{ds_nodash}}",
cluster_name: str = "cluster-1",
project_id: str | None = None,
dataproc_properties: dict | None = None,
dataproc_jars: list[str] | None = None,
gcp_conn_id: str = "google_cloud_default",
labels: dict | None = None,
job_error_states: set[str] | None = None,
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.gcp_conn_id = gcp_conn_id
self.labels = labels
self.job_name = job_name
self.cluster_name = cluster_name
self.dataproc_properties = dataproc_properties
self.dataproc_jars = dataproc_jars
self.region = region
self.job_error_states = job_error_states if job_error_states is not None else {"ERROR"}
self.impersonation_chain = impersonation_chain
self.hook = DataprocHook(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain)
self.project_id = self.hook.project_id if project_id is None else project_id
self.job_template: DataProcJobBuilder | None = None
self.job: dict | None = None
self.dataproc_job_id = None
self.asynchronous = asynchronous
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def create_job_template(self) -> DataProcJobBuilder:
"""Initialize `self.job_template` with default values."""
if self.project_id is None:
raise AirflowException(
"project id should either be set via project_id "
"parameter or retrieved from the connection,"
)
job_template = DataProcJobBuilder(
project_id=self.project_id,
task_id=self.task_id,
cluster_name=self.cluster_name,
job_type=self.job_type,
properties=self.dataproc_properties,
)
job_template.set_job_name(self.job_name)
job_template.add_jar_file_uris(self.dataproc_jars)
job_template.add_labels(self.labels)
self.job_template = job_template
return job_template
    def _generate_job_template(self) -> dict:
if self.job_template:
job = self.job_template.build()
return job["job"]
raise Exception("Create a job template before")
def execute(self, context: Context):
if self.job_template:
self.job = self.job_template.build()
if self.job is None:
raise Exception("The job should be set here.")
self.dataproc_job_id = self.job["job"]["reference"]["job_id"]
self.log.info("Submitting %s job %s", self.job_type, self.dataproc_job_id)
job_object = self.hook.submit_job(
project_id=self.project_id, job=self.job["job"], region=self.region
)
job_id = job_object.reference.job_id
self.log.info("Job %s submitted successfully.", job_id)
# Save data required for extra links no matter what the job status will be
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_JOB_LOG_LINK, resource=job_id
)
if self.deferrable:
self.defer(
trigger=DataprocSubmitTrigger(
job_id=job_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
if not self.asynchronous:
self.log.info("Waiting for job %s to complete", job_id)
self.hook.wait_for_job(job_id=job_id, region=self.region, project_id=self.project_id)
self.log.info("Job %s completed successfully.", job_id)
return job_id
else:
raise AirflowException("Create a job template before")
def execute_complete(self, context, event=None) -> None:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
job_state = event["job_state"]
job_id = event["job_id"]
if job_state == JobStatus.State.ERROR:
raise AirflowException(f"Job failed:\n{job_id}")
if job_state == JobStatus.State.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{job_id}")
self.log.info("%s completed successfully.", self.task_id)
return job_id
def on_kill(self) -> None:
"""Callback called when the operator is killed; cancel any running job."""
if self.dataproc_job_id:
self.hook.cancel_job(project_id=self.project_id, job_id=self.dataproc_job_id, region=self.region)
class DataprocSubmitPigJobOperator(DataprocJobBaseOperator):
"""Start a Pig query Job on a Cloud DataProc cluster.
The parameters of the operation will be passed to the cluster.
    It is good practice to define dataproc_* parameters, such as the cluster name and
    UDF jars, in the ``default_args`` of the DAG.
.. code-block:: python
default_args = {
"cluster_name": "cluster-1",
"dataproc_pig_jars": [
"gs://example/udf/jar/datafu/1.2.0/datafu.jar",
"gs://example/udf/jar/gpig/1.2/gpig.jar",
],
}
    You can pass a Pig script as a string or as a file reference. Use ``variables`` to pass
    named parameters that are resolved on the cluster, or use Airflow template parameters
    that are resolved in the script before submission.
.. code-block:: python
        t1 = DataprocSubmitPigJobOperator(
task_id="dataproc_pig",
query="a_pig_script.pig",
variables={"out": "gs://example/output/{{ds}}"},
)
.. seealso::
        For more detail about job submission, have a look at the reference:
https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.jobs
:param query: The query or reference to the query
file (pg or pig extension). (templated)
:param query_uri: The HCFS URI of the script that contains the Pig queries.
:param variables: Map of named parameters for the query. (templated)
"""
template_fields: Sequence[str] = (
"query",
"variables",
"job_name",
"cluster_name",
"region",
"dataproc_jars",
"dataproc_properties",
"impersonation_chain",
)
template_ext = (".pg", ".pig")
ui_color = "#0273d4"
job_type = "pig_job"
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
query: str | None = None,
query_uri: str | None = None,
variables: dict | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
"The `{cls}` operator is deprecated, please use `DataprocSubmitJobOperator` instead. You can use"
" `generate_job` method of `{cls}` to generate dictionary representing your job"
" and use it with the new operator.".format(cls=type(self).__name__),
AirflowProviderDeprecationWarning,
stacklevel=1,
)
super().__init__(**kwargs)
self.query = query
self.query_uri = query_uri
self.variables = variables
def generate_job(self):
"""
Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
"""
job_template = self.create_job_template()
if self.query is None:
if self.query_uri is None:
raise AirflowException("One of query or query_uri should be set here")
job_template.add_query_uri(self.query_uri)
else:
job_template.add_query(self.query)
job_template.add_variables(self.variables)
return self._generate_job_template()
def execute(self, context: Context):
job_template = self.create_job_template()
if self.query is None:
if self.query_uri is None:
raise AirflowException("One of query or query_uri should be set here")
job_template.add_query_uri(self.query_uri)
else:
job_template.add_query(self.query)
job_template.add_variables(self.variables)
super().execute(context)
class DataprocSubmitHiveJobOperator(DataprocJobBaseOperator):
"""Start a Hive query Job on a Cloud DataProc cluster.
:param query: The query or reference to the query file (q extension).
:param query_uri: The HCFS URI of the script that contains the Hive queries.
:param variables: Map of named parameters for the query.
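    A minimal usage sketch (the region and cluster name are placeholders):
    .. code-block:: python
        hive_task = DataprocSubmitHiveJobOperator(
            task_id="hive_query",
            region="us-central1",
            cluster_name="cluster-1",
            query="SHOW DATABASES;",
        )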
"""
template_fields: Sequence[str] = (
"query",
"variables",
"job_name",
"cluster_name",
"region",
"dataproc_jars",
"dataproc_properties",
"impersonation_chain",
)
template_ext = (".q", ".hql")
ui_color = "#0273d4"
job_type = "hive_job"
def __init__(
self,
*,
query: str | None = None,
query_uri: str | None = None,
variables: dict | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
"The `{cls}` operator is deprecated, please use `DataprocSubmitJobOperator` instead. You can use"
" `generate_job` method of `{cls}` to generate dictionary representing your job"
" and use it with the new operator.".format(cls=type(self).__name__),
AirflowProviderDeprecationWarning,
stacklevel=1,
)
super().__init__(**kwargs)
self.query = query
self.query_uri = query_uri
self.variables = variables
if self.query is not None and self.query_uri is not None:
raise AirflowException("Only one of `query` and `query_uri` can be passed.")
def generate_job(self):
"""
Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
"""
job_template = self.create_job_template()
if self.query is None:
if self.query_uri is None:
raise AirflowException("One of query or query_uri should be set here")
job_template.add_query_uri(self.query_uri)
else:
job_template.add_query(self.query)
job_template.add_variables(self.variables)
return self._generate_job_template()
def execute(self, context: Context):
job_template = self.create_job_template()
if self.query is None:
if self.query_uri is None:
raise AirflowException("One of query or query_uri should be set here")
job_template.add_query_uri(self.query_uri)
else:
job_template.add_query(self.query)
job_template.add_variables(self.variables)
super().execute(context)
class DataprocSubmitSparkSqlJobOperator(DataprocJobBaseOperator):
"""Start a Spark SQL query Job on a Cloud DataProc cluster.
:param query: The query or reference to the query file (q extension). (templated)
:param query_uri: The HCFS URI of the script that contains the SQL queries.
:param variables: Map of named parameters for the query. (templated)
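    A minimal usage sketch (the region and cluster name are placeholders):
    .. code-block:: python
        spark_sql_task = DataprocSubmitSparkSqlJobOperator(
            task_id="spark_sql_query",
            region="us-central1",
            cluster_name="cluster-1",
            query="SHOW DATABASES;",
        )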
"""
template_fields: Sequence[str] = (
"query",
"variables",
"job_name",
"cluster_name",
"region",
"dataproc_jars",
"dataproc_properties",
"impersonation_chain",
)
template_ext = (".q",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#0273d4"
job_type = "spark_sql_job"
def __init__(
self,
*,
query: str | None = None,
query_uri: str | None = None,
variables: dict | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
"The `{cls}` operator is deprecated, please use `DataprocSubmitJobOperator` instead. You can use"
" `generate_job` method of `{cls}` to generate dictionary representing your job"
" and use it with the new operator.".format(cls=type(self).__name__),
AirflowProviderDeprecationWarning,
stacklevel=1,
)
super().__init__(**kwargs)
self.query = query
self.query_uri = query_uri
self.variables = variables
if self.query is not None and self.query_uri is not None:
raise AirflowException("Only one of `query` and `query_uri` can be passed.")
def generate_job(self):
"""
Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
"""
job_template = self.create_job_template()
        if self.query is None:
            if self.query_uri is None:
                raise AirflowException("One of query or query_uri should be set here")
            job_template.add_query_uri(self.query_uri)
else:
job_template.add_query(self.query)
job_template.add_variables(self.variables)
return self._generate_job_template()
def execute(self, context: Context):
job_template = self.create_job_template()
if self.query is None:
if self.query_uri is None:
raise AirflowException("One of query or query_uri should be set here")
job_template.add_query_uri(self.query_uri)
else:
job_template.add_query(self.query)
job_template.add_variables(self.variables)
super().execute(context)
class DataprocSubmitSparkJobOperator(DataprocJobBaseOperator):
"""Start a Spark Job on a Cloud DataProc cluster.
:param main_jar: The HCFS URI of the jar file that contains the main class
(use this or the main_class, not both together).
:param main_class: Name of the job class. (use this or the main_jar, not both
together).
:param arguments: Arguments for the job. (templated)
:param archives: List of archived files that will be unpacked in the work
directory. Should be stored in Cloud Storage.
:param files: List of files to be copied to the working directory
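    A minimal usage sketch (the region, cluster name, and jar path are placeholders; the
    examples jar commonly ships with Dataproc images but is assumed here, not guaranteed):
    .. code-block:: python
        spark_task = DataprocSubmitSparkJobOperator(
            task_id="spark_pi",
            region="us-central1",
            cluster_name="cluster-1",
            main_class="org.apache.spark.examples.SparkPi",
            dataproc_jars=["file:///usr/lib/spark/examples/jars/spark-examples.jar"],  # assumed path
            arguments=["1000"],
        )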
"""
template_fields: Sequence[str] = (
"arguments",
"job_name",
"cluster_name",
"region",
"dataproc_jars",
"dataproc_properties",
"impersonation_chain",
)
ui_color = "#0273d4"
job_type = "spark_job"
def __init__(
self,
*,
main_jar: str | None = None,
main_class: str | None = None,
arguments: list | None = None,
archives: list | None = None,
files: list | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
"The `{cls}` operator is deprecated, please use `DataprocSubmitJobOperator` instead. You can use"
" `generate_job` method of `{cls}` to generate dictionary representing your job"
" and use it with the new operator.".format(cls=type(self).__name__),
AirflowProviderDeprecationWarning,
stacklevel=1,
)
super().__init__(**kwargs)
self.main_jar = main_jar
self.main_class = main_class
self.arguments = arguments
self.archives = archives
self.files = files
def generate_job(self):
"""
Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
"""
job_template = self.create_job_template()
job_template.set_main(self.main_jar, self.main_class)
job_template.add_args(self.arguments)
job_template.add_archive_uris(self.archives)
job_template.add_file_uris(self.files)
return self._generate_job_template()
def execute(self, context: Context):
job_template = self.create_job_template()
job_template.set_main(self.main_jar, self.main_class)
job_template.add_args(self.arguments)
job_template.add_archive_uris(self.archives)
job_template.add_file_uris(self.files)
super().execute(context)
class DataprocSubmitHadoopJobOperator(DataprocJobBaseOperator):
"""Start a Hadoop Job on a Cloud DataProc cluster.
:param main_jar: The HCFS URI of the jar file containing the main class
(use this or the main_class, not both together).
:param main_class: Name of the job class. (use this or the main_jar, not both
together).
:param arguments: Arguments for the job. (templated)
:param archives: List of archived files that will be unpacked in the work
directory. Should be stored in Cloud Storage.
:param files: List of files to be copied to the working directory
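    A minimal usage sketch (the region, cluster name, jar path, and bucket are placeholders):
    .. code-block:: python
        hadoop_task = DataprocSubmitHadoopJobOperator(
            task_id="hadoop_wordcount",
            region="us-central1",
            cluster_name="cluster-1",
            main_jar="file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",  # assumed path
            arguments=["wordcount", "gs://my-bucket/input", "gs://my-bucket/output"],
        )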
"""
template_fields: Sequence[str] = (
"arguments",
"job_name",
"cluster_name",
"region",
"dataproc_jars",
"dataproc_properties",
"impersonation_chain",
)
ui_color = "#0273d4"
job_type = "hadoop_job"
def __init__(
self,
*,
main_jar: str | None = None,
main_class: str | None = None,
arguments: list | None = None,
archives: list | None = None,
files: list | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
"The `{cls}` operator is deprecated, please use `DataprocSubmitJobOperator` instead. You can use"
" `generate_job` method of `{cls}` to generate dictionary representing your job"
" and use it with the new operator.".format(cls=type(self).__name__),
AirflowProviderDeprecationWarning,
stacklevel=1,
)
super().__init__(**kwargs)
self.main_jar = main_jar
self.main_class = main_class
self.arguments = arguments
self.archives = archives
self.files = files
def generate_job(self):
"""Helper method for easier migration to `DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
"""
job_template = self.create_job_template()
job_template.set_main(self.main_jar, self.main_class)
job_template.add_args(self.arguments)
job_template.add_archive_uris(self.archives)
job_template.add_file_uris(self.files)
return self._generate_job_template()
def execute(self, context: Context):
job_template = self.create_job_template()
job_template.set_main(self.main_jar, self.main_class)
job_template.add_args(self.arguments)
job_template.add_archive_uris(self.archives)
job_template.add_file_uris(self.files)
super().execute(context)
class DataprocSubmitPySparkJobOperator(DataprocJobBaseOperator):
"""Start a PySpark Job on a Cloud DataProc cluster.
:param main: [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main
Python file to use as the driver. Must be a .py file. (templated)
:param arguments: Arguments for the job. (templated)
:param archives: List of archived files that will be unpacked in the work
directory. Should be stored in Cloud Storage.
:param files: List of files to be copied to the working directory
:param pyfiles: List of Python files to pass to the PySpark framework.
Supported file types: .py, .egg, and .zip
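    A minimal usage sketch (the region, cluster name, and GCS paths are placeholders):
    .. code-block:: python
        pyspark_task = DataprocSubmitPySparkJobOperator(
            task_id="pyspark_wordcount",
            region="us-central1",
            cluster_name="cluster-1",
            main="gs://my-bucket/jobs/wordcount.py",
            arguments=["gs://my-bucket/input", "gs://my-bucket/output"],
        )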
"""
template_fields: Sequence[str] = (
"main",
"arguments",
"job_name",
"cluster_name",
"region",
"dataproc_jars",
"dataproc_properties",
"impersonation_chain",
)
ui_color = "#0273d4"
job_type = "pyspark_job"
@staticmethod
def _generate_temp_filename(filename):
date = time.strftime("%Y%m%d%H%M%S")
return f"{date}_{str(uuid.uuid4())[:8]}_{ntpath.basename(filename)}"
def _upload_file_temp(self, bucket, local_file):
"""Upload a local file to a Google Cloud Storage bucket."""
temp_filename = self._generate_temp_filename(local_file)
if not bucket:
raise AirflowException(
"If you want Airflow to upload the local file to a temporary bucket, set "
"the 'temp_bucket' key in the connection string"
)
self.log.info("Uploading %s to %s", local_file, temp_filename)
GCSHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain).upload(
bucket_name=bucket,
object_name=temp_filename,
mime_type="application/x-python",
filename=local_file,
)
return f"gs://{bucket}/{temp_filename}"
def __init__(
self,
*,
main: str,
arguments: list | None = None,
archives: list | None = None,
pyfiles: list | None = None,
files: list | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
"The `{cls}` operator is deprecated, please use `DataprocSubmitJobOperator` instead. You can use"
" `generate_job` method of `{cls}` to generate dictionary representing your job"
" and use it with the new operator.".format(cls=type(self).__name__),
AirflowProviderDeprecationWarning,
stacklevel=1,
)
super().__init__(**kwargs)
self.main = main
self.arguments = arguments
self.archives = archives
self.files = files
self.pyfiles = pyfiles
def generate_job(self):
"""Helper method for easier migration to :class:`DataprocSubmitJobOperator`.
:return: Dict representing Dataproc job
"""
job_template = self.create_job_template()
# Check if the file is local, if that is the case, upload it to a bucket
if os.path.isfile(self.main):
cluster_info = self.hook.get_cluster(
project_id=self.project_id, region=self.region, cluster_name=self.cluster_name
)
bucket = cluster_info["config"]["config_bucket"]
self.main = f"gs://{bucket}/{self.main}"
job_template.set_python_main(self.main)
job_template.add_args(self.arguments)
job_template.add_archive_uris(self.archives)
job_template.add_file_uris(self.files)
job_template.add_python_file_uris(self.pyfiles)
return self._generate_job_template()
def execute(self, context: Context):
job_template = self.create_job_template()
# Check if the file is local, if that is the case, upload it to a bucket
if os.path.isfile(self.main):
cluster_info = self.hook.get_cluster(
project_id=self.project_id, region=self.region, cluster_name=self.cluster_name
)
bucket = cluster_info["config"]["config_bucket"]
self.main = self._upload_file_temp(bucket, self.main)
job_template.set_python_main(self.main)
job_template.add_args(self.arguments)
job_template.add_archive_uris(self.archives)
job_template.add_file_uris(self.files)
job_template.add_python_file_uris(self.pyfiles)
super().execute(context)
class DataprocCreateWorkflowTemplateOperator(GoogleCloudBaseOperator):
"""Creates new workflow template.
:param project_id: Optional. The ID of the Google Cloud project the cluster belongs to.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param template: The Dataproc workflow template to create. If a dict is provided,
it must be of the same form as the protobuf message WorkflowTemplate.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
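    A minimal usage sketch (project, region, and template contents are placeholders;
    ``CLUSTER_CONFIG`` is assumed to be a cluster config dict defined elsewhere, and the
    template dict follows the WorkflowTemplate protobuf shape):
    .. code-block:: python
        create_template = DataprocCreateWorkflowTemplateOperator(
            task_id="create_workflow_template",
            project_id="my-project",
            region="us-central1",
            template={
                "id": "spark-pi-template",
                "placement": {
                    "managed_cluster": {
                        "cluster_name": "pi-cluster",
                        "config": CLUSTER_CONFIG,  # assumed to be defined elsewhere
                    }
                },
                "jobs": [
                    {
                        "step_id": "spark_pi",
                        "spark_job": {"main_class": "org.apache.spark.examples.SparkPi"},
                    }
                ],
            },
        )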
"""
template_fields: Sequence[str] = ("region", "template")
template_fields_renderers = {"template": "json"}
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
template: dict,
region: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.region = region
self.template = template
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Creating template")
try:
workflow = hook.create_workflow_template(
region=self.region,
template=self.template,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Workflow %s created", workflow.name)
except AlreadyExists:
self.log.info("Workflow with given id already exists")
DataprocLink.persist(
context=context,
task_instance=self,
url=DATAPROC_WORKFLOW_TEMPLATE_LINK,
resource=self.template["id"],
)
class DataprocInstantiateWorkflowTemplateOperator(GoogleCloudBaseOperator):
"""Instantiate a WorkflowTemplate on Google Cloud Dataproc.
The operator will wait until the WorkflowTemplate is finished executing.
.. seealso::
Please refer to:
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.workflowTemplates/instantiate
:param template_id: The id of the template. (templated)
:param project_id: The ID of the google cloud project in which
the template runs
:param region: The specified region where the dataproc cluster is created.
:param parameters: a map of parameters for Dataproc Template in key-value format:
map (key: string, value: string)
Example: { "date_from": "2019-08-01", "date_to": "2019-08-02"}.
Values may not exceed 100 characters. Please refer to:
https://cloud.google.com/dataproc/docs/concepts/workflows/workflow-parameters
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``SubmitJobRequest`` requests with the same id, then the second request will be ignored and the first
``Job`` created and stored in the backend is returned.
It is recommended to always set this value to a UUID.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param polling_interval_seconds: Time (seconds) to wait between calls to check the run status.
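    A minimal usage sketch (project, region, and template id are placeholders; the
    ``parameters`` keys must be declared as parameters in the template itself):
    .. code-block:: python
        run_template = DataprocInstantiateWorkflowTemplateOperator(
            task_id="run_workflow_template",
            project_id="my-project",
            region="us-central1",
            template_id="spark-pi-template",
            parameters={"date_from": "2019-08-01", "date_to": "2019-08-02"},
        )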
"""
template_fields: Sequence[str] = ("template_id", "impersonation_chain", "request_id", "parameters")
template_fields_renderers = {"parameters": "json"}
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
template_id: str,
region: str,
project_id: str | None = None,
version: int | None = None,
request_id: str | None = None,
parameters: dict[str, str] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.template_id = template_id
self.parameters = parameters
self.version = version
self.project_id = project_id
self.region = region
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.request_id = request_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Instantiating template %s", self.template_id)
operation = hook.instantiate_workflow_template(
project_id=self.project_id,
region=self.region,
template_name=self.template_id,
version=self.version,
request_id=self.request_id,
parameters=self.parameters,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.workflow_id = operation.operation.name.split("/")[-1]
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_WORKFLOW_LINK, resource=self.workflow_id
)
self.log.info("Template instantiated. Workflow Id : %s", self.workflow_id)
if not self.deferrable:
hook.wait_for_operation(timeout=self.timeout, result_retry=self.retry, operation=operation)
self.log.info("Workflow %s completed successfully", self.workflow_id)
else:
self.defer(
trigger=DataprocWorkflowTrigger(
name=operation.operation.name,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None) -> None:
"""Callback for when the trigger fires.
This returns immediately. It relies on trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event["status"] == "failed" or event["status"] == "error":
self.log.exception("Unexpected error in the operation.")
raise AirflowException(event["message"])
self.log.info("Workflow %s completed successfully", event["operation_name"])
class DataprocInstantiateInlineWorkflowTemplateOperator(GoogleCloudBaseOperator):
"""Instantiate a WorkflowTemplate Inline on Google Cloud Dataproc.
The operator will wait until the WorkflowTemplate is finished executing.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataprocInstantiateInlineWorkflowTemplateOperator`
        For more detail about instantiating an inline template, have a look at the reference:
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.workflowTemplates/instantiateInline
:param template: The template contents. (templated)
:param project_id: The ID of the google cloud project in which
the template runs
:param region: The specified region where the dataproc cluster is created.
:param parameters: a map of parameters for Dataproc Template in key-value format:
map (key: string, value: string)
Example: { "date_from": "2019-08-01", "date_to": "2019-08-02"}.
Values may not exceed 100 characters. Please refer to:
https://cloud.google.com/dataproc/docs/concepts/workflows/workflow-parameters
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``SubmitJobRequest`` requests with the same id, then the second request will be ignored and the first
``Job`` created and stored in the backend is returned.
It is recommended to always set this value to a UUID.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param polling_interval_seconds: Time (seconds) to wait between calls to check the run status.
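A minimal usage sketch; the project, region, bucket path, and template contents below are
illustrative placeholders rather than values defined in this module:
.. code-block:: python
instantiate_inline_workflow = DataprocInstantiateInlineWorkflowTemplateOperator(
task_id="instantiate_inline_workflow",
project_id="my-project",
region="us-central1",
template={
"id": "sample-template",
"placement": {"managed_cluster": {"cluster_name": "sample-cluster", "config": {}}},
"jobs": [{"step_id": "step1", "pyspark_job": {"main_python_file_uri": "gs://my-bucket/job.py"}}],
},
)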
"""
template_fields: Sequence[str] = ("template", "impersonation_chain")
template_fields_renderers = {"template": "json"}
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
template: dict,
region: str,
project_id: str | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.template = template
self.project_id = project_id
self.region = region
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def execute(self, context: Context):
self.log.info("Instantiating Inline Template")
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
operation = hook.instantiate_inline_workflow_template(
template=self.template,
project_id=self.project_id or hook.project_id,
region=self.region,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.workflow_id = operation.operation.name.split("/")[-1]
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_WORKFLOW_LINK, resource=self.workflow_id
)
if not self.deferrable:
self.log.info("Template instantiated. Workflow Id : %s", self.workflow_id)
operation.result()
self.log.info("Workflow %s completed successfully", self.workflow_id)
else:
self.defer(
trigger=DataprocWorkflowTrigger(
name=operation.operation.name,
project_id=self.project_id or hook.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None) -> None:
"""Callback for when the trigger fires.
This returns immediately. It relies on the trigger to throw an exception;
otherwise it assumes execution was successful.
"""
if event["status"] == "failed" or event["status"] == "error":
self.log.exception("Unexpected error in the operation.")
raise AirflowException(event["message"])
self.log.info("Workflow %s completed successfully", event["operation_name"])
class DataprocSubmitJobOperator(GoogleCloudBaseOperator):
"""Submit a job to a cluster.
:param project_id: Optional. The ID of the Google Cloud project that the job belongs to.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param job: Required. The job resource.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Job`
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``SubmitJobRequest`` requests with the same id, then the second request will be ignored and the first
``Job`` created and stored in the backend is returned.
It is recommended to always set this value to a UUID.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return after submitting the job to the Dataproc API.
This is useful for submitting long running jobs and
waiting on them asynchronously using the DataprocJobSensor
:param deferrable: Run operator in the deferrable mode
:param polling_interval_seconds: time in seconds between polling for job completion.
The value is considered only when running in deferrable mode. Must be greater than 0.
:param cancel_on_kill: Flag which indicates whether to cancel the hook's job when on_kill is called
:param wait_timeout: How many seconds to wait for the job to be ready. Used only if ``asynchronous`` is False
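A minimal usage sketch; the project, region, cluster name, and bucket path are illustrative
placeholders, and the ``job`` dict follows the ``google.cloud.dataproc_v1.types.Job`` form:
.. code-block:: python
submit_pyspark_job = DataprocSubmitJobOperator(
task_id="submit_pyspark_job",
project_id="my-project",
region="us-central1",
job={
"placement": {"cluster_name": "my-cluster"},
"pyspark_job": {"main_python_file_uri": "gs://my-bucket/jobs/wordcount.py"},
},
)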
"""
template_fields: Sequence[str] = ("project_id", "region", "job", "impersonation_chain", "request_id")
template_fields_renderers = {"job": "json"}
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
job: dict,
region: str,
project_id: str | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
cancel_on_kill: bool = True,
wait_timeout: int | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.project_id = project_id
self.region = region
self.job = job
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
self.cancel_on_kill = cancel_on_kill
self.hook: DataprocHook | None = None
self.job_id: str | None = None
self.wait_timeout = wait_timeout
def execute(self, context: Context):
self.log.info("Submitting job")
self.hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
job_object = self.hook.submit_job(
project_id=self.project_id,
region=self.region,
job=self.job,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
new_job_id: str = job_object.reference.job_id
self.log.info("Job %s submitted successfully.", new_job_id)
# Save data required by extra links no matter what the job status will be
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_JOB_LOG_LINK, resource=new_job_id
)
self.job_id = new_job_id
if self.deferrable:
job = self.hook.get_job(project_id=self.project_id, region=self.region, job_id=self.job_id)
state = job.status.state
if state == JobStatus.State.DONE:
return self.job_id
elif state == JobStatus.State.ERROR:
raise AirflowException(f"Job failed:\n{job}")
elif state == JobStatus.State.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{job}")
self.defer(
trigger=DataprocSubmitTrigger(
job_id=self.job_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
elif not self.asynchronous:
self.log.info("Waiting for job %s to complete", new_job_id)
self.hook.wait_for_job(
job_id=new_job_id, region=self.region, project_id=self.project_id, timeout=self.wait_timeout
)
self.log.info("Job %s completed successfully.", new_job_id)
return self.job_id
def execute_complete(self, context, event=None) -> None:
"""Callback for when the trigger fires.
This returns immediately. It relies on the trigger to throw an exception;
otherwise it assumes execution was successful.
"""
job_state = event["job_state"]
job_id = event["job_id"]
if job_state == JobStatus.State.ERROR:
raise AirflowException(f"Job failed:\n{job_id}")
if job_state == JobStatus.State.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{job_id}")
self.log.info("%s completed successfully.", self.task_id)
return job_id
def on_kill(self):
if self.job_id and self.cancel_on_kill:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id, region=self.region)
class DataprocUpdateClusterOperator(GoogleCloudBaseOperator):
"""Update a cluster in a project.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param project_id: Optional. The ID of the Google Cloud project the cluster belongs to.
:param cluster_name: Required. The cluster name.
:param cluster: Required. The changes to the cluster.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Cluster`
:param update_mask: Required. Specifies the path, relative to ``Cluster``, of the field to update. For
example, to change the number of workers in a cluster to 5, the ``update_mask`` parameter would be
specified as ``config.worker_config.num_instances``, and the ``PATCH`` request body would specify the
new value. If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param graceful_decommission_timeout: Optional. Timeout for graceful YARN decommissioning. Graceful
decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout
specifies how long to wait for jobs in progress to finish before forcefully removing nodes (and
potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the maximum
allowed timeout is 1 day.
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``UpdateClusterRequest`` requests with the same id, then the second request will be ignored and the
first ``google.longrunning.Operation`` created and stored in the backend is returned.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param polling_interval_seconds: Time (seconds) to wait between calls to check the run status.
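A minimal usage sketch based on the ``update_mask`` example above; the project, region, and
cluster name are illustrative placeholders:
.. code-block:: python
scale_cluster = DataprocUpdateClusterOperator(
task_id="scale_cluster",
project_id="my-project",
region="us-central1",
cluster_name="my-cluster",
cluster={"config": {"worker_config": {"num_instances": 5}}},
update_mask={"paths": ["config.worker_config.num_instances"]},
graceful_decommission_timeout={"seconds": 600},
)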
"""
template_fields: Sequence[str] = (
"cluster_name",
"cluster",
"region",
"request_id",
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
cluster_name: str,
cluster: dict | Cluster,
update_mask: dict | FieldMask,
graceful_decommission_timeout: dict | Duration,
region: str,
request_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
**kwargs,
):
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.project_id = project_id
self.region = region
self.cluster_name = cluster_name
self.cluster = cluster
self.update_mask = update_mask
self.graceful_decommission_timeout = graceful_decommission_timeout
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
# Save data required by extra links no matter what the cluster status will be
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_CLUSTER_LINK, resource=self.cluster_name
)
self.log.info("Updating %s cluster.", self.cluster_name)
operation = hook.update_cluster(
project_id=self.project_id,
region=self.region,
cluster_name=self.cluster_name,
cluster=self.cluster,
update_mask=self.update_mask,
graceful_decommission_timeout=self.graceful_decommission_timeout,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if not self.deferrable:
hook.wait_for_operation(timeout=self.timeout, result_retry=self.retry, operation=operation)
else:
self.defer(
trigger=DataprocClusterTrigger(
cluster_name=self.cluster_name,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
self.log.info("Updated %s cluster.", self.cluster_name)
def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
"""
Callback for when the trigger fires - returns immediately.
Relies on the trigger to throw an exception; otherwise it assumes execution was successful.
"""
cluster_state = event["cluster_state"]
cluster_name = event["cluster_name"]
if cluster_state == ClusterStatus.State.ERROR:
raise AirflowException(f"Cluster is in ERROR state:\n{cluster_name}")
self.log.info("%s completed successfully.", self.task_id)
class DataprocCreateBatchOperator(GoogleCloudBaseOperator):
"""Create a batch workload.
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to. (templated)
:param region: Required. The Cloud Dataproc region in which to handle the request. (templated)
:param batch: Required. The batch to create. (templated)
:param batch_id: Optional. The ID to use for the batch, which will become the final component
of the batch's resource name.
This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. (templated)
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``CreateBatchRequest`` requests with the same id, then the second request will be ignored and
the first ``google.longrunning.Operation`` created and stored in the backend is returned.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param result_retry: Result retry object used to retry requests. Is used to decrease delay between
executing chained tasks in a DAG by specifying exact amount of seconds for executing.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return immediately after submitting the batch to the Dataproc API.
This is useful for creating a long-running batch and
waiting on it asynchronously using the DataprocBatchSensor
:param deferrable: Run operator in the deferrable mode.
:param polling_interval_seconds: Time (seconds) to wait between calls to check the run status.
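A minimal usage sketch; the project, region, batch id, and bucket path are illustrative
placeholders, and the ``batch`` dict follows the ``google.cloud.dataproc_v1.types.Batch`` form:
.. code-block:: python
create_batch = DataprocCreateBatchOperator(
task_id="create_batch",
project_id="my-project",
region="us-central1",
batch_id="my-batch-001",
batch={"pyspark_batch": {"main_python_file_uri": "gs://my-bucket/jobs/wordcount.py"}},
)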
"""
template_fields: Sequence[str] = (
"project_id",
"batch",
"batch_id",
"region",
"impersonation_chain",
)
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
region: str | None = None,
project_id: str | None = None,
batch: dict | Batch,
batch_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
result_retry: Retry | _MethodDefault = DEFAULT,
asynchronous: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 5,
**kwargs,
):
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.region = region
self.project_id = project_id
self.batch = batch
self.batch_id = batch_id
self.request_id = request_id
self.retry = retry
self.result_retry = result_retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.operation: operation.Operation | None = None
self.asynchronous = asynchronous
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
# batch_id might not be set and will be generated
if self.batch_id:
link = DATAPROC_BATCH_LINK.format(
region=self.region, project_id=self.project_id, resource=self.batch_id
)
self.log.info("Creating batch %s", self.batch_id)
self.log.info("Once started, the batch job will be available at %s", link)
else:
self.log.info("Starting batch job. The batch ID will be generated since it was not provided.")
if self.region is None:
raise AirflowException("Region should be set here")
try:
self.operation = hook.create_batch(
region=self.region,
project_id=self.project_id,
batch=self.batch,
batch_id=self.batch_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if self.operation is None:
raise RuntimeError("The operation should be set here!")
if not self.deferrable:
if not self.asynchronous:
result = hook.wait_for_operation(
timeout=self.timeout, result_retry=self.result_retry, operation=self.operation
)
self.log.info("Batch %s created", self.batch_id)
else:
return self.operation.operation.name
else:
# processing ends in execute_complete
self.defer(
trigger=DataprocBatchTrigger(
batch_id=self.batch_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
except AlreadyExists:
self.log.info("Batch with given id already exists")
# This is only likely to happen if batch_id was provided
# Could be running if Airflow was restarted after task started
# poll until a final state is reached
self.log.info("Attaching to the job %s if it is still running.", self.batch_id)
# deferrable handling of a batch_id that already exists - processing ends in execute_complete
if self.deferrable:
self.defer(
trigger=DataprocBatchTrigger(
batch_id=self.batch_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
# non-deferrable handling of a batch_id that already exists
result = hook.wait_for_batch(
batch_id=self.batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
wait_check_interval=self.polling_interval_seconds,
)
batch_id = self.batch_id or result.name.split("/")[-1]
self.handle_batch_status(context, result.state, batch_id)
return Batch.to_dict(result)
def execute_complete(self, context, event=None) -> None:
"""Callback for when the trigger fires.
This returns immediately. It relies on the trigger to throw an exception;
otherwise it assumes execution was successful.
"""
if event is None:
raise AirflowException("Batch failed.")
state = event["batch_state"]
batch_id = event["batch_id"]
self.handle_batch_status(context, state, batch_id)
def on_kill(self):
if self.operation:
self.operation.cancel()
def handle_batch_status(self, context: Context, state: Batch.State, batch_id: str) -> None:
# The existing batch may be in a number of states other than 'SUCCEEDED'.
# wait_for_operation doesn't fail if the job is cancelled, so we check for it here, which also
# catches a cancelling|cancelled|unspecified job from wait_for_batch or the deferred trigger.
link = DATAPROC_BATCH_LINK.format(region=self.region, project_id=self.project_id, resource=batch_id)
if state == Batch.State.FAILED:
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_BATCH_LINK, resource=batch_id
)
raise AirflowException("Batch job %s failed. Driver Logs: %s", batch_id, link)
if state in (Batch.State.CANCELLED, Batch.State.CANCELLING):
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_BATCH_LINK, resource=batch_id
)
raise AirflowException("Batch job %s was cancelled. Driver logs: %s", batch_id, link)
if state == Batch.State.STATE_UNSPECIFIED:
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_BATCH_LINK, resource=batch_id
)
raise AirflowException("Batch job %s unspecified. Driver logs: %s", batch_id, link)
self.log.info("Batch job %s completed. Driver logs: %s", batch_id, link)
DataprocLink.persist(context=context, task_instance=self, url=DATAPROC_BATCH_LINK, resource=batch_id)
class DataprocDeleteBatchOperator(GoogleCloudBaseOperator):
"""Delete the batch workload resource.
:param batch_id: Required. The ID to use for the batch, which will become the final component
of the batch's resource name.
This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
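A minimal usage sketch; the project, region, and batch id are illustrative placeholders:
.. code-block:: python
delete_batch = DataprocDeleteBatchOperator(
task_id="delete_batch",
project_id="my-project",
region="us-central1",
batch_id="my-batch-001",
)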
"""
template_fields: Sequence[str] = ("batch_id", "region", "project_id", "impersonation_chain")
def __init__(
self,
*,
batch_id: str,
region: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.batch_id = batch_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Deleting batch: %s", self.batch_id)
hook.delete_batch(
batch_id=self.batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Batch deleted.")
class DataprocGetBatchOperator(GoogleCloudBaseOperator):
"""Get the batch workload resource representation.
:param batch_id: Required. The ID to use for the batch, which will become the final component
of the batch's resource name.
This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
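A minimal usage sketch; the project, region, and batch id are illustrative placeholders.
The operator returns the batch converted to a dict via ``Batch.to_dict``:
.. code-block:: python
get_batch = DataprocGetBatchOperator(
task_id="get_batch",
project_id="my-project",
region="us-central1",
batch_id="my-batch-001",
)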
"""
template_fields: Sequence[str] = ("batch_id", "region", "project_id", "impersonation_chain")
operator_extra_links = (DataprocLink(),)
def __init__(
self,
*,
batch_id: str,
region: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.batch_id = batch_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Getting batch: %s", self.batch_id)
batch = hook.get_batch(
batch_id=self.batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocLink.persist(
context=context, task_instance=self, url=DATAPROC_BATCH_LINK, resource=self.batch_id
)
return Batch.to_dict(batch)
class DataprocListBatchesOperator(GoogleCloudBaseOperator):
"""List batch workloads.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to.
:param page_size: Optional. The maximum number of batches to return in each response. The service may
return fewer than this value. The default page size is 20; the maximum page size is 1000.
:param page_token: Optional. A page token received from a previous ``ListBatches`` call.
Provide this token to retrieve the subsequent page.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
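A minimal usage sketch; the project and region are illustrative placeholders. The operator
returns the listed batches converted to dicts via ``Batch.to_dict``:
.. code-block:: python
list_batches = DataprocListBatchesOperator(
task_id="list_batches",
project_id="my-project",
region="us-central1",
page_size=50,
)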
"""
template_fields: Sequence[str] = ("region", "project_id", "impersonation_chain")
operator_extra_links = (DataprocListLink(),)
def __init__(
self,
*,
region: str,
project_id: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.page_size = page_size
self.page_token = page_token
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
results = hook.list_batches(
region=self.region,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocListLink.persist(context=context, task_instance=self, url=DATAPROC_BATCHES_LINK)
return [Batch.to_dict(result) for result in results]
class DataprocCancelOperationOperator(GoogleCloudBaseOperator):
"""Cancel the batch workload resource.
:param operation_name: Required. The name of the operation resource to be cancelled.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
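A minimal usage sketch; the project, region, and operation name are illustrative placeholders
(such an operation name is returned, for example, by DataprocCreateBatchOperator when
``asynchronous=True``):
.. code-block:: python
cancel_operation = DataprocCancelOperationOperator(
task_id="cancel_operation",
project_id="my-project",
region="us-central1",
operation_name="projects/my-project/regions/us-central1/operations/sample-operation-id",
)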
"""
template_fields: Sequence[str] = ("operation_name", "region", "project_id", "impersonation_chain")
def __init__(
self,
*,
operation_name: str,
region: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.operation_name = operation_name
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Canceling operation: %s", self.operation_name)
hook.get_operations_client(region=self.region).cancel_operation(name=self.operation_name)
self.log.info("Operation canceled.")
| 120,928 | 44.240928 | 118 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/dataflow.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataflow operators."""
from __future__ import annotations
import copy
import re
import uuid
import warnings
from contextlib import ExitStack
from enum import Enum
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from airflow import AirflowException
from airflow.configuration import conf
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.apache.beam.hooks.beam import BeamHook, BeamRunnerType
from airflow.providers.google.cloud.hooks.dataflow import (
DEFAULT_DATAFLOW_LOCATION,
DataflowHook,
process_line_and_extract_dataflow_job_id_callback,
)
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.links.dataflow import DataflowJobLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.dataflow import TemplateJobStartTrigger
from airflow.version import version
if TYPE_CHECKING:
from airflow.utils.context import Context
class CheckJobRunning(Enum):
"""
Helper enum for choosing what to do if job is already running.
IgnoreJob - do not check if running
FinishIfRunning - finish current dag run with no action
WaitForRun - wait for job to finish and then continue with new job
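A minimal usage sketch; the enum is typically passed through the ``check_if_running``
argument of the configuration and operators defined below in this module (the job name and
location are illustrative placeholders):
.. code-block:: python
dataflow_config = DataflowConfiguration(
job_name="example-job",
location="europe-west1",
check_if_running=CheckJobRunning.WaitForRun,
)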
"""
IgnoreJob = 1
FinishIfRunning = 2
WaitForRun = 3
class DataflowConfiguration:
"""
Dataflow configuration for BeamRunJavaPipelineOperator and BeamRunPythonPipelineOperator.
.. seealso::
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`
and :class:`~airflow.providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator`.
:param job_name: The 'jobName' to use when executing the Dataflow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` or ``'job_name'`` in ``options`` will be overwritten.
:param append_job_name: True if unique suffix has to be appended to job name.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
.. warning::
This option requires Apache Beam 2.39.0 or newer.
:param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
instead of canceling during killing task instance. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed. (optional) default to 300s
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stderr/stdout log for the job id.
* looping while waiting for the job from the previous step to finish, by checking its status.
Step two is started just after step one has finished, so if you have wait_until_finished in your
pipeline code, step two will not start until the process stops. When this process stops,
step two will run, but it will only execute one iteration as the job will be in a terminal state.
If you do not call the wait_for_pipeline method in your pipeline but pass wait_until_finish=True
to the operator, the second loop will wait for the job's terminal state.
If you do not call the wait_for_pipeline method in your pipeline and pass wait_until_finish=False
to the operator, the second loop will check once whether the job is in a terminal state and exit the loop.
:param multiple_jobs: If pipeline creates multiple jobs then monitor all jobs. Supported only by
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`.
:param check_if_running: Before running the job, validate that a previous run is not in progress.
Supported only by:
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`.
:param service_account: Run the job as a specific service account, instead of the default GCE robot.
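A minimal sketch of passing this configuration to a Beam operator; the ``dataflow_config``
argument name, bucket path, and project are assumptions for illustration, not defined in this module:
.. code-block:: python
from airflow.providers.apache.beam.operators.beam import BeamRunPythonPipelineOperator
start_python_pipeline = BeamRunPythonPipelineOperator(
task_id="start_python_pipeline",
runner="DataflowRunner",
py_file="gs://my-bucket/pipelines/wordcount.py",
dataflow_config=DataflowConfiguration(
job_name="example-wordcount-job",
project_id="my-project",
location="europe-west1",
wait_until_finished=True,
),
)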
"""
template_fields: Sequence[str] = ("job_name", "location")
def __init__(
self,
*,
job_name: str = "{{task.task_id}}",
append_job_name: bool = True,
project_id: str | None = None,
location: str | None = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
drain_pipeline: bool = False,
cancel_timeout: int | None = 5 * 60,
wait_until_finished: bool | None = None,
multiple_jobs: bool | None = None,
check_if_running: CheckJobRunning = CheckJobRunning.WaitForRun,
service_account: str | None = None,
) -> None:
self.job_name = job_name
self.append_job_name = append_job_name
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.poll_sleep = poll_sleep
self.impersonation_chain = impersonation_chain
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.multiple_jobs = multiple_jobs
self.check_if_running = check_if_running
self.service_account = service_account
class DataflowCreateJavaJobOperator(GoogleCloudBaseOperator):
"""
Start a Java Cloud Dataflow batch job; the parameters of the operation will be passed to the job.
This class is deprecated.
Please use :class:`providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator`.
Example usage:
.. code-block:: python
default_args = {
"owner": "airflow",
"depends_on_past": False,
"start_date": (2016, 8, 1),
"email": ["[email protected]"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=30),
"dataflow_default_options": {
"project": "my-gcp-project",
"zone": "us-central1-f",
"stagingLocation": "gs://bucket/tmp/dataflow/staging/",
},
}
dag = DAG("test-dag", default_args=default_args)
task = DataflowCreateJavaJobOperator(
gcp_conn_id="gcp_default",
task_id="normalize-cal",
jar="{{var.value.gcp_dataflow_base}}pipeline-ingress-cal-normalize-1.0.jar",
options={
"autoscalingAlgorithm": "BASIC",
"maxNumWorkers": "50",
"start": "{{ds}}",
"partitionType": "DAY",
},
dag=dag,
)
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowCreateJavaJobOperator`
:param jar: The reference to a self executing Dataflow jar (templated).
:param job_name: The 'jobName' to use when executing the Dataflow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` in ``options`` will be overwritten.
:param dataflow_default_options: Map of default job options.
:param options: Map of job specific options. The parameter must be a dictionary.
The value can contain different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped
* If the value is True, the single option - ``--key`` (without value) will be added.
* If the value is a list, multiple options will be added for the key.
If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key=B`` options
will be added.
* Other value types will be replaced with the Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:param job_class: The name of the dataflow job class to be executed; it
is often not the main class configured in the dataflow jar file.
:param multiple_jobs: If pipeline creates multiple jobs then monitor all jobs
:param check_if_running: Before running the job, validate that a previous run is not in progress
(IgnoreJob = do not check, FinishIfRunning = finish the current DAG run with no action if a job
is already running, WaitForRun = wait until the previous job finishes and then run the new job).
``jar``, ``options``, and ``job_name`` are templated so you can use variables in them.
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stderr/stdout log for the job id.
* looping while waiting for the job from the previous step to finish.
This loop checks the status of the job.
Step two is started just after step one has finished, so if you have wait_until_finished in your
pipeline code, step two will not start until the process stops. When this process stops,
step two will run, but it will only execute one iteration as the job will be in a terminal state.
If you do not call the wait_for_pipeline method in your pipeline but pass wait_until_finish=True
to the operator, the second loop will wait for the job's terminal state.
If you do not call the wait_for_pipeline method in your pipeline and pass wait_until_finish=False
to the operator, the second loop will check once whether the job is in a terminal state and exit the loop.
Note that both
``dataflow_default_options`` and ``options`` will be merged to specify pipeline
execution parameters, and ``dataflow_default_options`` is expected to hold
high-level options, for instance, project and zone information, which
apply to all dataflow operators in the DAG.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. code-block:: python
default_args = {
"dataflow_default_options": {
"zone": "europe-west1-d",
"stagingLocation": "gs://my-staging-bucket/staging/",
}
}
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar (see documentation here:
https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
Use ``options`` to pass on options to your job.
.. code-block:: python
t1 = DataflowCreateJavaJobOperator(
task_id="dataflow_example",
jar="{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar",
options={
"autoscalingAlgorithm": "BASIC",
"maxNumWorkers": "50",
"start": "{{ds}}",
"partitionType": "DAY",
"labels": {"foo": "bar"},
},
gcp_conn_id="airflow-conn-id",
dag=my_dag,
)
"""
template_fields: Sequence[str] = ("options", "jar", "job_name")
ui_color = "#0273d4"
def __init__(
self,
*,
jar: str,
job_name: str = "{{task.task_id}}",
dataflow_default_options: dict | None = None,
options: dict | None = None,
project_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
job_class: str | None = None,
check_if_running: CheckJobRunning = CheckJobRunning.WaitForRun,
multiple_jobs: bool = False,
cancel_timeout: int | None = 10 * 60,
wait_until_finished: bool | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
f"The `{self.__class__.__name__}` operator is deprecated, "
f"please use `providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(**kwargs)
dataflow_default_options = dataflow_default_options or {}
options = options or {}
options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.jar = jar
self.multiple_jobs = multiple_jobs
self.job_name = job_name
self.dataflow_default_options = dataflow_default_options
self.options = options
self.poll_sleep = poll_sleep
self.job_class = job_class
self.check_if_running = check_if_running
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job_id = None
self.beam_hook: BeamHook | None = None
self.dataflow_hook: DataflowHook | None = None
def execute(self, context: Context):
"""Execute the Apache Beam Pipeline."""
self.beam_hook = BeamHook(runner=BeamRunnerType.DataflowRunner)
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
job_name = self.dataflow_hook.build_dataflow_job_name(job_name=self.job_name)
pipeline_options = copy.deepcopy(self.dataflow_default_options)
pipeline_options["jobName"] = self.job_name
pipeline_options["project"] = self.project_id or self.dataflow_hook.project_id
pipeline_options["region"] = self.location
pipeline_options.update(self.options)
pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
pipeline_options.update(self.options)
def set_current_job_id(job_id):
self.job_id = job_id
process_line_callback = process_line_and_extract_dataflow_job_id_callback(
on_new_job_id_callback=set_current_job_id
)
with ExitStack() as exit_stack:
if self.jar.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id)
tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.jar))
self.jar = tmp_gcs_file.name
is_running = False
if self.check_if_running != CheckJobRunning.IgnoreJob:
is_running = self.dataflow_hook.is_job_dataflow_running(
name=self.job_name,
variables=pipeline_options,
)
while is_running and self.check_if_running == CheckJobRunning.WaitForRun:
is_running = self.dataflow_hook.is_job_dataflow_running(
name=self.job_name,
variables=pipeline_options,
)
if not is_running:
pipeline_options["jobName"] = job_name
with self.dataflow_hook.provide_authorized_gcloud():
self.beam_hook.start_java_pipeline(
variables=pipeline_options,
jar=self.jar,
job_class=self.job_class,
process_line_callback=process_line_callback,
)
self.dataflow_hook.wait_for_done(
job_name=job_name,
location=self.location,
job_id=self.job_id,
multiple_jobs=self.multiple_jobs,
)
return {"job_id": self.job_id}
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.dataflow_hook.cancel_job(
job_id=self.job_id, project_id=self.project_id or self.dataflow_hook.project_id
)
class DataflowTemplatedJobStartOperator(GoogleCloudBaseOperator):
"""
Start a Templated Cloud Dataflow job; the parameters of the operation will be passed to the job.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowTemplatedJobStartOperator`
:param template: The reference to the Dataflow template.
:param job_name: The 'jobName' to use when executing the Dataflow template
(templated).
:param options: Map of job runtime environment options.
It will update environment argument if passed.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:param dataflow_default_options: Map of default job environment options.
:param parameters: Map of job specific parameters for the template.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param environment: Optional, Map of job runtime environment options.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:param append_job_name: True if unique suffix has to be appended to job name.
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
* running a subprocess and reading the stderr/stdout log for the job id.
* looping while waiting for the job from the previous step to finish.
This loop checks the status of the job.
Step two is started just after step one has finished, so if you have wait_until_finished in your
pipeline code, step two will not start until the process stops. When this process stops,
step two will run, but it will only execute one iteration as the job will be in a terminal state.
If you do not call the wait_for_pipeline method in your pipeline but pass wait_until_finish=True
to the operator, the second loop will wait for the job's terminal state.
If you do not call the wait_for_pipeline method in your pipeline and pass wait_until_finish=False
to the operator, the second loop will check once whether the job is in a terminal state and exit the loop.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
.. seealso::
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
.. code-block:: python
default_args = {
"dataflow_default_options": {
"zone": "europe-west1-d",
"tempLocation": "gs://my-staging-bucket/staging/",
}
}
You need to pass the path to your dataflow template as a file reference with the
``template`` parameter. Use ``parameters`` to pass on parameters to your job.
Use ``environment`` to pass on runtime environment variables to your job.
.. code-block:: python
t1 = DataflowTemplatedJobStartOperator(
task_id="dataflow_example",
template="{{var.value.gcp_dataflow_base}}",
parameters={
"inputFile": "gs://bucket/input/my_input.txt",
"outputFile": "gs://bucket/output/my_output.txt",
},
gcp_conn_id="airflow-conn-id",
dag=my_dag,
)
``template``, ``dataflow_default_options``, ``parameters``, and ``job_name`` are
templated, so you can use variables in them.
Note that ``dataflow_default_options`` is expected to save high-level options
for project information, which apply to all dataflow operators in the DAG.
.. seealso::
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
For more detail on job template execution have a look at the reference:
https://cloud.google.com/dataflow/docs/templates/executing-templates
:param deferrable: Run operator in the deferrable mode.
"""
template_fields: Sequence[str] = (
"template",
"job_name",
"options",
"parameters",
"project_id",
"location",
"gcp_conn_id",
"impersonation_chain",
"environment",
"dataflow_default_options",
)
ui_color = "#0273d4"
operator_extra_links = (DataflowJobLink(),)
def __init__(
self,
*,
template: str,
project_id: str | None = None,
job_name: str = "{{task.task_id}}",
options: dict[str, Any] | None = None,
dataflow_default_options: dict[str, Any] | None = None,
parameters: dict[str, str] | None = None,
location: str | None = None,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
environment: dict | None = None,
cancel_timeout: int | None = 10 * 60,
wait_until_finished: bool | None = None,
append_job_name: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template = template
self.job_name = job_name
self.options = options or {}
self.dataflow_default_options = dataflow_default_options or {}
self.parameters = parameters or {}
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.poll_sleep = poll_sleep
self.impersonation_chain = impersonation_chain
self.environment = environment
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.append_job_name = append_job_name
self.deferrable = deferrable
self.job: dict | None = None
self._validate_deferrable_params()
def _validate_deferrable_params(self):
if self.deferrable and self.wait_until_finished:
raise ValueError(
"Conflict between deferrable and wait_until_finished parameters "
"because it makes operator as blocking when it requires to be deferred. "
"It should be True as deferrable parameter or True as wait_until_finished."
)
if self.deferrable and self.wait_until_finished is None:
self.wait_until_finished = False
@cached_property
def hook(self) -> DataflowHook:
hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
return hook
def execute(self, context: Context):
def set_current_job(current_job):
self.job = current_job
DataflowJobLink.persist(self, context, self.project_id, self.location, self.job.get("id"))
options = self.dataflow_default_options
options.update(self.options)
self.job = self.hook.start_template_dataflow(
job_name=self.job_name,
variables=options,
parameters=self.parameters,
dataflow_template=self.template,
on_new_job_callback=set_current_job,
project_id=self.project_id,
location=self.location,
environment=self.environment,
append_job_name=self.append_job_name,
)
job_id = self.job.get("id")
if job_id is None:
raise AirflowException(
"While reading job object after template execution error occurred. Job object has no id."
)
if not self.deferrable:
return job_id
context["ti"].xcom_push(key="job_id", value=job_id)
self.defer(
trigger=TemplateJobStartTrigger(
project_id=self.project_id,
job_id=job_id,
location=self.location if self.location else DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.cancel_timeout,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]):
"""Method which executes after trigger finishes its work."""
if event["status"] == "error" or event["status"] == "stopped":
self.log.info("status: %s, msg: %s", event["status"], event["message"])
raise AirflowException(event["message"])
job_id = event["job_id"]
self.log.info("Task %s completed with response %s", self.task_id, event["message"])
return job_id
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job is not None:
self.log.info("Cancelling job %s", self.job_name)
self.hook.cancel_job(
job_name=self.job_name,
job_id=self.job.get("id"),
project_id=self.job.get("projectId"),
location=self.job.get("location"),
)
class DataflowStartFlexTemplateOperator(GoogleCloudBaseOperator):
"""
Starts flex templates with the Dataflow pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowStartFlexTemplateOperator`
:param body: The request body. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.locations.flexTemplates/launch#request-body
:param location: The location of the Dataflow job (for example europe-west1)
:param project_id: The ID of the GCP project that owns the job.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud
Platform.
    :param drain_pipeline: Optional, set to True if you want to stop a streaming job by draining it
        instead of canceling it when the task instance is killed. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
            to work properly, i.e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
        * running a subprocess and reading the stderr/stdout logs for the job id.
        * looping while waiting for the end of the job with the ID from the previous step.
This loop checks the status of the job.
Step two is started just after step one has finished, so if you have wait_until_finished in your
pipeline code, step two will not start until the process stops. When this process stops,
        step two will run, but it will only execute one iteration as the job will be in a terminal state.
        If you do not call the wait_for_pipeline method in your pipeline but pass wait_until_finish=True
        to the operator, the second loop will wait for the job's terminal state.
        If you do not call the wait_for_pipeline method in your pipeline and pass wait_until_finish=False
        to the operator, the second loop will check once whether the job is in a terminal state and exit.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param append_job_name: True if unique suffix has to be appended to job name.
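    A minimal usage sketch, assuming a flex template container spec already uploaded to GCS
    (the project, bucket and template paths below are illustrative placeholders):
    .. code-block:: python
        start_flex_template = DataflowStartFlexTemplateOperator(
            task_id="start_flex_template",
            project_id="my-project",
            location="europe-west1",
            body={
                "launchParameter": {
                    "containerSpecGcsPath": "gs://my-bucket/templates/my-template.json",
                    "jobName": "my-flex-template-job",
                    "parameters": {"inputFile": "gs://my-bucket/input/my_input.txt"},
                }
            },
        )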
"""
template_fields: Sequence[str] = ("body", "location", "project_id", "gcp_conn_id")
operator_extra_links = (DataflowJobLink(),)
def __init__(
self,
body: dict,
location: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
drain_pipeline: bool = False,
cancel_timeout: int | None = 10 * 60,
wait_until_finished: bool | None = None,
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
append_job_name: bool = True,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.body = body
self.location = location
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job: dict | None = None
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.append_job_name = append_job_name
self._validate_deferrable_params()
def _validate_deferrable_params(self):
if self.deferrable and self.wait_until_finished:
raise ValueError(
"Conflict between deferrable and wait_until_finished parameters "
"because it makes operator as blocking when it requires to be deferred. "
"It should be True as deferrable parameter or True as wait_until_finished."
)
if self.deferrable and self.wait_until_finished is None:
self.wait_until_finished = False
@cached_property
def hook(self) -> DataflowHook:
hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
impersonation_chain=self.impersonation_chain,
)
return hook
def execute(self, context: Context):
if self.append_job_name:
self._append_uuid_to_job_name()
def set_current_job(current_job):
self.job = current_job
DataflowJobLink.persist(self, context, self.project_id, self.location, self.job.get("id"))
self.job = self.hook.start_flex_template(
body=self.body,
location=self.location,
project_id=self.project_id,
on_new_job_callback=set_current_job,
)
job_id = self.job.get("id")
if job_id is None:
raise AirflowException(
"While reading job object after template execution error occurred. Job object has no id."
)
if not self.deferrable:
return self.job
self.defer(
trigger=TemplateJobStartTrigger(
project_id=self.project_id,
job_id=job_id,
location=self.location,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.cancel_timeout,
),
method_name="execute_complete",
)
def _append_uuid_to_job_name(self):
job_body = self.body.get("launch_parameter") or self.body.get("launchParameter")
job_name = job_body.get("jobName")
if job_name:
job_name += f"-{str(uuid.uuid4())[:8]}"
job_body["jobName"] = job_name
self.log.info("Job name was changed to %s", job_name)
def execute_complete(self, context: Context, event: dict):
"""Method which executes after trigger finishes its work."""
if event["status"] == "error" or event["status"] == "stopped":
self.log.info("status: %s, msg: %s", event["status"], event["message"])
raise AirflowException(event["message"])
job_id = event["job_id"]
self.log.info("Task %s completed with response %s", job_id, event["message"])
job = self.hook.get_job(job_id=job_id, project_id=self.project_id, location=self.location)
return job
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job is not None:
self.hook.cancel_job(
job_id=self.job.get("id"),
project_id=self.job.get("projectId"),
location=self.job.get("location"),
)
class DataflowStartSqlJobOperator(GoogleCloudBaseOperator):
"""
Starts Dataflow SQL query.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowStartSqlJobOperator`
.. warning::
        This operator requires the ``gcloud`` command (Google Cloud SDK) to be installed on the Airflow
        worker. See the `installation guide <https://cloud.google.com/sdk/docs/install>`__.
:param job_name: The unique name to assign to the Cloud Dataflow job.
:param query: The SQL query to execute.
:param options: Job parameters to be executed. It can be a dictionary with the following keys.
For more information, look at:
        `gcloud beta dataflow sql query
        <https://cloud.google.com/sdk/gcloud/reference/beta/dataflow/sql/query>`__
        command reference
:param location: The location of the Dataflow job (for example europe-west1)
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud
Platform.
    :param drain_pipeline: Optional, set to True if you want to stop a streaming job by draining it
        instead of canceling it when the task instance is killed. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
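    A minimal usage sketch, assuming an existing BigQuery dataset to receive the results
    (the project, dataset and table names below are illustrative placeholders):
    .. code-block:: python
        start_sql_job = DataflowStartSqlJobOperator(
            task_id="start_sql_job",
            job_name="sample-sql-job",
            query="SELECT sales_region, COUNT(*) AS count FROM my_dataset.sales GROUP BY sales_region",
            options={
                "bigquery-project": "my-project",
                "bigquery-dataset": "my_dataset",
                "bigquery-table": "output_table",
            },
            location="us-west1",
        )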
"""
template_fields: Sequence[str] = (
"job_name",
"query",
"options",
"location",
"project_id",
"gcp_conn_id",
)
template_fields_renderers = {"query": "sql"}
def __init__(
self,
job_name: str,
query: str,
options: dict[str, Any],
location: str = DEFAULT_DATAFLOW_LOCATION,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
drain_pipeline: bool = False,
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.job_name = job_name
self.query = query
self.options = options
self.location = location
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.drain_pipeline = drain_pipeline
self.impersonation_chain = impersonation_chain
self.job = None
self.hook: DataflowHook | None = None
def execute(self, context: Context):
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
drain_pipeline=self.drain_pipeline,
impersonation_chain=self.impersonation_chain,
)
def set_current_job(current_job):
self.job = current_job
job = self.hook.start_sql_job(
job_name=self.job_name,
query=self.query,
options=self.options,
location=self.location,
project_id=self.project_id,
on_new_job_callback=set_current_job,
)
return job
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job:
self.hook.cancel_job(
job_id=self.job.get("id"),
project_id=self.job.get("projectId"),
location=self.job.get("location"),
)
class DataflowCreatePythonJobOperator(GoogleCloudBaseOperator):
"""
Launching Cloud Dataflow jobs written in python.
    Note that both dataflow_default_options and options will be merged to specify pipeline
    execution parameters, and dataflow_default_options is expected to save high-level options,
    for instance, project and zone information, which apply to all dataflow operators in the DAG.
This class is deprecated.
Please use :class:`providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator`.
.. seealso::
For more detail on job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowCreatePythonJobOperator`
:param py_file: Reference to the python dataflow pipeline file.py, e.g.,
/some/local/file/path/to/your/python/pipeline/file. (templated)
:param job_name: The 'job_name' to use when executing the Dataflow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` or ``'job_name'`` in ``options`` will be overwritten.
:param py_options: Additional python options, e.g., ["-m", "-v"].
:param dataflow_default_options: Map of default job options.
    :param options: Map of job specific options; must be a dictionary.
        The values can be of different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped
* If the value is True, the single option - ``--key`` (without value) will be added.
        * If the value is a list, a repeated option will be added for each element.
          If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key=B`` options
          will be passed.
* Other value types will be replaced with the Python textual representation.
When defining labels (``labels`` option), you can also provide a dictionary.
:param py_interpreter: Python version of the beam pipeline.
        If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:param py_requirements: Additional python package(s) to install.
        If a value is passed to this parameter, a new virtual environment will be created with the
        additional packages installed.
You could also install the apache_beam package if it is not installed on your system or you want
to use a different version.
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status while the job is in the
JOB_STATE_RUNNING state.
    :param drain_pipeline: Optional, set to True if you want to stop a streaming job by draining it
        instead of canceling it when the task instance is killed. See:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
:param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:param wait_until_finished: (Optional)
If True, wait for the end of pipeline execution before exiting.
If False, only submits job.
If None, default behavior.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
.. warning::
You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator
            to work properly, i.e. you must use asynchronous execution. Otherwise, your pipeline will
always wait until finished. For more information, look at:
`Asynchronous execution
<https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__
The process of starting the Dataflow job in Airflow consists of two steps:
        * running a subprocess and reading the stderr/stdout logs for the job id.
        * looping while waiting for the end of the job with the ID from the previous step.
This loop checks the status of the job.
Step two is started just after step one has finished, so if you have wait_until_finished in your
pipeline code, step two will not start until the process stops. When this process stops,
        step two will run, but it will only execute one iteration as the job will be in a terminal state.
        If you do not call the wait_for_pipeline method in your pipeline but pass wait_until_finish=True
        to the operator, the second loop will wait for the job's terminal state.
        If you do not call the wait_for_pipeline method in your pipeline and pass wait_until_finish=False
        to the operator, the second loop will check once whether the job is in a terminal state and exit.
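    A minimal usage sketch for this deprecated operator (the GCS paths and the Beam version pin below
    are illustrative placeholders; prefer ``BeamRunPythonPipelineOperator`` for new DAGs):
    .. code-block:: python
        start_python_job = DataflowCreatePythonJobOperator(
            task_id="start_python_job",
            py_file="gs://my-bucket/dataflow/wordcount.py",
            py_options=[],
            options={"output": "gs://my-bucket/output"},
            py_requirements=["apache-beam[gcp]==2.46.0"],
            py_interpreter="python3",
            py_system_site_packages=False,
            location="europe-west3",
        )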
"""
template_fields: Sequence[str] = ("options", "dataflow_default_options", "job_name", "py_file")
def __init__(
self,
*,
py_file: str,
job_name: str = "{{task.task_id}}",
dataflow_default_options: dict | None = None,
options: dict | None = None,
py_interpreter: str = "python3",
py_options: list[str] | None = None,
py_requirements: list[str] | None = None,
py_system_site_packages: bool = False,
project_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
drain_pipeline: bool = False,
cancel_timeout: int | None = 10 * 60,
wait_until_finished: bool | None = None,
**kwargs,
) -> None:
# TODO: Remove one day
warnings.warn(
f"The `{self.__class__.__name__}` operator is deprecated, "
"please use `providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(**kwargs)
self.py_file = py_file
self.job_name = job_name
self.py_options = py_options or []
self.dataflow_default_options = dataflow_default_options or {}
self.options = options or {}
self.options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
self.py_interpreter = py_interpreter
self.py_requirements = py_requirements
self.py_system_site_packages = py_system_site_packages
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.poll_sleep = poll_sleep
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job_id = None
self.beam_hook: BeamHook | None = None
self.dataflow_hook: DataflowHook | None = None
def execute(self, context: Context):
"""Execute the python dataflow job."""
self.beam_hook = BeamHook(runner=BeamRunnerType.DataflowRunner)
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=None,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
job_name = self.dataflow_hook.build_dataflow_job_name(job_name=self.job_name)
pipeline_options = self.dataflow_default_options.copy()
pipeline_options["job_name"] = job_name
pipeline_options["project"] = self.project_id or self.dataflow_hook.project_id
pipeline_options["region"] = self.location
pipeline_options.update(self.options)
# Convert argument names from lowerCamelCase to snake case.
camel_to_snake = lambda name: re.sub(r"[A-Z]", lambda x: "_" + x.group(0).lower(), name)
formatted_pipeline_options = {camel_to_snake(key): pipeline_options[key] for key in pipeline_options}
def set_current_job_id(job_id):
self.job_id = job_id
process_line_callback = process_line_and_extract_dataflow_job_id_callback(
on_new_job_id_callback=set_current_job_id
)
with ExitStack() as exit_stack:
if self.py_file.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id)
tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.py_file))
self.py_file = tmp_gcs_file.name
with self.dataflow_hook.provide_authorized_gcloud():
self.beam_hook.start_python_pipeline(
variables=formatted_pipeline_options,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
process_line_callback=process_line_callback,
)
self.dataflow_hook.wait_for_done(
job_name=job_name,
location=self.location,
job_id=self.job_id,
multiple_jobs=False,
)
return {"job_id": self.job_id}
def on_kill(self) -> None:
self.log.info("On kill.")
if self.job_id:
self.dataflow_hook.cancel_job(
job_id=self.job_id, project_id=self.project_id or self.dataflow_hook.project_id
)
class DataflowStopJobOperator(GoogleCloudBaseOperator):
"""
Stops the job with the specified name prefix or Job ID.
    All jobs with the provided name prefix will be stopped.
    Streaming jobs are drained by default.
    Parameters ``job_name_prefix`` and ``job_id`` are mutually exclusive.
.. seealso::
For more details on stopping a pipeline see:
https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowStopJobOperator`
:param job_name_prefix: Name prefix specifying which jobs are to be stopped.
:param job_id: Job ID specifying which jobs are to be stopped.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Optional, Job location. If set to None or missing, "us-central1" will be used.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param poll_sleep: The time in seconds to sleep between polling Google
Cloud Platform for the dataflow job status to confirm it's stopped.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param drain_pipeline: Optional, set to False if you want to stop a streaming job by canceling it
        instead of draining. See: https://cloud.google.com/dataflow/docs/guides/stopping-a-pipeline
    :param stop_timeout: Wait time in seconds for the job to be successfully cancelled or drained.
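    A minimal usage sketch (the job name prefix below is an illustrative placeholder):
    .. code-block:: python
        stop_dataflow_job = DataflowStopJobOperator(
            task_id="stop_dataflow_job",
            location="europe-west3",
            job_name_prefix="start-template-job",
        )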
"""
def __init__(
self,
job_name_prefix: str | None = None,
job_id: str | None = None,
project_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
stop_timeout: int | None = 10 * 60,
drain_pipeline: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.poll_sleep = poll_sleep
self.stop_timeout = stop_timeout
self.job_name = job_name_prefix
self.job_id = job_id
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataflowHook | None = None
self.drain_pipeline = drain_pipeline
def execute(self, context: Context) -> None:
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.stop_timeout,
drain_pipeline=self.drain_pipeline,
)
if self.job_id or self.dataflow_hook.is_job_dataflow_running(
name=self.job_name,
project_id=self.project_id,
location=self.location,
):
self.dataflow_hook.cancel_job(
job_name=self.job_name,
project_id=self.project_id,
location=self.location,
job_id=self.job_id,
)
else:
self.log.info("No jobs to stop")
return None
| 58832 | 43.536715 | 119 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vision.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vision operator."""
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Sequence, Tuple
from google.api_core.exceptions import AlreadyExists
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.vision_v1 import (
AnnotateImageRequest,
Image,
Product,
ProductSet,
ReferenceImage,
)
from google.protobuf.field_mask_pb2 import FieldMask # type: ignore
from airflow.providers.google.cloud.hooks.vision import CloudVisionHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
MetaData = Sequence[Tuple[str, str]]
class CloudVisionCreateProductSetOperator(GoogleCloudBaseOperator):
"""Create a new ProductSet resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateProductSetOperator`
:param product_set: (Required) The ProductSet to create. If a dict is provided, it must be of the same
form as the protobuf message `ProductSet`.
:param location: (Required) The region where the ProductSet should be created. Valid regions
(as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1
:param project_id: (Optional) The project in which the ProductSet should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param product_set_id: (Optional) A user-supplied resource id for this ProductSet.
If set, the server will attempt to use this value as the resource id. If it is
already in use, an error is returned with code ALREADY_EXISTS. Must be at most
128 characters long. It cannot contain the character /.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
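    A minimal usage sketch, assuming ``ProductSet`` is imported from ``google.cloud.vision_v1``
    (the location and display name below are illustrative placeholders):
    .. code-block:: python
        product_set_create = CloudVisionCreateProductSetOperator(
            task_id="product_set_create",
            location="europe-west1",
            product_set=ProductSet(display_name="My Product Set"),
        )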
"""
# [START vision_productset_create_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_set_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_productset_create_template_fields]
def __init__(
self,
*,
product_set: dict | ProductSet,
location: str,
project_id: str | None = None,
product_set_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.product_set = product_set
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
return hook.create_product_set(
location=self.location,
project_id=self.project_id,
product_set=self.product_set,
product_set_id=self.product_set_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info(
"Product set with id %s already exists. Exiting from the create operation.",
self.product_set_id,
)
return self.product_set_id
class CloudVisionGetProductSetOperator(GoogleCloudBaseOperator):
"""Get information associated with a ProductSet.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionGetProductSetOperator`
:param location: (Required) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param product_set_id: (Required) The resource id of this ProductSet.
:param project_id: (Optional) The project in which the ProductSet is located. If set
to None or missing, the default `project_id` from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_productset_get_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_set_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_productset_get_template_fields]
def __init__(
self,
*,
location: str,
product_set_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.get_product_set(
location=self.location,
product_set_id=self.product_set_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionUpdateProductSetOperator(GoogleCloudBaseOperator):
"""Make changes to a `ProductSet` resource.
Only ``display_name`` can be updated currently.
.. note:: To locate the ``ProductSet`` resource, its ``name`` in the form
`projects/PROJECT_ID/locations/LOC_ID/productSets/PRODUCT_SET_ID` is necessary.
        You can provide the ``name`` directly as an attribute of the ``product_set``
object. You can also leave it blank, in which case ``name`` will be created
by the operator from ``location`` and ``product_set_id`` instead (and
optionally ``project_id``; if not present, the connection default will be
used).
This mechanism exists for your convenience, to allow leaving the
``project_id`` empty and having Airflow use the connection default.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionUpdateProductSetOperator`
:param product_set: (Required) The ProductSet resource which replaces the one on the
server. If a dict is provided, it must be of the same form as the protobuf
message `ProductSet`.
:param location: (Optional) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param product_set_id: (Optional) The resource id of this ProductSet.
:param project_id: (Optional) The project in which the ProductSet should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param update_mask: (Optional) The `FieldMask` that specifies which fields to update. If update_mask
isn't specified, all mutable fields are to be updated. Valid mask path is display_name. If a dict is
provided, it must be of the same form as the protobuf message `FieldMask`.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
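    A minimal usage sketch, assuming ``ProductSet`` is imported from ``google.cloud.vision_v1``
    (identifiers below are illustrative placeholders):
    .. code-block:: python
        product_set_update = CloudVisionUpdateProductSetOperator(
            task_id="product_set_update",
            location="europe-west1",
            product_set_id="my-product-set-id",
            product_set=ProductSet(display_name="My Updated Product Set"),
        )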
"""
# [START vision_productset_update_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_set_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_productset_update_template_fields]
def __init__(
self,
*,
product_set: dict | ProductSet,
location: str | None = None,
product_set_id: str | None = None,
project_id: str | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product_set = product_set
self.update_mask = update_mask
self.location = location
self.project_id = project_id
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if isinstance(self.product_set, dict):
self.product_set = ProductSet(self.product_set)
return hook.update_product_set(
location=self.location,
product_set_id=self.product_set_id,
project_id=self.project_id,
product_set=self.product_set,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionDeleteProductSetOperator(GoogleCloudBaseOperator):
"""Permanently deletes a ``ProductSet``.
``Products`` and ``ReferenceImages`` in the ``ProductSet`` are not deleted.
The actual image files are not deleted from Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteProductSetOperator`
:param location: (Required) The region where the ProductSet is located.
Valid regions (as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1
:param product_set_id: (Required) The resource id of this ProductSet.
:param project_id: (Optional) The project in which the ProductSet should be created.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
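    A minimal usage sketch (identifiers below are illustrative placeholders):
    .. code-block:: python
        product_set_delete = CloudVisionDeleteProductSetOperator(
            task_id="product_set_delete",
            location="europe-west1",
            product_set_id="my-product-set-id",
        )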
"""
# [START vision_productset_delete_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_set_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_productset_delete_template_fields]
def __init__(
self,
*,
location: str,
product_set_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.product_set_id = product_set_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_product_set(
location=self.location,
product_set_id=self.product_set_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionCreateProductOperator(GoogleCloudBaseOperator):
"""Create and return a new product resource.
Possible errors regarding the ``Product`` object provided:
- Returns ``INVALID_ARGUMENT`` if ``display_name`` is missing or longer than 4096 characters.
- Returns ``INVALID_ARGUMENT`` if ``description`` is longer than 4096 characters.
- Returns ``INVALID_ARGUMENT`` if ``product_category`` is missing or invalid.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateProductOperator`
:param location: (Required) The region where the Product should be created. Valid regions
(as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1
:param product: (Required) The product to create. If a dict is provided, it must be of the same form as
the protobuf message `Product`.
:param project_id: (Optional) The project in which the Product should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param product_id: (Optional) A user-supplied resource id for this Product.
If set, the server will attempt to use this value as the resource id. If it is
already in use, an error is returned with code ALREADY_EXISTS. Must be at most
128 characters long. It cannot contain the character /.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
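    A minimal usage sketch, assuming ``Product`` is imported from ``google.cloud.vision_v1``
    (the display name and category below are illustrative placeholders):
    .. code-block:: python
        product_create = CloudVisionCreateProductOperator(
            task_id="product_create",
            location="europe-west1",
            product=Product(display_name="My Product 1", product_category="toys"),
        )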
"""
# [START vision_product_create_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_product_create_template_fields]
def __init__(
self,
*,
location: str,
product: str,
project_id: str | None = None,
product_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product = product
self.project_id = project_id
self.product_id = product_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
return hook.create_product(
location=self.location,
product=self.product,
project_id=self.project_id,
product_id=self.product_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info(
"Product with id %s already exists. Exiting from the create operation.", self.product_id
)
return self.product_id
class CloudVisionGetProductOperator(GoogleCloudBaseOperator):
"""Get information associated with a ``Product``.
Possible errors:
- Returns `NOT_FOUND` if the `Product` does not exist.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionGetProductOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param product_id: (Required) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to
None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_get_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_product_get_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.get_product(
location=self.location,
product_id=self.product_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionUpdateProductOperator(GoogleCloudBaseOperator):
"""Make changes to a Product resource.
Only the display_name, description, and labels fields can be updated right now.
If labels are updated, the change will not be reflected in queries until the next index time.
.. note:: To locate the `Product` resource, its `name` in the form
`projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID` is necessary.
You can provide the `name` directly as an attribute of the `product` object. However, you can leave it
blank and provide `location` and `product_id` instead (and optionally `project_id` - if not present,
the connection default will be used) and the `name` will be created by the operator itself.
This mechanism exists for your convenience, to allow leaving the `project_id` empty and having Airflow
use the connection default `project_id`.
Possible errors related to the provided `Product`:
- Returns `NOT_FOUND` if the Product does not exist.
- Returns `INVALID_ARGUMENT` if `display_name` is present in update_mask but is missing from the request
or longer than 4096 characters.
- Returns `INVALID_ARGUMENT` if `description` is present in update_mask but is longer than 4096
characters.
- Returns `INVALID_ARGUMENT` if `product_category` is present in update_mask.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionUpdateProductOperator`
:param product: (Required) The Product resource which replaces the one on the server. product.name is
immutable. If a dict is provided, it must be of the same form as the protobuf message `Product`.
:param location: (Optional) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param product_id: (Optional) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param update_mask: (Optional) The `FieldMask` that specifies which fields to update. If update_mask
isn't specified, all mutable fields are to be updated. Valid mask paths include product_labels,
display_name, and description. If a dict is provided, it must be of the same form as the protobuf
message `FieldMask`.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
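    A minimal usage sketch, assuming ``Product`` is imported from ``google.cloud.vision_v1``
    (identifiers below are illustrative placeholders):
    .. code-block:: python
        product_update = CloudVisionUpdateProductOperator(
            task_id="product_update",
            location="europe-west1",
            product_id="my-product-id",
            product=Product(description="My updated description"),
        )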
"""
# [START vision_product_update_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_product_update_template_fields]
def __init__(
self,
*,
product: dict | Product,
location: str | None = None,
product_id: str | None = None,
project_id: str | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product = product
self.location = location
self.product_id = product_id
self.project_id = project_id
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.update_product(
product=self.product,
location=self.location,
product_id=self.product_id,
project_id=self.project_id,
update_mask=self.update_mask, # type: ignore
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionDeleteProductOperator(GoogleCloudBaseOperator):
"""Permanently delete a product and its reference images.
Metadata of the product and all its images will be deleted right away, but
search queries against ProductSets containing the product may still work
until all related caches are refreshed.
Possible errors:
- Returns `NOT_FOUND` if the product does not exist.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteProductOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param product_id: (Required) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_delete_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_product_delete_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_product(
location=self.location,
product_id=self.product_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionImageAnnotateOperator(GoogleCloudBaseOperator):
"""Run image detection and annotation for an image or a batch of images.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionImageAnnotateOperator`
:param request: (Required) Annotation request for image or a batch.
If a dict is provided, it must be of the same form as the protobuf
        message :class:`google.cloud.vision_v1.types.AnnotateImageRequest`
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_annotate_image_template_fields]
template_fields: Sequence[str] = (
"request",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_annotate_image_template_fields]
def __init__(
self,
*,
request: dict | AnnotateImageRequest,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.request = request
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if not isinstance(self.request, list):
response = hook.annotate_image(request=self.request, retry=self.retry, timeout=self.timeout)
else:
response = hook.batch_annotate_images(
requests=self.request, retry=self.retry, timeout=self.timeout
)
return response
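# A minimal usage sketch for CloudVisionImageAnnotateOperator above (not part of the
# provider module): the bucket URI, dag_id and task_id are placeholders. It shows the
# single-request form; passing a list of such dicts instead would route the call
# through batch_annotate_images in execute(). Wrapped in a helper so nothing runs at
# import time.
def _example_annotate_image_dag():
    from datetime import datetime

    from airflow import DAG
    from google.cloud.vision_v1 import Feature

    with DAG(
        dag_id="example_vision_annotate_image",
        start_date=datetime(2024, 1, 1),
        schedule=None,
    ) as dag:
        CloudVisionImageAnnotateOperator(
            task_id="annotate_image",
            request={
                "image": {"source": {"image_uri": "gs://example-bucket/logo.png"}},
                "features": [{"type_": Feature.Type.LOGO_DETECTION}],
            },
        )
    return dag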
class CloudVisionCreateReferenceImageOperator(GoogleCloudBaseOperator):
"""Create and return a new ReferenceImage ID resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateReferenceImageOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
:param reference_image: (Required) The reference image to create. If an image ID is specified, it is
ignored.
If a dict is provided, it must be of the same form as the protobuf message
:class:`google.cloud.vision_v1.types.ReferenceImage`
:param reference_image_id: (Optional) A user-supplied resource id for the ReferenceImage to be added.
If set, the server will attempt to use this value as the resource id. If it is already in use, an
error is returned with code ALREADY_EXISTS. Must be at most 128 characters long. It cannot contain
the character `/`.
:param product_id: (Optional) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_reference_image_create_template_fields]
template_fields: Sequence[str] = (
"location",
"reference_image",
"product_id",
"reference_image_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_reference_image_create_template_fields]
def __init__(
self,
*,
location: str,
reference_image: dict | ReferenceImage,
product_id: str,
reference_image_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.reference_image = reference_image
self.reference_image_id = reference_image_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
try:
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if isinstance(self.reference_image, dict):
self.reference_image = ReferenceImage(self.reference_image)
return hook.create_reference_image(
location=self.location,
product_id=self.product_id,
reference_image=self.reference_image,
reference_image_id=self.reference_image_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
            self.log.info(
                "ReferenceImage with id %s already exists. Exiting from the create operation.",
                self.reference_image_id,
            )
return self.reference_image_id
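# A usage sketch for CloudVisionCreateReferenceImageOperator above; the product id,
# reference image id and GCS URI are placeholders. A plain dict is accepted and
# converted to a ReferenceImage message in execute(), and re-running the task with the
# same reference_image_id is a no-op thanks to the AlreadyExists handling.
def _example_create_reference_image_task():
    return CloudVisionCreateReferenceImageOperator(
        task_id="create_reference_image",
        location="europe-west1",
        product_id="product_abc",
        reference_image_id="img_1",
        reference_image={"uri": "gs://example-bucket/product_abc.png"},
    )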
class CloudVisionDeleteReferenceImageOperator(GoogleCloudBaseOperator):
"""Delete a ReferenceImage ID resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDeleteReferenceImageOperator`
:param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are:
us-east1, us-west1, europe-west1, asia-east1
    :param reference_image_id: (Required) The resource id of the ReferenceImage to delete.
        Must be at most 128 characters long. It cannot contain the character `/`.
:param product_id: (Optional) The resource id of this Product.
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_reference_image_create_template_fields]
template_fields: Sequence[str] = (
"location",
"product_id",
"reference_image_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_reference_image_create_template_fields]
def __init__(
self,
*,
location: str,
product_id: str,
reference_image_id: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product_id = product_id
self.reference_image_id = reference_image_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_reference_image(
location=self.location,
product_id=self.product_id,
reference_image_id=self.reference_image_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionAddProductToProductSetOperator(GoogleCloudBaseOperator):
"""Add a Product to the specified ProductSet.
If the Product is already present, no change is made. One Product can be
added to at most 100 ProductSets.
Possible errors:
- Returns `NOT_FOUND` if the Product or the ProductSet doesn't exist.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionAddProductToProductSetOperator`
:param product_set_id: (Required) The resource id for the ProductSet to modify.
:param product_id: (Required) The resource id of this Product.
:param location: (Required) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_add_product_to_product_set_template_fields]
template_fields: Sequence[str] = (
"location",
"product_set_id",
"product_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_add_product_to_product_set_template_fields]
def __init__(
self,
*,
product_set_id: str,
product_id: str,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product_set_id = product_set_id
self.product_id = product_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.add_product_to_product_set(
product_set_id=self.product_set_id,
product_id=self.product_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionRemoveProductFromProductSetOperator(GoogleCloudBaseOperator):
"""Remove a Product from the specified ProductSet.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionRemoveProductFromProductSetOperator`
:param product_set_id: (Required) The resource id for the ProductSet to modify.
:param product_id: (Required) The resource id of this Product.
:param location: (Required) The region where the ProductSet is located. Valid regions (as of 2019-02-05)
are: us-east1, us-west1, europe-west1, asia-east1
:param project_id: (Optional) The project in which the Product is located. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_remove_product_from_product_set_template_fields]
template_fields: Sequence[str] = (
"location",
"product_set_id",
"product_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_remove_product_from_product_set_template_fields]
def __init__(
self,
*,
product_set_id: str,
product_id: str,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.product_set_id = product_set_id
self.product_id = product_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.remove_product_from_product_set(
product_set_id=self.product_set_id,
product_id=self.product_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudVisionDetectTextOperator(GoogleCloudBaseOperator):
"""Detect Text in the image.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDetectTextOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: (Optional) Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param language_hints: List of languages to use for TEXT_DETECTION.
In most cases, an empty value yields the best results since it enables automatic language detection.
For languages based on the Latin alphabet, setting language_hints is not needed.
:param web_detection_params: Parameters for web detection.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
:class:`google.cloud.vision_v1.types.AnnotateImageRequest`
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_detect_text_set_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_detect_text_set_template_fields]
def __init__(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
language_hints: str | list[str] | None = None,
web_detection_params: dict | None = None,
additional_properties: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.kwargs = kwargs
self.additional_properties = prepare_additional_parameters(
additional_properties=additional_properties,
language_hints=language_hints,
web_detection_params=web_detection_params,
)
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.text_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
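# A sketch of CloudVisionDetectTextOperator above with explicit language hints; the
# image URI is a placeholder. language_hints and web_detection_params are folded into
# additional_properties by prepare_additional_parameters() defined at the bottom of
# this module, so they end up inside the request's image_context.
def _example_detect_text_task():
    return CloudVisionDetectTextOperator(
        task_id="detect_text",
        image={"source": {"image_uri": "gs://example-bucket/receipt.png"}},
        max_results=5,
        language_hints=["en"],
    )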
class CloudVisionTextDetectOperator(GoogleCloudBaseOperator):
"""Detect Document Text in the image.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionTextDetectOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param language_hints: List of languages to use for TEXT_DETECTION.
In most cases, an empty value yields the best results since it enables automatic language detection.
For languages based on the Latin alphabet, setting language_hints is not needed.
:param web_detection_params: Parameters for web detection.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_document_detect_text_set_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
    )
# [END vision_document_detect_text_set_template_fields]
def __init__(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
language_hints: str | list[str] | None = None,
web_detection_params: dict | None = None,
additional_properties: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = prepare_additional_parameters(
additional_properties=additional_properties,
language_hints=language_hints,
web_detection_params=web_detection_params,
)
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.document_text_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
class CloudVisionDetectImageLabelsOperator(GoogleCloudBaseOperator):
"""Detect Document Text in the image.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDetectImageLabelsOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_detect_labels_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_detect_labels_template_fields]
def __init__(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = additional_properties
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.label_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
class CloudVisionDetectImageSafeSearchOperator(GoogleCloudBaseOperator):
"""Detect Document Text in the image.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDetectImageSafeSearchOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_detect_safe_search_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_detect_safe_search_template_fields]
def __init__(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = additional_properties
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.safe_search_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
def prepare_additional_parameters(
additional_properties: dict | None, language_hints: Any, web_detection_params: Any
) -> dict | None:
"""Create a value for the ``additional_properties`` parameter.
The new value is based on ``language_hints``, ``web_detection_params``, and
``additional_properties`` parameters specified by the user.
"""
if language_hints is None and web_detection_params is None:
return additional_properties
if additional_properties is None:
return {}
merged_additional_parameters = deepcopy(additional_properties)
if "image_context" not in merged_additional_parameters:
merged_additional_parameters["image_context"] = {}
merged_additional_parameters["image_context"]["language_hints"] = merged_additional_parameters[
"image_context"
].get("language_hints", language_hints)
merged_additional_parameters["image_context"]["web_detection_params"] = merged_additional_parameters[
"image_context"
].get("web_detection_params", web_detection_params)
return merged_additional_parameters
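# An illustrative call to prepare_additional_parameters() (not part of the provider
# module): with an image_context already present, user-supplied hints and web
# detection parameters are only filled in where the context does not define them.
def _example_prepare_additional_parameters():
    merged = prepare_additional_parameters(
        additional_properties={"image_context": {}},
        language_hints=["en"],
        web_detection_params={"include_geo_results": True},
    )
    # merged == {
    #     "image_context": {
    #         "language_hints": ["en"],
    #         "web_detection_params": {"include_geo_results": True},
    #     }
    # }
    return merged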
| 67,471 | 43.070542 | 140 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/cloud_sql.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud SQL operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, Mapping, Sequence
from googleapiclient.errors import HttpError
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLDatabaseHook, CloudSQLHook
from airflow.providers.google.cloud.links.cloud_sql import CloudSQLInstanceDatabaseLink, CloudSQLInstanceLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.cloud_sql import CloudSQLExportTrigger
from airflow.providers.google.cloud.utils.field_validator import GcpBodyFieldValidator
from airflow.providers.google.common.hooks.base_google import get_field
from airflow.providers.google.common.links.storage import FileDetailsLink
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.postgres.hooks.postgres import PostgresHook
if TYPE_CHECKING:
from airflow.utils.context import Context
SETTINGS = "settings"
SETTINGS_VERSION = "settingsVersion"
CLOUD_SQL_CREATE_VALIDATION: Sequence[dict] = [
dict(name="name", allow_empty=False),
dict(
name="settings",
type="dict",
fields=[
dict(name="tier", allow_empty=False),
dict(
name="backupConfiguration",
type="dict",
fields=[
dict(name="binaryLogEnabled", optional=True),
dict(name="enabled", optional=True),
dict(name="replicationLogArchivingEnabled", optional=True),
dict(name="startTime", allow_empty=False, optional=True),
],
optional=True,
),
dict(name="activationPolicy", allow_empty=False, optional=True),
dict(name="authorizedGaeApplications", type="list", optional=True),
dict(name="crashSafeReplicationEnabled", optional=True),
dict(name="dataDiskSizeGb", optional=True),
dict(name="dataDiskType", allow_empty=False, optional=True),
dict(name="databaseFlags", type="list", optional=True),
dict(
name="ipConfiguration",
type="dict",
fields=[
dict(
name="authorizedNetworks",
type="list",
fields=[
dict(name="expirationTime", optional=True),
dict(name="name", allow_empty=False, optional=True),
dict(name="value", allow_empty=False, optional=True),
],
optional=True,
),
dict(name="ipv4Enabled", optional=True),
dict(name="privateNetwork", allow_empty=False, optional=True),
dict(name="requireSsl", optional=True),
],
optional=True,
),
dict(
name="locationPreference",
type="dict",
fields=[
dict(name="followGaeApplication", allow_empty=False, optional=True),
dict(name="zone", allow_empty=False, optional=True),
],
optional=True,
),
dict(
name="maintenanceWindow",
type="dict",
fields=[
dict(name="hour", optional=True),
dict(name="day", optional=True),
dict(name="updateTrack", allow_empty=False, optional=True),
],
optional=True,
),
dict(name="pricingPlan", allow_empty=False, optional=True),
dict(name="replicationType", allow_empty=False, optional=True),
dict(name="storageAutoResize", optional=True),
dict(name="storageAutoResizeLimit", optional=True),
dict(name="userLabels", type="dict", optional=True),
],
),
dict(name="databaseVersion", allow_empty=False, optional=True),
dict(name="failoverReplica", type="dict", fields=[dict(name="name", allow_empty=False)], optional=True),
dict(name="masterInstanceName", allow_empty=False, optional=True),
dict(name="onPremisesConfiguration", type="dict", optional=True),
dict(name="region", allow_empty=False, optional=True),
dict(
name="replicaConfiguration",
type="dict",
fields=[
dict(name="failoverTarget", optional=True),
dict(
name="mysqlReplicaConfiguration",
type="dict",
fields=[
dict(name="caCertificate", allow_empty=False, optional=True),
dict(name="clientCertificate", allow_empty=False, optional=True),
dict(name="clientKey", allow_empty=False, optional=True),
dict(name="connectRetryInterval", optional=True),
dict(name="dumpFilePath", allow_empty=False, optional=True),
dict(name="masterHeartbeatPeriod", optional=True),
dict(name="password", allow_empty=False, optional=True),
dict(name="sslCipher", allow_empty=False, optional=True),
dict(name="username", allow_empty=False, optional=True),
dict(name="verifyServerCertificate", optional=True),
],
optional=True,
),
],
optional=True,
),
]
CLOUD_SQL_EXPORT_VALIDATION = [
dict(
name="exportContext",
type="dict",
fields=[
dict(name="fileType", allow_empty=False),
dict(name="uri", allow_empty=False),
dict(name="databases", optional=True, type="list"),
dict(
name="sqlExportOptions",
type="dict",
optional=True,
fields=[
dict(name="tables", optional=True, type="list"),
dict(name="schemaOnly", optional=True),
dict(
name="mysqlExportOptions",
type="dict",
optional=True,
fields=[dict(name="masterData")],
),
],
),
dict(
name="csvExportOptions",
type="dict",
optional=True,
fields=[
dict(name="selectQuery"),
dict(name="escapeCharacter", optional=True),
dict(name="quoteCharacter", optional=True),
dict(name="fieldsTerminatedBy", optional=True),
dict(name="linesTerminatedBy", optional=True),
],
),
dict(name="offload", optional=True),
],
)
]
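# An illustrative export body (placeholder bucket and database names) that satisfies
# CLOUD_SQL_EXPORT_VALIDATION above: a SQL dump of one database written to a GCS URI,
# with serverless export offloading enabled.
_EXAMPLE_EXPORT_BODY = {
    "exportContext": {
        "fileType": "sql",
        "uri": "gs://example-bucket/exports/dump.sql.gz",
        "databases": ["example_db"],
        "offload": True,
    }
}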
CLOUD_SQL_IMPORT_VALIDATION = [
dict(
name="importContext",
type="dict",
fields=[
dict(name="fileType", allow_empty=False),
dict(name="uri", allow_empty=False),
dict(name="database", optional=True, allow_empty=False),
dict(name="importUser", optional=True),
dict(
name="csvImportOptions",
type="dict",
optional=True,
fields=[dict(name="table"), dict(name="columns", type="list", optional=True)],
),
],
)
]
CLOUD_SQL_DATABASE_CREATE_VALIDATION = [
dict(name="instance", allow_empty=False),
dict(name="name", allow_empty=False),
dict(name="project", allow_empty=False),
]
CLOUD_SQL_DATABASE_PATCH_VALIDATION = [
dict(name="instance", optional=True),
dict(name="name", optional=True),
dict(name="project", optional=True),
dict(name="etag", optional=True),
dict(name="charset", optional=True),
dict(name="collation", optional=True),
]
class CloudSQLBaseOperator(GoogleCloudBaseOperator):
"""Abstract base operator for Google Cloud SQL operators.
:param instance: Cloud SQL instance ID. This does not include the project ID.
    :param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
*,
instance: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance = instance
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is empty")
if not self.instance:
raise AirflowException("The required parameter 'instance' is empty or None")
def _check_if_instance_exists(self, instance, hook: CloudSQLHook) -> dict | bool:
try:
return hook.get_instance(project_id=self.project_id, instance=instance)
except HttpError as e:
status = e.resp.status
if status == 404:
return False
raise e
def _check_if_db_exists(self, db_name, hook: CloudSQLHook) -> dict | bool:
try:
return hook.get_database(project_id=self.project_id, instance=self.instance, database=db_name)
except HttpError as e:
status = e.resp.status
if status == 404:
return False
raise e
def execute(self, context: Context):
pass
@staticmethod
def _get_settings_version(instance):
return instance.get(SETTINGS).get(SETTINGS_VERSION)
class CloudSQLCreateInstanceOperator(CloudSQLBaseOperator):
"""Create a new Cloud SQL instance.
If an instance with the same name exists, no action will be taken and
the operator will succeed.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLCreateInstanceOperator`
:param body: Body required by the Cloud SQL insert API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert
#request-body
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
    :param validate_body: Whether the body should be validated. Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_create_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_create_template_fields]
ui_color = "#FADBDA"
operator_extra_links = (CloudSQLInstanceLink(),)
def __init__(
self,
*,
body: dict,
instance: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
self.validate_body = validate_body
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(CLOUD_SQL_CREATE_VALIDATION, api_version=self.api_version).validate(
self.body
)
def execute(self, context: Context) -> None:
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_body_fields()
if not self._check_if_instance_exists(self.instance, hook):
hook.create_instance(project_id=self.project_id, body=self.body)
else:
self.log.info("Cloud SQL instance with ID %s already exists. Aborting create.", self.instance)
CloudSQLInstanceLink.persist(
context=context,
task_instance=self,
cloud_sql_instance=self.instance,
project_id=self.project_id or hook.project_id,
)
instance_resource = hook.get_instance(project_id=self.project_id, instance=self.instance)
service_account_email = instance_resource["serviceAccountEmailAddress"]
task_instance = context["task_instance"]
task_instance.xcom_push(key="service_account_email", value=service_account_email)
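# A sketch of how a downstream task could consume the "service_account_email" value
# pushed to XCom by execute() above, e.g. to grant the instance's service account
# write access on an export bucket. The task id is a placeholder.
_EXAMPLE_SERVICE_ACCOUNT_EMAIL_TEMPLATE = (
    "{{ task_instance.xcom_pull(task_ids='create_cloud_sql_instance', "
    "key='service_account_email') }}"
)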
class CloudSQLInstancePatchOperator(CloudSQLBaseOperator):
"""Update settings of a Cloud SQL instance.
Caution: This is a partial update, so only included values for the settings will be
updated.
In the request body, supply the relevant portions of an instance resource, according
to the rules of patch semantics.
https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLInstancePatchOperator`
:param body: Body required by the Cloud SQL patch API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/patch#request-body
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_patch_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_patch_template_fields]
ui_color = "#FBDAC8"
operator_extra_links = (CloudSQLInstanceLink(),)
def __init__(
self,
*,
body: dict,
instance: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
def execute(self, context: Context):
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if not self._check_if_instance_exists(self.instance, hook):
raise AirflowException(
f"Cloud SQL instance with ID {self.instance} does not exist. "
"Please specify another instance to patch."
)
else:
CloudSQLInstanceLink.persist(
context=context,
task_instance=self,
cloud_sql_instance=self.instance,
project_id=self.project_id or hook.project_id,
)
return hook.patch_instance(project_id=self.project_id, body=self.body, instance=self.instance)
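# An illustrative partial patch body for CloudSQLInstancePatchOperator above
# (placeholder values): per patch semantics, only the keys present under "settings"
# are changed on the instance, everything else is left as-is.
_EXAMPLE_PATCH_BODY = {
    "settings": {
        "dataDiskSizeGb": 35,
        "maintenanceWindow": {"hour": 5, "day": 7, "updateTrack": "canary"},
    }
}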
class CloudSQLDeleteInstanceOperator(CloudSQLBaseOperator):
"""Delete a Cloud SQL instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLDeleteInstanceOperator`
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_delete_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_delete_template_fields]
ui_color = "#FEECD2"
def execute(self, context: Context) -> bool | None:
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if not self._check_if_instance_exists(self.instance, hook):
print(f"Cloud SQL instance with ID {self.instance} does not exist. Aborting delete.")
return True
else:
return hook.delete_instance(project_id=self.project_id, instance=self.instance)
class CloudSQLCloneInstanceOperator(CloudSQLBaseOperator):
"""Clone an instance to a target instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLCloneInstanceOperator`
:param instance: Database instance ID to be cloned. This does not include the
project ID.
:param destination_instance_name: Database instance ID to be created. This does not include the
project ID.
:param clone_context: additional clone_context parameters as described in
https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1/instances/clone
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_clone_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"destination_instance_name",
"gcp_conn_id",
"api_version",
)
# [END gcp_sql_clone_template_fields]
def __init__(
self,
*,
instance: str,
destination_instance_name: str,
clone_context: dict | None = None,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.destination_instance_name = destination_instance_name
self.clone_context = clone_context or {}
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.destination_instance_name:
raise AirflowException("The required parameter 'destination_instance_name' is empty or None")
def execute(self, context: Context):
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if not self._check_if_instance_exists(self.instance, hook):
raise AirflowException(
f"Cloud SQL instance with ID {self.instance} does not exist. "
"Please specify another instance to patch."
)
else:
body = {
"cloneContext": {
"kind": "sql#cloneContext",
"destinationInstanceName": self.destination_instance_name,
**self.clone_context,
}
}
return hook.clone_instance(
project_id=self.project_id,
body=body,
instance=self.instance,
)
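# A usage sketch for CloudSQLCloneInstanceOperator above; instance names are
# placeholders and "pointInTime" is an optional cloneContext field (point-in-time
# recovery) that execute() merges into the request body it builds.
def _example_clone_instance_task():
    return CloudSQLCloneInstanceOperator(
        task_id="clone_instance",
        instance="primary-instance",
        destination_instance_name="primary-instance-clone",
        clone_context={"pointInTime": "2024-01-01T00:00:00.000Z"},
    )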
class CloudSQLCreateInstanceDatabaseOperator(CloudSQLBaseOperator):
"""Create a new database inside a Cloud SQL instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLCreateInstanceDatabaseOperator`
:param instance: Database instance ID. This does not include the project ID.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param validate_body: Whether the body should be validated. Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_db_create_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_db_create_template_fields]
ui_color = "#FFFCDB"
operator_extra_links = (CloudSQLInstanceDatabaseLink(),)
def __init__(
self,
*,
instance: str,
body: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
self.validate_body = validate_body
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(
CLOUD_SQL_DATABASE_CREATE_VALIDATION, api_version=self.api_version
).validate(self.body)
def execute(self, context: Context) -> bool | None:
self._validate_body_fields()
database = self.body.get("name")
if not database:
self.log.error(
"Body doesn't contain 'name'. Cannot check if the"
" database already exists in the instance %s.",
self.instance,
)
return False
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
CloudSQLInstanceDatabaseLink.persist(
context=context,
task_instance=self,
cloud_sql_instance=self.instance,
project_id=self.project_id or hook.project_id,
)
if self._check_if_db_exists(database, hook):
self.log.info(
"Cloud SQL instance with ID %s already contains database '%s'. Aborting database insert.",
self.instance,
database,
)
return True
else:
return hook.create_database(project_id=self.project_id, instance=self.instance, body=self.body)
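# An illustrative database-create body (placeholder names) matching
# CLOUD_SQL_DATABASE_CREATE_VALIDATION above; the "name" key is also what execute()
# uses to check whether the database already exists before inserting it.
_EXAMPLE_DB_CREATE_BODY = {
    "instance": "primary-instance",
    "name": "example_db",
    "project": "example-project",
}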
class CloudSQLPatchInstanceDatabaseOperator(CloudSQLBaseOperator):
"""Update resource containing information about a database using patch semantics.
See: https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLPatchInstanceDatabaseOperator`
:param instance: Database instance ID. This does not include the project ID.
:param database: Name of the database to be updated in the instance.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/patch#request-body
:param project_id: Optional, Google Cloud Project ID.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param validate_body: Whether the body should be validated. Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_db_patch_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"database",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_db_patch_template_fields]
ui_color = "#ECF4D9"
operator_extra_links = (CloudSQLInstanceDatabaseLink(),)
def __init__(
self,
*,
instance: str,
database: str,
body: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.database = database
self.body = body
self.validate_body = validate_body
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
if not self.database:
raise AirflowException("The required parameter 'database' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(CLOUD_SQL_DATABASE_PATCH_VALIDATION, api_version=self.api_version).validate(
self.body
)
def execute(self, context: Context) -> None:
self._validate_body_fields()
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if not self._check_if_db_exists(self.database, hook):
raise AirflowException(
f"Cloud SQL instance with ID {self.instance} does not contain database '{self.database}'. "
"Please specify another database to patch."
)
else:
CloudSQLInstanceDatabaseLink.persist(
context=context,
task_instance=self,
cloud_sql_instance=self.instance,
project_id=self.project_id or hook.project_id,
)
return hook.patch_database(
project_id=self.project_id, instance=self.instance, database=self.database, body=self.body
)
class CloudSQLDeleteInstanceDatabaseOperator(CloudSQLBaseOperator):
"""Delete a database from a Cloud SQL instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLDeleteInstanceDatabaseOperator`
:param instance: Database instance ID. This does not include the project ID.
:param database: Name of the database to be deleted in the instance.
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_db_delete_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"database",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_db_delete_template_fields]
ui_color = "#D5EAD8"
def __init__(
self,
*,
instance: str,
database: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.database = database
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.database:
raise AirflowException("The required parameter 'database' is empty")
def execute(self, context: Context) -> bool | None:
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if not self._check_if_db_exists(self.database, hook):
            self.log.info(
f"Cloud SQL instance with ID {self.instance!r} does not contain database {self.database!r}. "
f"Aborting database delete."
)
return True
else:
return hook.delete_database(
project_id=self.project_id, instance=self.instance, database=self.database
)
class CloudSQLExportInstanceOperator(CloudSQLBaseOperator):
"""Export data from a Cloud SQL instance to a Cloud Storage bucket.
The exported format can be a SQL dump or CSV file.
Note: This operator is idempotent. If executed multiple times with the same
    export file URI, the export file in GCS will simply be overwritten.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLExportInstanceOperator`
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param validate_body: Whether the body should be validated. Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param poke_interval: (Deferrable mode only) Time (seconds) to wait between calls
to check the run status.
"""
# [START gcp_sql_export_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_export_template_fields]
ui_color = "#D4ECEA"
operator_extra_links = (CloudSQLInstanceLink(), FileDetailsLink())
def __init__(
self,
*,
instance: str,
body: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poke_interval: int = 10,
**kwargs,
) -> None:
self.body = body
self.validate_body = validate_body
self.deferrable = deferrable
self.poke_interval = poke_interval
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(CLOUD_SQL_EXPORT_VALIDATION, api_version=self.api_version).validate(
self.body
)
def execute(self, context: Context) -> None:
self._validate_body_fields()
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
CloudSQLInstanceLink.persist(
context=context,
task_instance=self,
cloud_sql_instance=self.instance,
project_id=self.project_id or hook.project_id,
)
FileDetailsLink.persist(
context=context,
task_instance=self,
uri=self.body["exportContext"]["uri"][5:],
project_id=self.project_id or hook.project_id,
)
operation_name = hook.export_instance(
project_id=self.project_id, instance=self.instance, body=self.body
)
if not self.deferrable:
return hook._wait_for_operation_to_complete(
project_id=self.project_id, operation_name=operation_name
)
else:
self.defer(
trigger=CloudSQLExportTrigger(
operation_name=operation_name,
project_id=self.project_id or hook.project_id,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
poke_interval=self.poke_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None) -> None:
"""
Callback for when the trigger fires - returns immediately.
        Relies on the trigger to throw an exception; otherwise it assumes execution was successful.
"""
if event["status"] == "success":
self.log.info("Operation %s completed successfully", event["operation_name"])
else:
self.log.exception("Unexpected error in the operation.")
raise AirflowException(event["message"])
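# Illustrative usage sketch only, not part of the provider module. It shows a minimal
# export request body with the exportContext.uri field that the operator above reads when
# persisting the FileDetailsLink. The bucket, instance and database names are hypothetical
# placeholders.
def _example_export_instance_usage():
    export_body = {
        "exportContext": {
            "fileType": "sql",
            "uri": "gs://example-bucket/example-export.sql",  # the gs:// prefix is stripped for the link
            "databases": ["example_db"],
        }
    }
    return CloudSQLExportInstanceOperator(
        task_id="example_sql_export",
        instance="example-instance",
        body=export_body,
    )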
class CloudSQLImportInstanceOperator(CloudSQLBaseOperator):
"""Import data into a Cloud SQL instance from Cloud Storage.
CSV IMPORT
``````````
This operator is NOT idempotent for a CSV import. If the same file is imported
multiple times, the imported data will be duplicated in the database.
    Moreover, if there are any unique constraints, the duplicate import may result in an
error.
SQL IMPORT
``````````
This operator is idempotent for a SQL import if it was also exported by Cloud SQL.
The exported SQL contains 'DROP TABLE IF EXISTS' statements for all tables
to be imported.
If the import file was generated in a different way, idempotence is not guaranteed.
It has to be ensured on the SQL file level.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLImportInstanceOperator`
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/import#request-body
:param project_id: Optional, Google Cloud Project ID. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1beta4).
:param validate_body: Whether the body should be validated. Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_sql_import_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcp_sql_import_template_fields]
ui_color = "#D3EDFB"
operator_extra_links = (CloudSQLInstanceLink(), FileDetailsLink())
def __init__(
self,
*,
instance: str,
body: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1beta4",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
self.validate_body = validate_body
super().__init__(
project_id=project_id,
instance=instance,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.body:
raise AirflowException("The required parameter 'body' is empty")
def _validate_body_fields(self) -> None:
if self.validate_body:
GcpBodyFieldValidator(CLOUD_SQL_IMPORT_VALIDATION, api_version=self.api_version).validate(
self.body
)
def execute(self, context: Context) -> None:
self._validate_body_fields()
hook = CloudSQLHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
CloudSQLInstanceLink.persist(
context=context,
task_instance=self,
cloud_sql_instance=self.instance,
project_id=self.project_id or hook.project_id,
)
FileDetailsLink.persist(
context=context,
task_instance=self,
uri=self.body["importContext"]["uri"][5:],
project_id=self.project_id or hook.project_id,
)
return hook.import_instance(project_id=self.project_id, instance=self.instance, body=self.body)
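# Illustrative usage sketch only, not part of the provider module. It mirrors the export
# example above: a minimal import request body with the importContext.uri field read by the
# operator when persisting the FileDetailsLink. All names are hypothetical placeholders.
def _example_import_instance_usage():
    import_body = {
        "importContext": {
            "fileType": "sql",
            "uri": "gs://example-bucket/example-export.sql",
            "database": "example_db",
        }
    }
    return CloudSQLImportInstanceOperator(
        task_id="example_sql_import",
        instance="example-instance",
        body=import_body,
    )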
class CloudSQLExecuteQueryOperator(GoogleCloudBaseOperator):
"""Perform DML or DDL query on an existing Cloud Sql instance.
It optionally uses cloud-sql-proxy to establish secure connection with the
database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSQLExecuteQueryOperator`
    :param sql: SQL query or list of queries to run (should be DML or DDL queries -
        this operator does not return any data from the database, so it is useless
        to pass it DQL queries). Note that it is the responsibility of the author
        of the queries to make sure that the queries are idempotent. For example,
        you can use CREATE TABLE IF NOT EXISTS to create a table.
:param parameters: (optional) the parameters to render the SQL query with.
:param autocommit: if True, each command is automatically committed.
(default value: False)
:param gcp_conn_id: The connection ID used to connect to Google Cloud for
cloud-sql-proxy authentication.
    :param gcp_cloudsql_conn_id: The connection ID used to connect to Google Cloud SQL;
        its schema should be gcpcloudsql://.
See :class:`~airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLDatabaseHook` for
details on how to define ``gcpcloudsql://`` connection.
    :param sql_proxy_binary_path: (optional) Path to the cloud-sql-proxy binary. If the path
        is not specified or the binary is not present, it is automatically downloaded.
"""
# [START gcp_sql_query_template_fields]
template_fields: Sequence[str] = ("sql", "gcp_cloudsql_conn_id", "gcp_conn_id")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
# [END gcp_sql_query_template_fields]
ui_color = "#D3DEF1"
def __init__(
self,
*,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping | None = None,
gcp_conn_id: str = "google_cloud_default",
gcp_cloudsql_conn_id: str = "google_cloud_sql_default",
sql_proxy_binary_path: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.gcp_conn_id = gcp_conn_id
self.gcp_cloudsql_conn_id = gcp_cloudsql_conn_id
self.autocommit = autocommit
self.parameters = parameters
self.gcp_connection: Connection | None = None
self.sql_proxy_binary_path = sql_proxy_binary_path
def _execute_query(self, hook: CloudSQLDatabaseHook, database_hook: PostgresHook | MySqlHook) -> None:
cloud_sql_proxy_runner = None
try:
if hook.use_proxy:
cloud_sql_proxy_runner = hook.get_sqlproxy_runner()
hook.free_reserved_port()
                # There is a very, very slim chance that the socket will
                # be taken over here by another bind(0).
                # It is quite unlikely to happen, though!
cloud_sql_proxy_runner.start_proxy()
self.log.info('Executing: "%s"', self.sql)
database_hook.run(self.sql, self.autocommit, parameters=self.parameters)
finally:
if cloud_sql_proxy_runner:
cloud_sql_proxy_runner.stop_proxy()
def execute(self, context: Context):
self.gcp_connection = BaseHook.get_connection(self.gcp_conn_id)
hook = CloudSQLDatabaseHook(
gcp_cloudsql_conn_id=self.gcp_cloudsql_conn_id,
gcp_conn_id=self.gcp_conn_id,
default_gcp_project_id=get_field(self.gcp_connection.extra_dejson, "project"),
sql_proxy_binary_path=self.sql_proxy_binary_path,
)
hook.validate_ssl_certs()
connection = hook.create_connection()
hook.validate_socket_path_length()
database_hook = hook.get_database_hook(connection=connection)
try:
self._execute_query(hook, database_hook)
finally:
hook.cleanup_database_hook()
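# Illustrative usage sketch only, not part of the provider module. It shows idempotent DDL
# passed to the query operator above; the gcpcloudsql:// connection ID and the table name
# are hypothetical placeholders (the connection itself must be configured separately).
def _example_execute_query_usage():
    return CloudSQLExecuteQueryOperator(
        task_id="example_cloud_sql_query",
        gcp_cloudsql_conn_id="example_gcpcloudsql_conn",
        sql=[
            "CREATE TABLE IF NOT EXISTS example_table (id INT PRIMARY KEY, name VARCHAR(50))",
        ],
    )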
| 51,053 | 40.205811 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/speech_to_text.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Speech to Text operator."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.speech_v1.types import RecognitionConfig
from google.protobuf.json_format import MessageToDict
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.speech_to_text import CloudSpeechToTextHook, RecognitionAudio
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.common.links.storage import FileDetailsLink
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudSpeechToTextRecognizeSpeechOperator(GoogleCloudBaseOperator):
"""
    Recognizes speech from an audio file and returns it as text.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudSpeechToTextRecognizeSpeechOperator`
    :param config: Information for the recognizer that specifies how to process the request. See more:
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized. See more:
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
    :param project_id: Optional, Google Cloud Project ID. If set to None or missing, the default
        project_id from the Google Cloud connection is used.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_speech_to_text_synthesize_template_fields]
template_fields: Sequence[str] = (
"audio",
"config",
"project_id",
"gcp_conn_id",
"timeout",
"impersonation_chain",
)
# [END gcp_speech_to_text_synthesize_template_fields]
operator_extra_links = (FileDetailsLink(),)
def __init__(
self,
*,
audio: RecognitionAudio,
config: RecognitionConfig,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.audio = audio
self.config = config
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.retry = retry
self.timeout = timeout
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.audio == "":
raise AirflowException("The required parameter 'audio' is empty")
if self.config == "":
raise AirflowException("The required parameter 'config' is empty")
def execute(self, context: Context):
hook = CloudSpeechToTextHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
FileDetailsLink.persist(
context=context,
task_instance=self,
# Slice from: "gs://{BUCKET_NAME}/{FILE_NAME}" to: "{BUCKET_NAME}/{FILE_NAME}"
uri=self.audio["uri"][5:],
project_id=self.project_id or hook.project_id,
)
response = hook.recognize_speech(
config=self.config, audio=self.audio, retry=self.retry, timeout=self.timeout
)
return MessageToDict(response._pb)
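# Illustrative usage sketch only, not part of the provider module. It shows dict-style
# config/audio arguments of the shape the operator above indexes (audio["uri"]); the bucket,
# file name and recognition settings are hypothetical placeholders.
def _example_recognize_speech_usage():
    return CloudSpeechToTextRecognizeSpeechOperator(
        task_id="example_recognize_speech",
        config={"encoding": "LINEAR16", "language_code": "en_US"},
        audio={"uri": "gs://example-bucket/example-audio.raw"},
    )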
| 5,548 | 43.039683 | 137 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/tasks.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Google Cloud Tasks operators which allow you to perform basic operations using Cloud Tasks queues/tasks."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence, Tuple
from google.api_core.exceptions import AlreadyExists
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.tasks_v2.types import Queue, Task
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks.tasks import CloudTasksHook
from airflow.providers.google.cloud.links.cloud_tasks import CloudTasksLink, CloudTasksQueueLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
MetaData = Sequence[Tuple[str, str]]
class CloudTasksQueueCreateOperator(GoogleCloudBaseOperator):
"""
Creates a queue in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueCreateOperator`
:param location: The location name in which the queue will be created.
:param task_queue: The task queue to create.
Queue's name cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"task_queue",
"project_id",
"location",
"queue_name",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
task_queue: Queue,
project_id: str | None = None,
queue_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.task_queue = task_queue
self.project_id = project_id
self.queue_name = queue_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
queue = hook.create_queue(
location=self.location,
task_queue=self.task_queue,
project_id=self.project_id,
queue_name=self.queue_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
if self.queue_name is None:
raise RuntimeError("The queue name should be set here!")
queue = hook.get_queue(
location=self.location,
project_id=self.project_id,
queue_name=self.queue_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
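# Illustrative usage sketch only, not part of the provider module. It shows the minimal
# arguments for creating a queue with the operator above; the location and queue name are
# hypothetical placeholders, and an empty Queue message relies on server-side defaults.
def _example_create_queue_usage():
    return CloudTasksQueueCreateOperator(
        task_id="example_create_queue",
        location="europe-west1",
        queue_name="example-queue",
        task_queue=Queue(),
    )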
class CloudTasksQueueUpdateOperator(GoogleCloudBaseOperator):
"""
Updates a queue in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueUpdateOperator`
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
    :param update_mask: A mask used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"task_queue",
"project_id",
"location",
"queue_name",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
task_queue: Queue,
project_id: str | None = None,
location: str | None = None,
queue_name: str | None = None,
update_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.task_queue = task_queue
self.project_id = project_id
self.location = location
self.queue_name = queue_name
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.update_queue(
task_queue=self.task_queue,
project_id=self.project_id,
location=self.location,
queue_name=self.queue_name,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
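# Illustrative usage sketch only, not part of the provider module. It shows how an
# update_mask can restrict the update to a single queue field; the fully qualified queue
# name and the retry setting are hypothetical placeholders.
def _example_update_queue_usage():
    return CloudTasksQueueUpdateOperator(
        task_id="example_update_queue",
        task_queue=Queue(
            name="projects/example-project/locations/europe-west1/queues/example-queue",
            retry_config={"max_attempts": 5},
        ),
        update_mask=FieldMask(paths=["retry_config.max_attempts"]),
    )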
class CloudTasksQueueGetOperator(GoogleCloudBaseOperator):
"""
Gets a queue from Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueGetOperator`
:param location: The location name in which the queue was created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.get_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
class CloudTasksQueuesListOperator(GoogleCloudBaseOperator):
"""
Lists queues from Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueuesListOperator`
:param location: The location name in which the queues were created.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param results_filter: (Optional) Filter used to specify a subset of queues.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksLink(),)
def __init__(
self,
*,
location: str,
project_id: str | None = None,
results_filter: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.results_filter = results_filter
self.page_size = page_size
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queues = hook.list_queues(
location=self.location,
project_id=self.project_id,
results_filter=self.results_filter,
page_size=self.page_size,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksLink.persist(
operator_instance=self,
context=context,
project_id=self.project_id or hook.project_id,
)
return [Queue.to_dict(q) for q in queues]
class CloudTasksQueueDeleteOperator(GoogleCloudBaseOperator):
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueDeleteOperator`
:param location: The location name in which the queue will be deleted.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudTasksQueuePurgeOperator(GoogleCloudBaseOperator):
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueuePurgeOperator`
:param location: The location name in which the queue will be purged.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.purge_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
class CloudTasksQueuePauseOperator(GoogleCloudBaseOperator):
"""
Pauses a queue in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueuePauseOperator`
:param location: The location name in which the queue will be paused.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.pause_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
class CloudTasksQueueResumeOperator(GoogleCloudBaseOperator):
"""
Resumes a queue in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueResumeOperator`
:param location: The location name in which the queue will be resumed.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.resume_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
class CloudTasksTaskCreateOperator(GoogleCloudBaseOperator):
"""
Creates a task in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksTaskCreateOperator`
:param location: The location name in which the task will be created.
:param queue_name: The queue's name.
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"task",
"project_id",
"location",
"queue_name",
"task_name",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
task: dict | Task,
project_id: str | None = None,
task_name: str | None = None,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.task = task
self.project_id = project_id
self.task_name = task_name
self.response_view = response_view
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
task = hook.create_task(
location=self.location,
queue_name=self.queue_name,
task=self.task,
project_id=self.project_id,
task_name=self.task_name,
response_view=self.response_view,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=task.name,
)
return Task.to_dict(task)
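# Illustrative usage sketch only, not part of the provider module. It shows a minimal task
# payload of the dict form accepted by the operator above; the queue, location and target
# URL are hypothetical placeholders, and the HTTP method is assumed to default to POST.
def _example_create_task_usage():
    return CloudTasksTaskCreateOperator(
        task_id="example_create_task",
        location="europe-west1",
        queue_name="example-queue",
        task={"http_request": {"url": "https://example.com/task-handler"}},
    )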
class CloudTasksTaskGetOperator(GoogleCloudBaseOperator):
"""
Gets a task from Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksTaskGetOperator`
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"task_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
task_name: str,
project_id: str | None = None,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.task_name = task_name
self.project_id = project_id
self.response_view = response_view
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
task = hook.get_task(
location=self.location,
queue_name=self.queue_name,
task_name=self.task_name,
project_id=self.project_id,
response_view=self.response_view,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=task.name,
)
return Task.to_dict(task)
class CloudTasksTasksListOperator(GoogleCloudBaseOperator):
"""
Lists the tasks in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksTasksListOperator`
:param location: The location name in which the tasks were created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str | None = None,
response_view: Task.View | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.response_view = response_view
self.page_size = page_size
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
tasks = hook.list_tasks(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
response_view=self.response_view,
page_size=self.page_size,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=f"projects/{self.project_id or hook.project_id}/"
f"locations/{self.location}/queues/{self.queue_name}",
)
return [Task.to_dict(t) for t in tasks]
class CloudTasksTaskDeleteOperator(GoogleCloudBaseOperator):
"""
Deletes a task from Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksTaskDeleteOperator`
:param location: The location name in which the task will be deleted.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"task_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
queue_name: str,
task_name: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.task_name = task_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_task(
location=self.location,
queue_name=self.queue_name,
task_name=self.task_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class CloudTasksTaskRunOperator(GoogleCloudBaseOperator):
"""
    Forces a task to run in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksTaskRunOperator`
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"task_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
task_name: str,
project_id: str | None = None,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.task_name = task_name
self.project_id = project_id
self.response_view = response_view
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
task = hook.run_task(
location=self.location,
queue_name=self.queue_name,
task_name=self.task_name,
project_id=self.project_id,
response_view=self.response_view,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
operator_instance=self,
context=context,
queue_name=task.name,
)
return Task.to_dict(task)
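# Illustrative usage sketch: one minimal way CloudTasksTaskRunOperator might be instantiated.
# The task_id, location, queue name and task name below are assumed placeholder values, not
# defaults taken from this provider.
def _example_cloud_tasks_task_run() -> CloudTasksTaskRunOperator:
    """Return a sample CloudTasksTaskRunOperator wired with placeholder arguments."""
    return CloudTasksTaskRunOperator(
        task_id="run_cloud_task",
        location="europe-west1",
        queue_name="example-queue",
        task_name="example-task",
    )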
| 48,246 | 39.374059 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/automl.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google AutoML operators."""
from __future__ import annotations
import ast
from typing import TYPE_CHECKING, Sequence, Tuple
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.automl_v1beta1 import (
BatchPredictResult,
ColumnSpec,
Dataset,
Model,
PredictResponse,
TableSpec,
)
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.links.automl import (
AutoMLDatasetLink,
AutoMLDatasetListLink,
AutoMLModelLink,
AutoMLModelPredictLink,
AutoMLModelTrainLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
MetaData = Sequence[Tuple[str, str]]
class AutoMLTrainModelOperator(GoogleCloudBaseOperator):
"""
Creates Google Cloud AutoML model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTrainModelOperator`
:param model: Model definition.
:param project_id: ID of the Google Cloud project where model will be created if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"model",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (
AutoMLModelTrainLink(),
AutoMLModelLink(),
)
def __init__(
self,
*,
model: dict,
location: str,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model = model
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating model %s...", self.model["display_name"])
operation = hook.create_model(
model=self.model,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
AutoMLModelTrainLink.persist(context=context, task_instance=self, project_id=project_id)
operation_result = hook.wait_for_operation(timeout=self.timeout, operation=operation)
result = Model.to_dict(operation_result)
model_id = hook.extract_object_id(result)
self.log.info("Model is created, model_id: %s", model_id)
self.xcom_push(context, key="model_id", value=model_id)
if project_id:
AutoMLModelLink.persist(
context=context,
task_instance=self,
dataset_id=self.model["dataset_id"] or "-",
model_id=model_id,
project_id=project_id,
)
return result
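# Illustrative usage sketch: one plausible shape of the ``model`` dict for an AutoML Tables
# training job. The display name, dataset id, project and training budget are assumed
# placeholder values.
def _example_automl_train_model() -> AutoMLTrainModelOperator:
    """Return a sample AutoMLTrainModelOperator wired with placeholder arguments."""
    example_model = {
        "display_name": "example_model",
        "dataset_id": "example_dataset_id",
        "tables_model_metadata": {"train_budget_milli_node_hours": 1000},
    }
    return AutoMLTrainModelOperator(
        task_id="create_model",
        model=example_model,
        location="us-central1",
        project_id="example-project",
    )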
class AutoMLPredictOperator(GoogleCloudBaseOperator):
"""
Runs prediction operation on Google Cloud AutoML.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLPredictOperator`
    :param model_id: Name of the model requested to serve the prediction.
    :param payload: Required. The payload to perform a prediction on. The payload must match
        the prediction format of the model.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param operation_params: Additional domain-specific parameters for the predictions.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLModelPredictLink(),)
def __init__(
self,
*,
model_id: str,
location: str,
payload: dict,
operation_params: dict[str, str] | None = None,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.operation_params = operation_params # type: ignore
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.payload = payload
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.predict(
model_id=self.model_id,
payload=self.payload,
location=self.location,
project_id=self.project_id,
params=self.operation_params,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
AutoMLModelPredictLink.persist(
context=context,
task_instance=self,
model_id=self.model_id,
project_id=project_id,
)
return PredictResponse.to_dict(result)
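# Illustrative usage sketch: the ``payload`` is the prediction input, shown here in the
# AutoML Natural Language text_snippet form; other AutoML domains use different payload
# shapes. The model id, project and text content are assumed placeholders.
def _example_automl_predict() -> AutoMLPredictOperator:
    """Return a sample AutoMLPredictOperator wired with placeholder arguments."""
    return AutoMLPredictOperator(
        task_id="predict",
        model_id="example_model_id",
        payload={"text_snippet": {"content": "A short text to classify", "mime_type": "text/plain"}},
        location="us-central1",
        project_id="example-project",
    )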
class AutoMLBatchPredictOperator(GoogleCloudBaseOperator):
"""
Perform a batch prediction on Google Cloud AutoML.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLBatchPredictOperator`
:param project_id: ID of the Google Cloud project where model will be created if None then
default project_id is used.
:param location: The location of the project.
    :param model_id: Name of the model requested to serve the batch prediction.
:param input_config: Required. The input configuration for batch prediction.
If a dict is provided, it must be of the same form as the protobuf message
`google.cloud.automl_v1beta1.types.BatchPredictInputConfig`
:param output_config: Required. The Configuration specifying where output predictions should be
written. If a dict is provided, it must be of the same form as the protobuf message
`google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`
:param prediction_params: Additional domain-specific parameters for the predictions,
any string must be up to 25000 characters long.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"model_id",
"input_config",
"output_config",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLModelPredictLink(),)
def __init__(
self,
*,
model_id: str,
input_config: dict,
output_config: dict,
location: str,
project_id: str | None = None,
prediction_params: dict[str, str] | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.location = location
self.project_id = project_id
self.prediction_params = prediction_params
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.input_config = input_config
self.output_config = output_config
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Fetch batch prediction.")
operation = hook.batch_predict(
model_id=self.model_id,
input_config=self.input_config,
output_config=self.output_config,
project_id=self.project_id,
location=self.location,
params=self.prediction_params,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
operation_result = hook.wait_for_operation(timeout=self.timeout, operation=operation)
result = BatchPredictResult.to_dict(operation_result)
self.log.info("Batch prediction is ready.")
project_id = self.project_id or hook.project_id
if project_id:
AutoMLModelPredictLink.persist(
context=context,
task_instance=self,
model_id=self.model_id,
project_id=project_id,
)
return result
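# Illustrative usage sketch: GCS-based input and output configs in the dict form of
# BatchPredictInputConfig / BatchPredictOutputConfig. The bucket names, paths, model id
# and project are assumed placeholders.
def _example_automl_batch_predict() -> AutoMLBatchPredictOperator:
    """Return a sample AutoMLBatchPredictOperator wired with placeholder arguments."""
    return AutoMLBatchPredictOperator(
        task_id="batch_predict",
        model_id="example_model_id",
        input_config={"gcs_source": {"input_uris": ["gs://example-bucket/input.csv"]}},
        output_config={"gcs_destination": {"output_uri_prefix": "gs://example-bucket/output/"}},
        location="us-central1",
        project_id="example-project",
    )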
class AutoMLCreateDatasetOperator(GoogleCloudBaseOperator):
"""
Creates a Google Cloud AutoML dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLCreateDatasetOperator`
:param dataset: The dataset to create. If a dict is provided, it must be of the
same form as the protobuf message Dataset.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLDatasetLink(),)
def __init__(
self,
*,
dataset: dict,
location: str,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset = dataset
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating dataset %s...", self.dataset)
result = hook.create_dataset(
dataset=self.dataset,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = Dataset.to_dict(result)
dataset_id = hook.extract_object_id(result)
self.log.info("Creating completed. Dataset id: %s", dataset_id)
self.xcom_push(context, key="dataset_id", value=dataset_id)
project_id = self.project_id or hook.project_id
if project_id:
AutoMLDatasetLink.persist(
context=context,
task_instance=self,
dataset_id=dataset_id,
project_id=project_id,
)
return result
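# Illustrative usage sketch: a minimal Tables ``dataset`` dict. The display name and project
# are assumed placeholders; other AutoML domains use different *_dataset_metadata fields.
def _example_automl_create_dataset() -> AutoMLCreateDatasetOperator:
    """Return a sample AutoMLCreateDatasetOperator wired with placeholder arguments."""
    return AutoMLCreateDatasetOperator(
        task_id="create_dataset",
        dataset={"display_name": "example_dataset", "tables_dataset_metadata": {}},
        location="us-central1",
        project_id="example-project",
    )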
class AutoMLImportDataOperator(GoogleCloudBaseOperator):
"""
Imports data to a Google Cloud AutoML dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLImportDataOperator`
:param dataset_id: ID of dataset to be updated.
:param input_config: The desired input location and its domain specific semantics, if any.
If a dict is provided, it must be of the same form as the protobuf message InputConfig.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset_id",
"input_config",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLDatasetLink(),)
def __init__(
self,
*,
dataset_id: str,
location: str,
input_config: dict,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.input_config = input_config
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Importing data to dataset...")
operation = hook.import_data(
dataset_id=self.dataset_id,
input_config=self.input_config,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Import is completed")
project_id = self.project_id or hook.project_id
if project_id:
AutoMLDatasetLink.persist(
context=context,
task_instance=self,
dataset_id=self.dataset_id,
project_id=project_id,
)
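# Illustrative usage sketch: a GCS-backed ``input_config`` in the InputConfig dict form.
# The dataset id, bucket path and project are assumed placeholders; in practice the dataset
# id would typically come from an upstream task's XCom.
def _example_automl_import_data() -> AutoMLImportDataOperator:
    """Return a sample AutoMLImportDataOperator wired with placeholder arguments."""
    return AutoMLImportDataOperator(
        task_id="import_data",
        dataset_id="example_dataset_id",
        input_config={"gcs_source": {"input_uris": ["gs://example-bucket/data.csv"]}},
        location="us-central1",
        project_id="example-project",
    )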
class AutoMLTablesListColumnSpecsOperator(GoogleCloudBaseOperator):
"""
Lists column specs in a table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTablesListColumnSpecsOperator`
:param dataset_id: Name of the dataset.
:param table_spec_id: table_spec_id for path builder.
:param field_mask: Mask specifying which fields to read. If a dict is provided, it must be of the same
form as the protobuf message `google.cloud.automl_v1beta1.types.FieldMask`
:param filter_: Filter expression, see go/filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per
resource, this parameter does not affect the return value. If page
streaming is performed per page, this determines the maximum number
of resources in a page.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset_id",
"table_spec_id",
"field_mask",
"filter_",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLDatasetLink(),)
def __init__(
self,
*,
dataset_id: str,
table_spec_id: str,
location: str,
field_mask: dict | None = None,
filter_: str | None = None,
page_size: int | None = None,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.table_spec_id = table_spec_id
self.field_mask = field_mask
self.filter_ = filter_
self.page_size = page_size
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Requesting column specs.")
page_iterator = hook.list_column_specs(
dataset_id=self.dataset_id,
table_spec_id=self.table_spec_id,
field_mask=self.field_mask,
filter_=self.filter_,
page_size=self.page_size,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = [ColumnSpec.to_dict(spec) for spec in page_iterator]
self.log.info("Columns specs obtained.")
project_id = self.project_id or hook.project_id
if project_id:
AutoMLDatasetLink.persist(
context=context,
task_instance=self,
dataset_id=self.dataset_id,
project_id=project_id,
)
return result
class AutoMLTablesUpdateDatasetOperator(GoogleCloudBaseOperator):
"""
Updates a dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTablesUpdateDatasetOperator`
:param dataset: The dataset which replaces the resource on the server.
If a dict is provided, it must be of the same form as the protobuf message Dataset.
:param update_mask: The update mask applies to the resource. If a dict is provided, it must
be of the same form as the protobuf message FieldMask.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset",
"update_mask",
"location",
"impersonation_chain",
)
operator_extra_links = (AutoMLDatasetLink(),)
def __init__(
self,
*,
dataset: dict,
location: str,
update_mask: dict | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset = dataset
self.update_mask = update_mask
self.location = location
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Updating AutoML dataset %s.", self.dataset["name"])
result = hook.update_dataset(
dataset=self.dataset,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Dataset updated.")
project_id = hook.project_id
if project_id:
AutoMLDatasetLink.persist(
context=context,
task_instance=self,
dataset_id=hook.extract_object_id(self.dataset),
project_id=project_id,
)
return Dataset.to_dict(result)
class AutoMLGetModelOperator(GoogleCloudBaseOperator):
"""
Get Google Cloud AutoML model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLGetModelOperator`
    :param model_id: Name of the model to fetch.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLModelLink(),)
def __init__(
self,
*,
model_id: str,
location: str,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.get_model(
model_id=self.model_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
model = Model.to_dict(result)
project_id = self.project_id or hook.project_id
if project_id:
AutoMLModelLink.persist(
context=context,
task_instance=self,
dataset_id=model["dataset_id"],
model_id=self.model_id,
project_id=project_id,
)
return model
class AutoMLDeleteModelOperator(GoogleCloudBaseOperator):
"""
Delete Google Cloud AutoML model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLDeleteModelOperator`
    :param model_id: Name of the model to delete.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model_id: str,
location: str,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
operation = hook.delete_model(
model_id=self.model_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Deletion is completed")
class AutoMLDeployModelOperator(GoogleCloudBaseOperator):
"""
Deploys a model; if a model is already deployed, deploying it with the same parameters has no effect.
    Deploying with different parameters (e.g. changing node_number) will
reset the deployment state without pausing the model_id's availability.
Only applicable for Text Classification, Image Object Detection and Tables; all other
domains manage deployment automatically.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLDeployModelOperator`
:param model_id: Name of the model to be deployed.
:param image_detection_metadata: Model deployment metadata specific to Image Object Detection.
If a dict is provided, it must be of the same form as the protobuf message
ImageObjectDetectionModelDeploymentMetadata
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"model_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
model_id: str,
location: str,
project_id: str | None = None,
image_detection_metadata: dict | None = None,
metadata: Sequence[tuple[str, str]] = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.model_id = model_id
self.image_detection_metadata = image_detection_metadata
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deploying model_id %s", self.model_id)
operation = hook.deploy_model(
model_id=self.model_id,
location=self.location,
project_id=self.project_id,
image_detection_metadata=self.image_detection_metadata,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Model was deployed successfully.")
class AutoMLTablesListTableSpecsOperator(GoogleCloudBaseOperator):
"""
Lists table specs in a dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLTablesListTableSpecsOperator`
:param dataset_id: Name of the dataset.
:param filter_: Filter expression, see go/filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param project_id: ID of the Google Cloud project if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset_id",
"filter_",
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLDatasetLink(),)
def __init__(
self,
*,
dataset_id: str,
location: str,
page_size: int | None = None,
filter_: str | None = None,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.filter_ = filter_
self.page_size = page_size
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Requesting table specs for %s.", self.dataset_id)
page_iterator = hook.list_table_specs(
dataset_id=self.dataset_id,
filter_=self.filter_,
page_size=self.page_size,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = [TableSpec.to_dict(spec) for spec in page_iterator]
self.log.info(result)
self.log.info("Table specs obtained.")
project_id = self.project_id or hook.project_id
if project_id:
AutoMLDatasetLink.persist(
context=context,
task_instance=self,
dataset_id=self.dataset_id,
project_id=project_id,
)
return result
class AutoMLListDatasetOperator(GoogleCloudBaseOperator):
"""
Lists AutoML Datasets in project.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLListDatasetOperator`
:param project_id: ID of the Google Cloud project where datasets are located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"project_id",
"impersonation_chain",
)
operator_extra_links = (AutoMLDatasetListLink(),)
def __init__(
self,
*,
location: str,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Requesting datasets")
page_iterator = hook.list_datasets(
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = [Dataset.to_dict(dataset) for dataset in page_iterator]
self.log.info("Datasets obtained.")
self.xcom_push(
context,
key="dataset_id_list",
value=[hook.extract_object_id(d) for d in result],
)
project_id = self.project_id or hook.project_id
if project_id:
AutoMLDatasetListLink.persist(context=context, task_instance=self, project_id=project_id)
return result
class AutoMLDeleteDatasetOperator(GoogleCloudBaseOperator):
"""
Deletes a dataset and all of its contents.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AutoMLDeleteDatasetOperator`
    :param dataset_id: ID of the dataset to delete. Accepts a single dataset ID, a list of
        dataset IDs, or a comma-separated string of dataset IDs.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset_id",
"location",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
dataset_id: str | list[str],
location: str,
project_id: str | None = None,
metadata: MetaData = (),
timeout: float | None = None,
retry: Retry | _MethodDefault = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.location = location
self.project_id = project_id
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@staticmethod
def _parse_dataset_id(dataset_id: str | list[str]) -> list[str]:
if not isinstance(dataset_id, str):
return dataset_id
try:
return ast.literal_eval(dataset_id)
except (SyntaxError, ValueError):
return dataset_id.split(",")
def execute(self, context: Context):
hook = CloudAutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
dataset_id_list = self._parse_dataset_id(self.dataset_id)
for dataset_id in dataset_id_list:
self.log.info("Deleting dataset %s", dataset_id)
hook.delete_dataset(
dataset_id=dataset_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Dataset deleted.")
| 51,090 | 39.70996 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/dataproc_metastore.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataproc Metastore operators."""
from __future__ import annotations
from time import sleep
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry, exponential_sleep_generator
from google.cloud.metastore_v1 import MetadataExport, MetadataManagementActivity
from google.cloud.metastore_v1.types import Backup, MetadataImport, Service
from google.cloud.metastore_v1.types.metastore import DatabaseDumpSpec, Restore
from google.protobuf.field_mask_pb2 import FieldMask
from googleapiclient.errors import HttpError
from airflow import AirflowException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.xcom import XCom
from airflow.providers.google.cloud.hooks.dataproc_metastore import DataprocMetastoreHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.common.links.storage import StorageLink
if TYPE_CHECKING:
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
BASE_LINK = "https://console.cloud.google.com"
METASTORE_BASE_LINK = BASE_LINK + "/dataproc/metastore/services/{region}/{service_id}"
METASTORE_BACKUP_LINK = METASTORE_BASE_LINK + "/backups/{resource}?project={project_id}"
METASTORE_BACKUPS_LINK = METASTORE_BASE_LINK + "/backuprestore?project={project_id}"
METASTORE_EXPORT_LINK = METASTORE_BASE_LINK + "/importexport?project={project_id}"
METASTORE_IMPORT_LINK = METASTORE_BASE_LINK + "/imports/{resource}?project={project_id}"
METASTORE_SERVICE_LINK = METASTORE_BASE_LINK + "/config?project={project_id}"
class DataprocMetastoreLink(BaseOperatorLink):
"""Helper class for constructing Dataproc Metastore resource link."""
name = "Dataproc Metastore"
key = "conf"
@staticmethod
def persist(
context: Context,
task_instance: (
DataprocMetastoreCreateServiceOperator
| DataprocMetastoreGetServiceOperator
| DataprocMetastoreRestoreServiceOperator
| DataprocMetastoreUpdateServiceOperator
| DataprocMetastoreListBackupsOperator
| DataprocMetastoreExportMetadataOperator
),
url: str,
):
task_instance.xcom_push(
context=context,
key=DataprocMetastoreLink.key,
value={
"region": task_instance.region,
"service_id": task_instance.service_id,
"project_id": task_instance.project_id,
"url": url,
},
)
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
conf = XCom.get_value(key=self.key, ti_key=ti_key)
return (
conf["url"].format(
region=conf["region"],
service_id=conf["service_id"],
project_id=conf["project_id"],
)
if conf
else ""
)
class DataprocMetastoreDetailedLink(BaseOperatorLink):
"""Helper class for constructing Dataproc Metastore detailed resource link."""
name = "Dataproc Metastore resource"
key = "config"
@staticmethod
def persist(
context: Context,
task_instance: (
DataprocMetastoreCreateBackupOperator | DataprocMetastoreCreateMetadataImportOperator
),
url: str,
resource: str,
):
task_instance.xcom_push(
context=context,
key=DataprocMetastoreDetailedLink.key,
value={
"region": task_instance.region,
"service_id": task_instance.service_id,
"project_id": task_instance.project_id,
"url": url,
"resource": resource,
},
)
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
conf = XCom.get_value(key=self.key, ti_key=ti_key)
return (
conf["url"].format(
region=conf["region"],
service_id=conf["service_id"],
project_id=conf["project_id"],
resource=conf["resource"],
)
if conf
else ""
)
class DataprocMetastoreCreateBackupOperator(GoogleCloudBaseOperator):
"""Create a new backup in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup: Required. The backup to create. The ``name`` field is ignored. The ID of the created
backup must be provided in the request's ``backup_id`` field.
This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this
should not be set.
:param backup_id: Required. The ID of the backup, which is used as the final component of the backup's
name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or
number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"backup",
"impersonation_chain",
)
template_fields_renderers = {"backup": "json"}
operator_extra_links = (DataprocMetastoreDetailedLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
backup: dict | Backup,
backup_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.backup = backup
self.backup_id = backup_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating Dataproc Metastore backup: %s", self.backup_id)
try:
operation = hook.create_backup(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup=self.backup,
backup_id=self.backup_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
backup = hook.wait_for_operation(self.timeout, operation)
self.log.info("Backup %s created successfully", self.backup_id)
except HttpError as err:
if err.resp.status not in (409, "409"):
raise
self.log.info("Backup %s already exists", self.backup_id)
backup = hook.get_backup(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup_id=self.backup_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreDetailedLink.persist(
context=context, task_instance=self, url=METASTORE_BACKUP_LINK, resource=self.backup_id
)
return Backup.to_dict(backup)
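# Illustrative usage sketch: creating a backup of an existing metastore service. The
# project, region, service and backup ids are assumed placeholders, and the ``backup`` body
# is left empty here because its ``name`` field is ignored and most other fields are
# output-only.
def _example_metastore_create_backup() -> DataprocMetastoreCreateBackupOperator:
    """Return a sample DataprocMetastoreCreateBackupOperator wired with placeholder arguments."""
    return DataprocMetastoreCreateBackupOperator(
        task_id="create_backup",
        project_id="example-project",
        region="us-central1",
        service_id="example-service",
        backup={},
        backup_id="example-backup",
    )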
class DataprocMetastoreCreateMetadataImportOperator(GoogleCloudBaseOperator):
"""Create a new MetadataImport in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param metadata_import: Required. The metadata import to create. The ``name`` field is ignored. The ID of
the created metadata import must be provided in the request's ``metadata_import_id`` field.
This corresponds to the ``metadata_import`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param metadata_import_id: Required. The ID of the metadata import, which is used as the final component
of the metadata import's name. This value must be between 1 and 64 characters long, begin with a
letter, end with a letter or number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``metadata_import_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"metadata_import",
"impersonation_chain",
)
template_fields_renderers = {"metadata_import": "json"}
operator_extra_links = (DataprocMetastoreDetailedLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
metadata_import: dict | MetadataImport,
metadata_import_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.metadata_import = metadata_import
self.metadata_import_id = metadata_import_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating Dataproc Metastore metadata import: %s", self.metadata_import_id)
operation = hook.create_metadata_import(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
metadata_import=self.metadata_import,
metadata_import_id=self.metadata_import_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
metadata_import = hook.wait_for_operation(self.timeout, operation)
self.log.info("Metadata import %s created successfully", self.metadata_import_id)
DataprocMetastoreDetailedLink.persist(
context=context, task_instance=self, url=METASTORE_IMPORT_LINK, resource=self.metadata_import_id
)
return MetadataImport.to_dict(metadata_import)
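# Illustrative usage sketch: a metadata import from a MySQL database dump stored in GCS,
# given in the MetadataImport dict form. The bucket path, project, region and all ids are
# assumed placeholders.
def _example_metastore_create_metadata_import() -> DataprocMetastoreCreateMetadataImportOperator:
    """Return a sample DataprocMetastoreCreateMetadataImportOperator with placeholder arguments."""
    return DataprocMetastoreCreateMetadataImportOperator(
        task_id="create_metadata_import",
        project_id="example-project",
        region="us-central1",
        service_id="example-service",
        metadata_import={
            "database_dump": {"gcs_uri": "gs://example-bucket/hive.sql", "database_type": "MYSQL"},
        },
        metadata_import_id="example-import",
    )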
class DataprocMetastoreCreateServiceOperator(GoogleCloudBaseOperator):
"""Create a metastore service in a project and location.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param service: Required. The Metastore service to create. The ``name`` field is ignored. The ID of
the created metastore service must be provided in the request's ``service_id`` field.
This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
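**Example** (a minimal sketch; the ``task_id`` and parameter values below are illustrative
assumptions, not defaults of the operator): ::
    create_service = DataprocMetastoreCreateServiceOperator(
        task_id='create_metastore_service',
        region='us-central1',
        project_id='my-project',
        service={'port': 9080},
        service_id='my-metastore-service',
    )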
"""
template_fields: Sequence[str] = (
"project_id",
"service",
"impersonation_chain",
)
template_fields_renderers = {"service": "json"}
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
service: dict | Service,
service_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.service = service
self.service_id = service_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating Dataproc Metastore service: %s", self.project_id)
try:
operation = hook.create_service(
region=self.region,
project_id=self.project_id,
service=self.service,
service_id=self.service_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
service = hook.wait_for_operation(self.timeout, operation)
self.log.info("Service %s created successfully", self.service_id)
except HttpError as err:
if err.resp.status not in (409, "409"):
raise
self.log.info("Instance %s already exists", self.service_id)
service = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
return Service.to_dict(service)
class DataprocMetastoreDeleteBackupOperator(GoogleCloudBaseOperator):
"""Delete a single backup.
:param project_id: Required. The ID of the Google Cloud project that the backup belongs to.
:param region: Required. The ID of the Google Cloud region that the backup belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup_id: Required. The ID of the backup, which is used as the final component of the backup's
name. This value must be between 1 and 64 characters long, begin with a letter, end with a letter or
number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
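**Example** (a minimal sketch; the ``task_id`` and parameter values are illustrative
assumptions): ::
    delete_backup = DataprocMetastoreDeleteBackupOperator(
        task_id='delete_backup',
        project_id='my-project',
        region='us-central1',
        service_id='my-metastore-service',
        backup_id='my-backup',
    )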
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
backup_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.backup_id = backup_id
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Deleting Dataproc Metastore backup: %s", self.backup_id)
operation = hook.delete_backup(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup_id=self.backup_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(self.timeout, operation)
self.log.info("Backup %s deleted successfully", self.project_id)
class DataprocMetastoreDeleteServiceOperator(GoogleCloudBaseOperator):
"""Delete a single service.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
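**Example** (a minimal sketch; the ``task_id`` and parameter values are illustrative
assumptions): ::
    delete_service = DataprocMetastoreDeleteServiceOperator(
        task_id='delete_metastore_service',
        region='us-central1',
        project_id='my-project',
        service_id='my-metastore-service',
    )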
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
region: str,
project_id: str,
service_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.service_id = service_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Deleting Dataproc Metastore service: %s", self.project_id)
operation = hook.delete_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(self.timeout, operation)
self.log.info("Service %s deleted successfully", self.project_id)
class DataprocMetastoreExportMetadataOperator(GoogleCloudBaseOperator):
"""Export metadata from a service.
:param destination_gcs_folder: A Cloud Storage URI of a folder, in the format
``gs://<bucket_name>/<path_inside_bucket>``. A sub-folder
``<export_folder>`` containing exported files will be
created below it.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
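**Example** (a minimal sketch; the ``task_id``, bucket name, and other parameter values are
illustrative assumptions): ::
    export_metadata = DataprocMetastoreExportMetadataOperator(
        task_id='export_metadata',
        destination_gcs_folder='gs://my-bucket/metastore-export',
        project_id='my-project',
        region='us-central1',
        service_id='my-metastore-service',
    )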
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocMetastoreLink(), StorageLink())
def __init__(
self,
*,
destination_gcs_folder: str,
project_id: str,
region: str,
service_id: str,
request_id: str | None = None,
database_dump_type: DatabaseDumpSpec | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.destination_gcs_folder = destination_gcs_folder
self.project_id = project_id
self.region = region
self.service_id = service_id
self.request_id = request_id
self.database_dump_type = database_dump_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Exporting metadata from Dataproc Metastore service: %s", self.service_id)
hook.export_metadata(
destination_gcs_folder=self.destination_gcs_folder,
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
request_id=self.request_id,
database_dump_type=self.database_dump_type,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
metadata_export = self._wait_for_export_metadata(hook)
self.log.info("Metadata from service %s exported successfully", self.service_id)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_EXPORT_LINK)
uri = self._get_uri_from_destination(MetadataExport.to_dict(metadata_export)["destination_gcs_uri"])
StorageLink.persist(context=context, task_instance=self, uri=uri, project_id=self.project_id)
return MetadataExport.to_dict(metadata_export)
def _get_uri_from_destination(self, destination_uri: str):
return destination_uri[5:] if destination_uri.startswith("gs://") else destination_uri
def _wait_for_export_metadata(self, hook: DataprocMetastoreHook):
"""Check that export was created successfully.
This is a workaround to an issue parsing result to MetadataExport inside
the SDK.
"""
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
sleep(time_to_wait)
service = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
activities: MetadataManagementActivity = service.metadata_management_activity
metadata_export: MetadataExport = activities.metadata_exports[0]
if metadata_export.state == MetadataExport.State.SUCCEEDED:
return metadata_export
if metadata_export.state == MetadataExport.State.FAILED:
raise AirflowException(
f"Exporting metadata from Dataproc Metastore {metadata_export.name} FAILED"
)
class DataprocMetastoreGetServiceOperator(GoogleCloudBaseOperator):
"""Get the details of a single service.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
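**Example** (a minimal sketch; the ``task_id`` and parameter values are illustrative
assumptions): ::
    get_service = DataprocMetastoreGetServiceOperator(
        task_id='get_metastore_service',
        region='us-central1',
        project_id='my-project',
        service_id='my-metastore-service',
    )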
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
service_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.service_id = service_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Gets the details of a single Dataproc Metastore service: %s", self.project_id)
result = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
return Service.to_dict(result)
class DataprocMetastoreListBackupsOperator(GoogleCloudBaseOperator):
"""List backups in a service.
:param project_id: Required. The ID of the Google Cloud project that the backup belongs to.
:param region: Required. The ID of the Google Cloud region that the backup belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
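**Example** (a minimal sketch; the ``task_id`` and parameter values are illustrative
assumptions): ::
    list_backups = DataprocMetastoreListBackupsOperator(
        task_id='list_backups',
        project_id='my-project',
        region='us-central1',
        service_id='my-metastore-service',
    )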
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.page_size = page_size
self.page_token = page_token
self.filter = filter
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> list[dict]:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Listing Dataproc Metastore backups: %s", self.service_id)
backups = hook.list_backups(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
page_size=self.page_size,
page_token=self.page_token,
filter=self.filter,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_BACKUPS_LINK)
return [Backup.to_dict(backup) for backup in backups]
class DataprocMetastoreRestoreServiceOperator(GoogleCloudBaseOperator):
"""Restore a service from a backup.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup_project_id: Required. The ID of the Google Cloud project that the metastore
service backup to restore from belongs to.
:param backup_region: Required. The ID of the Google Cloud region that the metastore
service backup to restore from belongs to.
:param backup_service_id: Required. The ID of the metastore service backup to restore from, which is
used as the final component of the metastore service's name. This value must be between 2 and 63
characters long inclusive, begin with a letter, end with a letter or number, and consist
of alphanumeric ASCII characters or hyphens.
:param backup_id: Required. The ID of the metastore service backup to restore from.
:param restore_type: Optional. The type of restore. If unspecified, defaults to
``METADATA_ONLY``.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
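**Example** (a minimal sketch; the ``task_id`` and parameter values are illustrative
assumptions): ::
    restore_service = DataprocMetastoreRestoreServiceOperator(
        task_id='restore_metastore_service',
        project_id='my-project',
        region='us-central1',
        service_id='my-metastore-service',
        backup_project_id='my-project',
        backup_region='us-central1',
        backup_service_id='my-metastore-service',
        backup_id='my-backup',
    )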
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
backup_project_id: str,
backup_region: str,
backup_service_id: str,
backup_id: str,
restore_type: Restore | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.backup_project_id = backup_project_id
self.backup_region = backup_region
self.backup_service_id = backup_service_id
self.backup_id = backup_id
self.restore_type = restore_type
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info(
"Restoring Dataproc Metastore service: %s from backup: %s", self.service_id, self.backup_id
)
hook.restore_service(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
backup_project_id=self.backup_project_id,
backup_region=self.backup_region,
backup_service_id=self.backup_service_id,
backup_id=self.backup_id,
restore_type=self.restore_type,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self._wait_for_restore_service(hook)
self.log.info("Service %s restored from backup %s", self.service_id, self.backup_id)
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
def _wait_for_restore_service(self, hook: DataprocMetastoreHook):
"""Check that export was created successfully.
This is a workaround to an issue parsing result to MetadataExport inside
the SDK.
"""
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
sleep(time_to_wait)
service = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
activities: MetadataManagementActivity = service.metadata_management_activity
restore_service: Restore = activities.restores[0]
if restore_service.state == Restore.State.SUCCEEDED:
return restore_service
if restore_service.state == Restore.State.FAILED:
raise AirflowException("Restoring service FAILED")
class DataprocMetastoreUpdateServiceOperator(GoogleCloudBaseOperator):
"""Update the parameters of a single service.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param service: Required. The metastore service to update. The server only merges fields in the service
if they are specified in ``update_mask``.
The metastore service's ``name`` field is used to identify the metastore service to be updated.
This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param update_mask: Required. A field mask used to specify the fields to be overwritten in the metastore
service resource by the update. Fields specified in the ``update_mask`` are relative to the resource
(not to the full request). A field is overwritten if it is in the mask.
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
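**Example** (a minimal sketch; the ``task_id``, the label values, and the masked field are
illustrative assumptions): ::
    from google.protobuf.field_mask_pb2 import FieldMask
    update_service = DataprocMetastoreUpdateServiceOperator(
        task_id='update_metastore_service',
        project_id='my-project',
        region='us-central1',
        service_id='my-metastore-service',
        service={'labels': {'team': 'analytics'}},
        update_mask=FieldMask(paths=['labels']),
    )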
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
service_id: str,
service: dict | Service,
update_mask: FieldMask,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.service_id = service_id
self.service = service
self.update_mask = update_mask
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Updating Dataproc Metastore service: %s", self.service.get("name"))
operation = hook.update_service(
project_id=self.project_id,
region=self.region,
service_id=self.service_id,
service=self.service,
update_mask=self.update_mask,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(self.timeout, operation)
self.log.info("Service %s updated successfully", self.service.get("name"))
DataprocMetastoreLink.persist(context=context, task_instance=self, url=METASTORE_SERVICE_LINK)
| 49,516 | 44.015455 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/kubernetes_engine.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Kubernetes Engine operators."""
from __future__ import annotations
import warnings
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence
from google.api_core.exceptions import AlreadyExists
from google.cloud.container_v1.types import Cluster
from kubernetes.client.models import V1Pod
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.cncf.kubernetes.utils.pod_manager import OnFinishAction
try:
from airflow.providers.cncf.kubernetes.operators.pod import KubernetesPodOperator
except ImportError:
# preserve backward compatibility for older versions of cncf.kubernetes provider
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook, GKEPodHook
from airflow.providers.google.cloud.links.kubernetes_engine import (
KubernetesEngineClusterLink,
KubernetesEnginePodLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.triggers.kubernetes_engine import GKEOperationTrigger, GKEStartPodTrigger
from airflow.utils.timezone import utcnow
if TYPE_CHECKING:
from airflow.utils.context import Context
KUBE_CONFIG_ENV_VAR = "KUBECONFIG"
class GKEDeleteClusterOperator(GoogleCloudBaseOperator):
"""
Deletes the cluster, including the Kubernetes endpoint and all worker nodes.
To delete a certain cluster, you must specify the ``project_id``, the ``name``
of the cluster, the ``location`` that the cluster is in, and the ``task_id``.
**Operator Creation**: ::
operator = GKEDeleteClusterOperator(
task_id='cluster_delete',
project_id='my-project',
location='cluster-location',
name='cluster-name')
.. seealso::
For more detail about deleting clusters have a look at the reference:
https://google-cloud-python.readthedocs.io/en/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.delete_cluster
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEDeleteClusterOperator`
:param project_id: The Google Developers Console [project ID or project number]
:param name: The name of the resource to delete, in this case cluster name
:param location: The name of the Google Kubernetes Engine zone or region in which the cluster
resides.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param api_version: The api version to use
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param poll_interval: Interval size which defines how often operation status is checked.
"""
template_fields: Sequence[str] = (
"project_id",
"gcp_conn_id",
"name",
"location",
"api_version",
"impersonation_chain",
)
def __init__(
self,
*,
name: str,
location: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v2",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.name = name
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.poll_interval = poll_interval
self._check_input()
self._hook: GKEHook | None = None
def _check_input(self) -> None:
if not all([self.project_id, self.name, self.location]):
self.log.error("One of (project_id, name, location) is missing or incorrect")
raise AirflowException("Operator has incorrect or missing input.")
def execute(self, context: Context) -> str | None:
hook = self._get_hook()
wait_to_complete = not self.deferrable
operation = hook.delete_cluster(
name=self.name,
project_id=self.project_id,
wait_to_complete=wait_to_complete,
)
if self.deferrable and operation is not None:
self.defer(
trigger=GKEOperationTrigger(
operation_name=operation.name,
project_id=self.project_id,
location=self.location,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
return operation.self_link if operation is not None else None
def execute_complete(self, context: Context, event: dict) -> str:
"""Method to be executed after trigger job is done."""
status = event["status"]
message = event["message"]
if status == "failed" or status == "error":
self.log.exception("Trigger ended with one of the failed statuses.")
raise AirflowException(message)
self.log.info(message)
operation = self._get_hook().get_operation(
operation_name=event["operation_name"],
)
return operation.self_link
def _get_hook(self) -> GKEHook:
if self._hook is None:
self._hook = GKEHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
return self._hook
class GKECreateClusterOperator(GoogleCloudBaseOperator):
"""
Create a Google Kubernetes Engine Cluster of specified dimensions and wait until the cluster is created.
The **minimum** required to define a cluster to create is:
``dict()`` ::
cluster_def = {'name': 'my-cluster-name',
'initial_node_count': 1}
or
``Cluster`` proto ::
from google.cloud.container_v1.types import Cluster
cluster_def = Cluster(name='my-cluster-name', initial_node_count=1)
**Operator Creation**: ::
operator = GKECreateClusterOperator(
task_id='cluster_create',
project_id='my-project',
location='my-location',
body=cluster_def)
.. seealso::
For more detail about creating clusters have a look at the reference:
:class:`google.cloud.container_v1.types.Cluster`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKECreateClusterOperator`
:param project_id: The Google Developers Console [project ID or project number]
:param location: The name of the Google Kubernetes Engine zone or region in which the cluster
resides.
:param body: The Cluster definition to create, can be protobuf or python dict, if
dict it must match protobuf message Cluster
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param api_version: The api version to use
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode.
:param poll_interval: Interval size which defines how often operation status is checked.
"""
template_fields: Sequence[str] = (
"project_id",
"gcp_conn_id",
"location",
"api_version",
"body",
"impersonation_chain",
)
operator_extra_links = (KubernetesEngineClusterLink(),)
def __init__(
self,
*,
location: str,
body: dict | Cluster,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v2",
impersonation_chain: str | Sequence[str] | None = None,
poll_interval: int = 10,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.body = body
self.impersonation_chain = impersonation_chain
self.poll_interval = poll_interval
self.deferrable = deferrable
self._validate_input()
self._hook: GKEHook | None = None
def _validate_input(self) -> None:
"""Primary validation of the input body."""
self._alert_deprecated_body_fields()
error_messages: list[str] = []
if not self._body_field("name"):
error_messages.append("Field body['name'] is missing or incorrect")
if self._body_field("initial_node_count"):
if self._body_field("node_pools"):
error_messages.append(
"Do not use filed body['initial_node_count'] and body['node_pools'] at the same time."
)
if self._body_field("node_config"):
if self._body_field("node_pools"):
error_messages.append(
"Do not use filed body['node_config'] and body['node_pools'] at the same time."
)
if self._body_field("node_pools"):
if any([self._body_field("node_config"), self._body_field("initial_node_count")]):
error_messages.append(
"The field body['node_pools'] should not be set if "
"body['node_config'] or body['initial_code_count'] are specified."
)
if not any([self._body_field("node_config"), self._body_field("initial_node_count")]):
if not self._body_field("node_pools"):
error_messages.append(
"Field body['node_pools'] is required if none of fields "
"body['initial_node_count'] or body['node_pools'] are specified."
)
for message in error_messages:
self.log.error(message)
if error_messages:
raise AirflowException("Operator has incorrect or missing input.")
def _body_field(self, field_name: str, default_value: Any = None) -> Any:
"""Extracts the value of the given field name."""
if isinstance(self.body, dict):
return self.body.get(field_name, default_value)
else:
return getattr(self.body, field_name, default_value)
def _alert_deprecated_body_fields(self) -> None:
"""Generates warning messages if deprecated fields were used in the body."""
deprecated_body_fields_with_replacement = [
("initial_node_count", "node_pool.initial_node_count"),
("node_config", "node_pool.config"),
("zone", "location"),
("instance_group_urls", "node_pools.instance_group_urls"),
]
for deprecated_field, replacement in deprecated_body_fields_with_replacement:
if self._body_field(deprecated_field):
warnings.warn(
f"The body field '{deprecated_field}' is deprecated. Use '{replacement}' instead."
)
def execute(self, context: Context) -> str:
hook = self._get_hook()
try:
wait_to_complete = not self.deferrable
operation = hook.create_cluster(
cluster=self.body,
project_id=self.project_id,
wait_to_complete=wait_to_complete,
)
KubernetesEngineClusterLink.persist(context=context, task_instance=self, cluster=self.body)
if self.deferrable:
self.defer(
trigger=GKEOperationTrigger(
operation_name=operation.name,
project_id=self.project_id,
location=self.location,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
return operation.target_link
except AlreadyExists as error:
self.log.info("Assuming Success: %s", error.message)
name = self.body.name if isinstance(self.body, Cluster) else self.body["name"]
return hook.get_cluster(name=name, project_id=self.project_id).self_link
def execute_complete(self, context: Context, event: dict) -> str:
status = event["status"]
message = event["message"]
if status == "failed" or status == "error":
self.log.exception("Trigger ended with one of the failed statuses.")
raise AirflowException(message)
self.log.info(message)
operation = self._get_hook().get_operation(
operation_name=event["operation_name"],
)
return operation.target_link
def _get_hook(self) -> GKEHook:
if self._hook is None:
self._hook = GKEHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
return self._hook
class GKEStartPodOperator(KubernetesPodOperator):
"""
Executes a task in a Kubernetes pod in the specified Google Kubernetes Engine cluster.
This Operator assumes that the system has gcloud installed and has configured a
connection id with a service account.
The **minimum** required to run a pod are the variables
``task_id``, ``project_id``, ``location``, ``cluster_name``, ``name``,
``namespace``, and ``image``.
.. seealso::
For more detail about Kubernetes Engine authentication have a look at the reference:
https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#internal_ip
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEStartPodOperator`
:param location: The name of the Google Kubernetes Engine zone or region in which the
cluster resides, e.g. 'us-central1-a'
:param cluster_name: The name of the Google Kubernetes Engine cluster the pod
should be spawned in
:param use_internal_ip: Use the internal IP address as the endpoint.
:param project_id: The Google Developers Console project id
:param gcp_conn_id: The Google cloud connection id to use. This allows for
users to specify a service account.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param regional: The location param is region name.
:param deferrable: Run operator in the deferrable mode.
:param on_finish_action: What to do when the pod reaches its final state, or the execution is interrupted.
If "delete_pod", the pod will be deleted regardless it's state; if "delete_succeeded_pod",
only succeeded pod will be deleted. You can set to "keep_pod" to keep the pod.
Current default is `keep_pod`, but this will be changed in the next major release of this provider.
:param is_delete_operator_pod: What to do when the pod reaches its final
state, or the execution is interrupted. If True, delete the
pod; if False, leave the pod. Current default is False, but this will be
changed in the next major release of this provider.
Deprecated - use `on_finish_action` instead.
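**Example** (a minimal sketch; the ``task_id`` and parameter values are illustrative
assumptions): ::
    pod_task = GKEStartPodOperator(
        task_id='pod_task',
        project_id='my-project',
        location='us-central1-a',
        cluster_name='my-cluster',
        name='test-pod',
        namespace='default',
        image='ubuntu',
        cmds=['echo'],
        arguments=['hello'],
    )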
"""
template_fields: Sequence[str] = tuple(
{"project_id", "location", "cluster_name"} | set(KubernetesPodOperator.template_fields)
)
operator_extra_links = (KubernetesEnginePodLink(),)
def __init__(
self,
*,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
regional: bool | None = None,
on_finish_action: str | None = None,
is_delete_operator_pod: bool | None = None,
**kwargs,
) -> None:
if is_delete_operator_pod is not None:
warnings.warn(
"`is_delete_operator_pod` parameter is deprecated, please use `on_finish_action`",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["on_finish_action"] = (
OnFinishAction.DELETE_POD if is_delete_operator_pod else OnFinishAction.KEEP_POD
)
else:
if on_finish_action is not None:
kwargs["on_finish_action"] = OnFinishAction(on_finish_action)
else:
warnings.warn(
f"You have not set parameter `on_finish_action` in class {self.__class__.__name__}. "
"Currently the default for this parameter is `keep_pod` but in a future release"
" the default will be changed to `delete_pod`. To ensure pods are not deleted in"
" the future you will need to set `on_finish_action=keep_pod` explicitly.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["on_finish_action"] = OnFinishAction.KEEP_POD
if regional is not None:
warnings.warn(
f"You have set parameter regional in class {self.__class__.__name__}. "
"In current implementation of the operator the parameter is not used and will "
"be deleted in future.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.use_internal_ip = use_internal_ip
self.pod: V1Pod | None = None
self._ssl_ca_cert: str | None = None
self._cluster_url: str | None = None
if self.gcp_conn_id is None:
raise AirflowException(
"The gcp_conn_id parameter has become required. If you want to use Application Default "
"Credentials (ADC) strategy for authorization, create an empty connection "
"called `google_cloud_default`.",
)
# There is no need to manage the kube_config file, as it will be generated automatically.
# All Kubernetes parameters (except config_file) are also valid for the GKEStartPodOperator.
if self.config_file:
raise AirflowException("config_file is not an allowed parameter for the GKEStartPodOperator.")
@staticmethod
def get_gke_config_file():
warnings.warn(
"The `get_gke_config_file` method is deprecated, "
"please use `fetch_cluster_info` instead to get the cluster info for connecting to it.",
AirflowProviderDeprecationWarning,
stacklevel=1,
)
@cached_property
def cluster_hook(self) -> GKEHook:
return GKEHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
@cached_property
def hook(self) -> GKEPodHook:
if self._cluster_url is None or self._ssl_ca_cert is None:
raise AttributeError(
"Cluster url and ssl_ca_cert should be defined before using self.hook method. "
"Try to use self.get_kube_creds method",
)
hook = GKEPodHook(
cluster_url=self._cluster_url,
ssl_ca_cert=self._ssl_ca_cert,
)
return hook
def execute(self, context: Context):
"""Executes process of creating pod and executing provided command inside it."""
self.fetch_cluster_info()
return super().execute(context)
def fetch_cluster_info(self) -> tuple[str, str | None]:
"""Fetches cluster info for connecting to it."""
cluster = self.cluster_hook.get_cluster(
name=self.cluster_name,
project_id=self.project_id,
)
if not self.use_internal_ip:
self._cluster_url = f"https://{cluster.endpoint}"
else:
self._cluster_url = f"https://{cluster.private_cluster_config.private_endpoint}"
self._ssl_ca_cert = cluster.master_auth.cluster_ca_certificate
return self._cluster_url, self._ssl_ca_cert
def invoke_defer_method(self):
"""Method to easily redefine triggers which are being used in child classes."""
trigger_start_time = utcnow()
self.defer(
trigger=GKEStartPodTrigger(
pod_name=self.pod.metadata.name,
pod_namespace=self.pod.metadata.namespace,
trigger_start_time=trigger_start_time,
cluster_url=self._cluster_url,
ssl_ca_cert=self._ssl_ca_cert,
get_logs=self.get_logs,
startup_timeout=self.startup_timeout_seconds,
cluster_context=self.cluster_context,
poll_interval=self.poll_interval,
in_cluster=self.in_cluster,
base_container_name=self.base_container_name,
on_finish_action=self.on_finish_action,
),
method_name="execute_complete",
kwargs={"cluster_url": self._cluster_url, "ssl_ca_cert": self._ssl_ca_cert},
)
def execute_complete(self, context: Context, event: dict, **kwargs):
# It is required for hook to be initialized
self._cluster_url = kwargs["cluster_url"]
self._ssl_ca_cert = kwargs["ssl_ca_cert"]
return super().execute_complete(context, event, **kwargs)
| 24,955 | 41.013468 | 150 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/natural_language.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Language operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence, Tuple
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.language_v1.types import Document, EncodingType
from google.protobuf.json_format import MessageToDict
from airflow.providers.google.cloud.hooks.natural_language import CloudNaturalLanguageHook
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
MetaData = Sequence[Tuple[str, str]]
class CloudNaturalLanguageAnalyzeEntitiesOperator(GoogleCloudBaseOperator):
"""
Finds named entities in the text along with various properties.
Examples properties: entity types, salience, mentions for each entity, and others.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeEntitiesOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
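**Example** (a minimal sketch; the ``task_id`` and the document content are illustrative
assumptions): ::
    from google.cloud.language_v1.types import Document
    analyze_entities = CloudNaturalLanguageAnalyzeEntitiesOperator(
        task_id='analyze_entities',
        document=Document(content='Airflow is a platform to author workflows.',
                          type_=Document.Type.PLAIN_TEXT),
    )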
"""
# [START natural_language_analyze_entities_template_fields]
template_fields: Sequence[str] = (
"document",
"gcp_conn_id",
"impersonation_chain",
)
# [END natural_language_analyze_entities_template_fields]
def __init__(
self,
*,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudNaturalLanguageHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start analyzing entities")
response = hook.analyze_entities(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished analyzing entities")
return MessageToDict(response._pb)
class CloudNaturalLanguageAnalyzeEntitySentimentOperator(GoogleCloudBaseOperator):
"""
Similar to AnalyzeEntities, also analyzes sentiment associated with each entity and its mentions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeEntitySentimentOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
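**Example** (a minimal sketch; the ``task_id`` and the document source are illustrative
assumptions): ::
    from google.cloud.language_v1.types import Document
    analyze_entity_sentiment = CloudNaturalLanguageAnalyzeEntitySentimentOperator(
        task_id='analyze_entity_sentiment',
        document=Document(gcs_content_uri='gs://my-bucket/my-text.txt',
                          type_=Document.Type.PLAIN_TEXT),
    )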
"""
# [START natural_language_analyze_entity_sentiment_template_fields]
template_fields: Sequence[str] = (
"document",
"gcp_conn_id",
"impersonation_chain",
)
# [END natural_language_analyze_entity_sentiment_template_fields]
def __init__(
self,
*,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudNaturalLanguageHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start entity sentiment analyze")
response = hook.analyze_entity_sentiment(
document=self.document,
encoding_type=self.encoding_type,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Finished entity sentiment analyze")
return MessageToDict(response._pb)
class CloudNaturalLanguageAnalyzeSentimentOperator(GoogleCloudBaseOperator):
"""
Analyzes the sentiment of the provided text.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeSentimentOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
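**Example** (a minimal sketch; the ``task_id`` and the document content are illustrative
assumptions): ::
    from google.cloud.language_v1.types import Document
    analyze_sentiment = CloudNaturalLanguageAnalyzeSentimentOperator(
        task_id='analyze_sentiment',
        document=Document(content='Airflow is great!', type_=Document.Type.PLAIN_TEXT),
    )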
"""
# [START natural_language_analyze_sentiment_template_fields]
template_fields: Sequence[str] = (
"document",
"gcp_conn_id",
"impersonation_chain",
)
# [END natural_language_analyze_sentiment_template_fields]
def __init__(
self,
*,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudNaturalLanguageHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start sentiment analyze")
response = hook.analyze_sentiment(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished sentiment analyze")
return MessageToDict(response._pb)
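# --- Editor's illustrative sketch (not part of the upstream module) ---
# A minimal, hypothetical parameterization of the sentiment analysis operator; values are assumptions.
def _example_analyze_sentiment() -> CloudNaturalLanguageAnalyzeSentimentOperator:
    """Build a sample CloudNaturalLanguageAnalyzeSentimentOperator (illustration only)."""
    return CloudNaturalLanguageAnalyzeSentimentOperator(
        task_id="analyze_sentiment",
        document=Document(content="The new release works great.", type_=Document.Type.PLAIN_TEXT),
    )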
class CloudNaturalLanguageClassifyTextOperator(GoogleCloudBaseOperator):
"""
Classifies a document into categories.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageClassifyTextOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START natural_language_classify_text_template_fields]
template_fields: Sequence[str] = (
"document",
"gcp_conn_id",
"impersonation_chain",
)
# [END natural_language_classify_text_template_fields]
def __init__(
self,
*,
document: dict | Document,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.document = document
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudNaturalLanguageHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Start text classify")
response = hook.classify_text(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished text classify")
return MessageToDict(response._pb)
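# --- Editor's illustrative sketch (not part of the upstream module) ---
# Classification needs a reasonably long text; the sample content below is a hypothetical placeholder.
def _example_classify_text() -> CloudNaturalLanguageClassifyTextOperator:
    """Build a sample CloudNaturalLanguageClassifyTextOperator (illustration only)."""
    text = (
        "Apache Airflow is a platform created by the community to programmatically author, "
        "schedule and monitor workflows."
    )
    return CloudNaturalLanguageClassifyTextOperator(
        task_id="classify_text",
        document=Document(content=text, type_=Document.Type.PLAIN_TEXT),
    )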
| 13,709 | 41.184615 | 101 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/compute.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Compute Engine operators."""
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Sequence
from google.api_core import exceptions
from google.api_core.retry import Retry
from google.cloud.compute_v1.types import Instance, InstanceGroupManager, InstanceTemplate
from json_merge_patch import merge
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.compute import ComputeEngineHook
from airflow.providers.google.cloud.links.compute import (
ComputeInstanceDetailsLink,
ComputeInstanceGroupManagerDetailsLink,
ComputeInstanceTemplateDetailsLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
from airflow.providers.google.cloud.utils.field_sanitizer import GcpBodyFieldSanitizer
from airflow.providers.google.cloud.utils.field_validator import GcpBodyFieldValidator
if TYPE_CHECKING:
from airflow.utils.context import Context
class ComputeEngineBaseOperator(GoogleCloudBaseOperator):
"""Abstract base operator for Google Compute Engine operators to inherit from."""
def __init__(
self,
*,
zone: str,
resource_id: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.zone = zone
self.resource_id = resource_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required parameter 'project_id' is missing")
if not self.zone:
raise AirflowException("The required parameter 'zone' is missing")
def execute(self, context: Context):
pass
class ComputeEngineInsertInstanceOperator(ComputeEngineBaseOperator):
"""
Creates an Instance in Google Compute Engine based on specified parameters.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInsertInstanceOperator`
:param body: Instance representation as an object. Should at least include 'name', 'machine_type',
        'disks' and 'network_interfaces' fields but should not include the 'zone' field, as it is
        specified in the 'zone' parameter.
        Fields can be given as full or partial URLs, for example:
1. "machine_type": "projects/your-project-name/zones/your-zone/machineTypes/your-machine-type"
2. "disk_type": "projects/your-project-name/zones/your-zone/diskTypes/your-disk-type"
3. "subnetwork": "projects/your-project-name/regions/your-region/subnetworks/your-subnetwork"
:param zone: Google Cloud zone where the Instance exists
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param resource_id: Name of the Instance. If the name of Instance is not specified in body['name'],
the name will be taken from 'resource_id' parameter
    :param request_id: Unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not create a new instance again).
        It should be in UUID format as defined in RFC 4122.
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
operator_extra_links = (ComputeInstanceDetailsLink(),)
# [START gce_instance_insert_fields]
template_fields: Sequence[str] = (
"body",
"project_id",
"zone",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_insert_fields]
def __init__(
self,
*,
body: dict,
zone: str,
resource_id: str | None = None,
project_id: str | None = None,
request_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
self.zone = zone
self.request_id = request_id
self.resource_id = self.body["name"] if "name" in body else resource_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
resource_id=self.resource_id,
zone=zone,
project_id=project_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def check_body_fields(self) -> None:
required_params = ["machine_type", "disks", "network_interfaces"]
for param in required_params:
if param in self.body:
continue
readable_param = param.replace("_", " ")
            raise AirflowException(
                f"The body '{self.body}' should contain at least {readable_param} for the new instance "
                f"in the '{param}' field. Check (google.cloud.compute_v1.types.Instance) "
                f"for more details about body fields description."
            )
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id and "name" not in self.body:
raise AirflowException(
"The required parameters 'resource_id' and body['name'] are missing. "
"Please, provide at least one of them."
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def execute(self, context: Context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
self.check_body_fields()
try:
            # Idempotence check (sort of) - we want to check whether the new Instance
            # is already created and, if it is, we assume it was created previously - we do
            # not check whether the content of the Instance is as expected.
            # We assume success if the Instance is simply present.
existing_instance = hook.get_instance(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
except exceptions.NotFound as e:
            # We actually expect to get 404 / Not Found here as the instance should not yet exist
if not e.code == 404:
raise e
else:
self.log.info("The %s Instance already exists", self.resource_id)
ComputeInstanceDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(existing_instance)
self._field_sanitizer.sanitize(self.body)
self.log.info("Creating Instance with specified body: %s", self.body)
hook.insert_instance(
body=self.body,
request_id=self.request_id,
project_id=self.project_id,
zone=self.zone,
)
self.log.info("The specified Instance has been created SUCCESSFULLY")
new_instance = hook.get_instance(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
ComputeInstanceDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(new_instance)
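# --- Editor's illustrative sketch (not part of the upstream module) ---
# Shows a minimal body for creating an instance with this operator; the project, zone, image and
# machine type below are hypothetical placeholders, not values taken from this module.
def _example_insert_instance() -> ComputeEngineInsertInstanceOperator:
    """Build a sample ComputeEngineInsertInstanceOperator (illustration only)."""
    zone = "europe-west1-b"
    return ComputeEngineInsertInstanceOperator(
        task_id="gce_insert_instance",
        project_id="example-project",
        zone=zone,
        body={
            "name": "example-instance",
            "machine_type": f"zones/{zone}/machineTypes/e2-micro",
            "disks": [
                {
                    "boot": True,
                    "initialize_params": {
                        "source_image": "projects/debian-cloud/global/images/family/debian-12"
                    },
                }
            ],
            "network_interfaces": [{"network": "global/networks/default"}],
        },
    )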
class ComputeEngineInsertInstanceFromTemplateOperator(ComputeEngineBaseOperator):
"""
Creates an Instance in Google Compute Engine based on specified parameters from existing Template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInsertInstanceFromTemplateOperator`
    :param body: Instance representation as an object. For this operator, only the 'name' parameter is
        required for creating a new Instance, since all other parameters will be passed through the Template.
:param source_instance_template: Existing Instance Template that will be used as a base while creating
new Instance. When specified, only name of new Instance should be provided as input arguments in
'body' parameter when creating new Instance. All other parameters, such as 'machine_type', 'disks'
and 'network_interfaces' will be passed to Instance as they are specified in the Instance Template.
        It can be a full or partial URL, for example:
1. "https://www.googleapis.com/compute/v1/projects/your-project-name/global/instanceTemplates/temp"
2. "projects/your-project-name/global/instanceTemplates/temp"
3. "global/instanceTemplates/temp"
:param zone: Google Cloud zone where the instance exists.
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param resource_id: Name of the Instance. If the name of Instance is not specified in body['name'],
the name will be taken from 'resource_id' parameter
    :param request_id: Unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not create a new instance again).
        It should be in UUID format as defined in RFC 4122.
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
operator_extra_links = (ComputeInstanceDetailsLink(),)
# [START gce_instance_insert_from_template_fields]
template_fields: Sequence[str] = (
"body",
"source_instance_template",
"project_id",
"zone",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_insert_from_template_fields]
def __init__(
self,
*,
source_instance_template: str,
body: dict,
zone: str,
resource_id: str | None = None,
project_id: str | None = None,
request_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.source_instance_template = source_instance_template
self.body = body
self.zone = zone
self.resource_id = self.body["name"] if "name" in body else resource_id
self.request_id = request_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
resource_id=self.resource_id,
zone=zone,
project_id=project_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id and "name" not in self.body:
raise AirflowException(
"The required parameters 'resource_id' and body['name'] are missing. "
"Please, provide at least one of them."
)
def execute(self, context: Context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
try:
            # Idempotence check (sort of) - we want to check whether the new Instance
            # is already created and, if it is, we assume it was created previously - we do
            # not check whether the content of the Instance is as expected.
            # We assume success if the Instance is simply present
existing_instance = hook.get_instance(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
except exceptions.NotFound as e:
            # We actually expect to get 404 / Not Found here as the instance should
            # not yet exist
if not e.code == 404:
raise e
else:
self.log.info("The %s Instance already exists", self.resource_id)
ComputeInstanceDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(existing_instance)
self._field_sanitizer.sanitize(self.body)
self.log.info("Creating Instance with specified body: %s", self.body)
hook.insert_instance(
body=self.body,
request_id=self.request_id,
project_id=self.project_id,
zone=self.zone,
source_instance_template=self.source_instance_template,
)
self.log.info("The specified Instance has been created SUCCESSFULLY")
new_instance_from_template = hook.get_instance(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
ComputeInstanceDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(new_instance_from_template)
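# --- Editor's illustrative sketch (not part of the upstream module) ---
# With a source template, only the new instance name has to be supplied in the body; the template
# URL, project and zone below are hypothetical placeholders.
def _example_insert_instance_from_template() -> ComputeEngineInsertInstanceFromTemplateOperator:
    """Build a sample ComputeEngineInsertInstanceFromTemplateOperator (illustration only)."""
    return ComputeEngineInsertInstanceFromTemplateOperator(
        task_id="gce_insert_instance_from_template",
        project_id="example-project",
        zone="europe-west1-b",
        source_instance_template="global/instanceTemplates/example-template",
        body={"name": "example-instance-from-template"},
    )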
class ComputeEngineDeleteInstanceOperator(ComputeEngineBaseOperator):
"""
Deletes an Instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineDeleteInstanceOperator`
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param zone: Google Cloud zone where the instance exists.
:param resource_id: Name of the Instance.
    :param request_id: Unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not run the deletion again).
        It should be in UUID format as defined in RFC 4122.
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
# [START gce_instance_delete_template_fields]
template_fields: Sequence[str] = (
"zone",
"resource_id",
"request_id",
"project_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_delete_template_fields]
def __init__(
self,
*,
resource_id: str,
zone: str,
request_id: str | None = None,
project_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.zone = zone
self.request_id = request_id
self.resource_id = resource_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone=zone,
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing. ")
def execute(self, context: Context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
try:
# Checking if specified Instance exists and if it does, delete it
hook.get_instance(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
self.log.info("Successfully found Instance %s", self.resource_id)
hook.delete_instance(
resource_id=self.resource_id,
project_id=self.project_id,
request_id=self.request_id,
zone=self.zone,
)
self.log.info("Successfully deleted Instance %s", self.resource_id)
except exceptions.NotFound as e:
            # Expecting a 404 error in case the Instance doesn't exist.
if e.code == 404:
self.log.error("Instance %s doesn't exist", self.resource_id)
raise e
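# --- Editor's illustrative sketch (not part of the upstream module) ---
# Deletion only needs the zone and the instance name; all values below are hypothetical.
def _example_delete_instance() -> ComputeEngineDeleteInstanceOperator:
    """Build a sample ComputeEngineDeleteInstanceOperator (illustration only)."""
    return ComputeEngineDeleteInstanceOperator(
        task_id="gce_delete_instance",
        project_id="example-project",
        zone="europe-west1-b",
        resource_id="example-instance",
    )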
class ComputeEngineStartInstanceOperator(ComputeEngineBaseOperator):
"""
Starts an instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineStartInstanceOperator`
:param zone: Google Cloud zone where the instance exists.
:param resource_id: Name of the Compute Engine instance resource.
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (ComputeInstanceDetailsLink(),)
# [START gce_instance_start_template_fields]
template_fields: Sequence[str] = (
"project_id",
"zone",
"resource_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_start_template_fields]
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing. ")
def execute(self, context: Context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
ComputeInstanceDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
hook.start_instance(zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
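# --- Editor's illustrative sketch (not part of the upstream module) ---
# The start operator identifies the instance purely by project, zone and name (all hypothetical here).
def _example_start_instance() -> ComputeEngineStartInstanceOperator:
    """Build a sample ComputeEngineStartInstanceOperator (illustration only)."""
    return ComputeEngineStartInstanceOperator(
        task_id="gce_start_instance",
        project_id="example-project",
        zone="europe-west1-b",
        resource_id="example-instance",
    )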
class ComputeEngineStopInstanceOperator(ComputeEngineBaseOperator):
"""
Stops an instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineStopInstanceOperator`
:param zone: Google Cloud zone where the instance exists.
:param resource_id: Name of the Compute Engine instance resource.
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (ComputeInstanceDetailsLink(),)
# [START gce_instance_stop_template_fields]
template_fields: Sequence[str] = (
"project_id",
"zone",
"resource_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_stop_template_fields]
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing. ")
def execute(self, context: Context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
ComputeInstanceDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
hook.stop_instance(zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
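# --- Editor's illustrative sketch (not part of the upstream module) ---
# Stopping mirrors starting; chaining the two in a DAG gives a simple pause/resume pattern.
# Values below are hypothetical placeholders.
def _example_stop_instance() -> ComputeEngineStopInstanceOperator:
    """Build a sample ComputeEngineStopInstanceOperator (illustration only)."""
    return ComputeEngineStopInstanceOperator(
        task_id="gce_stop_instance",
        project_id="example-project",
        zone="europe-west1-b",
        resource_id="example-instance",
    )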
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION = [
dict(name="machineType", regexp="^.+$"),
]
class ComputeEngineSetMachineTypeOperator(ComputeEngineBaseOperator):
"""
Changes the machine type for a stopped instance to the machine type specified in the request.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineSetMachineTypeOperator`
:param zone: Google Cloud zone where the instance exists.
:param resource_id: Name of the Compute Engine instance resource.
:param body: Body required by the Compute Engine setMachineType API, as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType#request-body
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
    :param validate_body: Optional, if set to False, body validation is not performed.
        Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (ComputeInstanceDetailsLink(),)
# [START gce_instance_set_machine_type_template_fields]
template_fields: Sequence[str] = (
"project_id",
"zone",
"resource_id",
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_set_machine_type_template_fields]
def __init__(
self,
*,
zone: str,
resource_id: str,
body: dict,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
self._field_validator: GcpBodyFieldValidator | None = None
if validate_body:
self._field_validator = GcpBodyFieldValidator(
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION, api_version=api_version
)
super().__init__(
project_id=project_id,
zone=zone,
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing. ")
def execute(self, context: Context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
ComputeInstanceDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
hook.set_machine_type(
zone=self.zone, resource_id=self.resource_id, body=self.body, project_id=self.project_id
)
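# --- Editor's illustrative sketch (not part of the upstream module) ---
# The body follows the setMachineType API: a single 'machineType' URL. The instance must be
# stopped first; the values below are hypothetical placeholders.
def _example_set_machine_type() -> ComputeEngineSetMachineTypeOperator:
    """Build a sample ComputeEngineSetMachineTypeOperator (illustration only)."""
    zone = "europe-west1-b"
    return ComputeEngineSetMachineTypeOperator(
        task_id="gce_set_machine_type",
        project_id="example-project",
        zone=zone,
        resource_id="example-instance",
        body={"machineType": f"zones/{zone}/machineTypes/e2-standard-2"},
    )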
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION: list[dict[str, Any]] = [
dict(name="name", regexp="^.+$"),
dict(name="description", optional=True),
dict(
name="properties",
type="dict",
optional=True,
fields=[
dict(name="description", optional=True),
dict(name="tags", optional=True, fields=[dict(name="items", optional=True)]),
dict(name="machineType", optional=True),
dict(name="canIpForward", optional=True),
dict(name="networkInterfaces", optional=True), # not validating deeper
dict(name="disks", optional=True), # not validating the array deeper
dict(
name="metadata",
optional=True,
fields=[
dict(name="fingerprint", optional=True),
dict(name="items", optional=True),
dict(name="kind", optional=True),
],
),
dict(name="serviceAccounts", optional=True), # not validating deeper
dict(
name="scheduling",
optional=True,
fields=[
dict(name="onHostMaintenance", optional=True),
dict(name="automaticRestart", optional=True),
dict(name="preemptible", optional=True),
dict(name="nodeAffinities", optional=True), # not validating deeper
],
),
dict(name="labels", optional=True),
dict(name="guestAccelerators", optional=True), # not validating deeper
dict(name="minCpuPlatform", optional=True),
],
),
]
GCE_INSTANCE_FIELDS_TO_SANITIZE = [
"kind",
"id",
"creationTimestamp",
"properties.disks.sha256",
"properties.disks.kind",
"properties.disks.sourceImageEncryptionKey.sha256",
"properties.disks.index",
"properties.disks.licenses",
"properties.networkInterfaces.kind",
"properties.networkInterfaces.accessConfigs.kind",
"properties.networkInterfaces.name",
"properties.metadata.kind",
"selfLink",
]
class ComputeEngineInsertInstanceTemplateOperator(ComputeEngineBaseOperator):
"""
Creates an Instance Template using specified fields.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInsertInstanceTemplateOperator`
:param body: Instance template representation as object.
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param request_id: Unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not create a new instance template again).
        It should be in UUID format as defined in RFC 4122.
:param resource_id: Name of the Instance Template. If the name of Instance Template is not specified in
body['name'], the name will be taken from 'resource_id' parameter
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
operator_extra_links = (ComputeInstanceTemplateDetailsLink(),)
# [START gce_instance_template_insert_fields]
template_fields: Sequence[str] = (
"body",
"project_id",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_template_insert_fields]
def __init__(
self,
*,
body: dict,
project_id: str | None = None,
resource_id: str | None = None,
request_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body = body
self.request_id = request_id
self.resource_id = self.body["name"] if "name" in body else resource_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone="global",
resource_id=self.resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def check_body_fields(self) -> None:
required_params = ["machine_type", "disks", "network_interfaces"]
for param in required_params:
if param in self.body["properties"]:
continue
readable_param = param.replace("_", " ")
            raise AirflowException(
                f"The body '{self.body}' should contain at least {readable_param} for the new instance "
                f"template in the '{param}' field. Check (google.cloud.compute_v1.types.InstanceTemplate) "
                f"for more details about body fields description."
            )
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id and "name" not in self.body:
raise AirflowException(
"The required parameters 'resource_id' and body['name'] are missing. "
"Please, provide at least one of them."
)
def execute(self, context: Context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
self.check_body_fields()
self._field_sanitizer.sanitize(self.body)
try:
            # Idempotence check (sort of) - we want to check whether the new Template
            # is already created and, if it is, we assume it was created by a previous run
            # of the operator - we do not check whether the content of the Template
            # is as expected. Templates are immutable, so we cannot update them anyway,
            # and deleting/recreating is not worth the hassle, especially
            # since we cannot delete a template if it is already used in some Instance
            # Group Manager. We assume success if the template is simply present
existing_template = hook.get_instance_template(
resource_id=self.resource_id, project_id=self.project_id
)
except exceptions.NotFound as e:
# We actually expect to get 404 / Not Found here as the template should
# not yet exist
if not e.code == 404:
raise e
else:
self.log.info("The %s Template already exists.", existing_template)
ComputeInstanceTemplateDetailsLink.persist(
context=context,
task_instance=self,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return InstanceTemplate.to_dict(existing_template)
self._field_sanitizer.sanitize(self.body)
self.log.info("Creating Instance Template with specified body: %s", self.body)
hook.insert_instance_template(
body=self.body,
request_id=self.request_id,
project_id=self.project_id,
)
self.log.info("The specified Instance Template has been created SUCCESSFULLY", self.body)
new_template = hook.get_instance_template(
resource_id=self.resource_id,
project_id=self.project_id,
)
ComputeInstanceTemplateDetailsLink.persist(
context=context,
task_instance=self,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return InstanceTemplate.to_dict(new_template)
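# --- Editor's illustrative sketch (not part of the upstream module) ---
# An instance template nests the machine configuration under 'properties'; all values below are
# hypothetical placeholders, not taken from this module.
def _example_insert_instance_template() -> ComputeEngineInsertInstanceTemplateOperator:
    """Build a sample ComputeEngineInsertInstanceTemplateOperator (illustration only)."""
    return ComputeEngineInsertInstanceTemplateOperator(
        task_id="gce_insert_instance_template",
        project_id="example-project",
        body={
            "name": "example-template",
            "properties": {
                "machine_type": "e2-micro",
                "disks": [
                    {
                        "boot": True,
                        "initialize_params": {
                            "source_image": "projects/debian-cloud/global/images/family/debian-12"
                        },
                    }
                ],
                "network_interfaces": [{"network": "global/networks/default"}],
            },
        },
    )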
class ComputeEngineDeleteInstanceTemplateOperator(ComputeEngineBaseOperator):
"""
Deletes an Instance Template in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineDeleteInstanceTemplateOperator`
:param resource_id: Name of the Instance Template.
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param request_id: Unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not run the deletion again).
        It should be in UUID format as defined in RFC 4122.
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
# [START gce_instance_template_delete_fields]
template_fields: Sequence[str] = (
"resource_id",
"request_id",
"project_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_template_delete_fields]
def __init__(
self,
*,
resource_id: str,
request_id: str | None = None,
project_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.request_id = request_id
self.resource_id = resource_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone="global",
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing.")
def execute(self, context: Context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
try:
# Checking if specified Instance Template exists and if it does, delete it
hook.get_instance_template(
resource_id=self.resource_id,
project_id=self.project_id,
)
self.log.info("Successfully found Instance Template %s", self.resource_id)
hook.delete_instance_template(
resource_id=self.resource_id,
project_id=self.project_id,
request_id=self.request_id,
)
self.log.info("Successfully deleted Instance template %s", self.resource_id)
except exceptions.NotFound as e:
            # Expecting a 404 error in case the Instance Template doesn't exist.
if e.code == 404:
self.log.error("Instance template %s doesn't exist", self.resource_id)
raise e
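# --- Editor's illustrative sketch (not part of the upstream module) ---
# Deleting a template only requires its name; the values below are hypothetical.
def _example_delete_instance_template() -> ComputeEngineDeleteInstanceTemplateOperator:
    """Build a sample ComputeEngineDeleteInstanceTemplateOperator (illustration only)."""
    return ComputeEngineDeleteInstanceTemplateOperator(
        task_id="gce_delete_instance_template",
        project_id="example-project",
        resource_id="example-template",
    )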
class ComputeEngineCopyInstanceTemplateOperator(ComputeEngineBaseOperator):
"""
Copies the instance template, applying specified changes.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineCopyInstanceTemplateOperator`
:param resource_id: Name of the Instance Template
:param body_patch: Patch to the body of instanceTemplates object following rfc7386
PATCH semantics. The body_patch content follows
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
        The 'name' field is required, as we need to rename the template;
        all the other fields are optional. It is important to follow PATCH semantics
        - arrays are replaced fully, so if you need to update an array you should
        provide the whole target array as the patch element.
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not create a new instance template again).
        It should be in UUID format as defined in RFC 4122.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
    :param validate_body: Optional, if set to False, body validation is not performed.
        Defaults to True.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (ComputeInstanceTemplateDetailsLink(),)
# [START gce_instance_template_copy_operator_template_fields]
template_fields: Sequence[str] = (
"project_id",
"resource_id",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_instance_template_copy_operator_template_fields]
def __init__(
self,
*,
resource_id: str,
body_patch: dict,
project_id: str | None = None,
request_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.body_patch = body_patch
self.request_id = request_id
self._field_validator = None # GcpBodyFieldValidator | None
if "name" not in self.body_patch:
raise AirflowException(
f"The body '{body_patch}' should contain at least name for the new operator "
f"in the 'name' field"
)
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone="global",
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body_patch)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing.")
def execute(self, context: Context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
try:
            # Idempotence check (sort of) - we want to check whether the new template
            # is already created and, if it is, we assume it was created by a previous run
            # of the CopyTemplate operator - we do not check whether the content of the template
            # is as expected. Templates are immutable, so we cannot update them anyway,
            # and deleting/recreating is not worth the hassle, especially
            # since we cannot delete a template if it is already used in some Instance
            # Group Manager. We assume success if the template is simply present
existing_template = hook.get_instance_template(
resource_id=self.body_patch["name"],
project_id=self.project_id,
)
except exceptions.NotFound as e:
# We actually expect to get 404 / Not Found here as the template should
# not yet exist
if not e.code == 404:
raise e
else:
self.log.info(
"The %s template already exists. It was likely created by previous run of the operator. "
"Assuming success.",
existing_template,
)
ComputeInstanceTemplateDetailsLink.persist(
context=context,
task_instance=self,
resource_id=self.body_patch["name"],
project_id=self.project_id or hook.project_id,
)
return InstanceTemplate.to_dict(existing_template)
old_body = InstanceTemplate.to_dict(
hook.get_instance_template(
resource_id=self.resource_id,
project_id=self.project_id,
)
)
new_body = deepcopy(old_body)
self._field_sanitizer.sanitize(new_body)
new_body = merge(new_body, self.body_patch)
self.log.info("Calling insert instance template with updated body: %s", new_body)
hook.insert_instance_template(body=new_body, request_id=self.request_id, project_id=self.project_id)
new_instance_tmp = hook.get_instance_template(
resource_id=self.body_patch["name"], project_id=self.project_id
)
ComputeInstanceTemplateDetailsLink.persist(
context=context,
task_instance=self,
resource_id=self.body_patch["name"],
project_id=self.project_id or hook.project_id,
)
return InstanceTemplate.to_dict(new_instance_tmp)
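# --- Editor's illustrative sketch (not part of the upstream module) ---
# The patch must rename the template; remember that arrays in the patch replace the originals
# entirely. All values below are hypothetical placeholders.
def _example_copy_instance_template() -> ComputeEngineCopyInstanceTemplateOperator:
    """Build a sample ComputeEngineCopyInstanceTemplateOperator (illustration only)."""
    return ComputeEngineCopyInstanceTemplateOperator(
        task_id="gce_copy_instance_template",
        project_id="example-project",
        resource_id="example-template",
        body_patch={
            "name": "example-template-copy",
            "properties": {"machine_type": "e2-small"},
        },
    )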
class ComputeEngineInstanceGroupUpdateManagerTemplateOperator(ComputeEngineBaseOperator):
"""
Patches the Instance Group Manager, replacing source template URL with the destination one.
API V1 does not have update/patch operations for Instance Group Manager,
so you must use beta or newer API version. Beta is the default.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInstanceGroupUpdateManagerTemplateOperator`
:param resource_id: Name of the Instance Group Manager
:param zone: Google Cloud zone where the Instance Group Manager exists.
:param source_template: URL of the template to replace.
:param destination_template: URL of the target template.
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not patch the Instance Group Manager again).
        It should be in UUID format as defined in RFC 4122.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
operator_extra_links = (ComputeInstanceGroupManagerDetailsLink(),)
# [START gce_igm_update_template_operator_template_fields]
template_fields: Sequence[str] = (
"project_id",
"resource_id",
"zone",
"request_id",
"source_template",
"destination_template",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_igm_update_template_operator_template_fields]
def __init__(
self,
*,
resource_id: str,
zone: str,
source_template: str,
destination_template: str,
project_id: str | None = None,
update_policy: dict[str, Any] | None = None,
request_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version="beta",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.zone = zone
self.source_template = source_template
self.destination_template = destination_template
self.request_id = request_id
self.update_policy = update_policy
self._change_performed = False
if api_version == "v1":
            raise AirflowException(
                "API version v1 does not have update/patch "
                "operations for Instance Group Managers. Use the beta "
                "API version or above."
            )
super().__init__(
project_id=project_id,
zone=self.zone,
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing. ")
def _possibly_replace_template(self, dictionary: dict) -> None:
if dictionary.get("instanceTemplate") == self.source_template:
dictionary["instanceTemplate"] = self.destination_template
self._change_performed = True
def execute(self, context: Context) -> bool | None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
old_instance_group_manager = hook.get_instance_group_manager(
zone=self.zone, resource_id=self.resource_id, project_id=self.project_id
)
patch_body = {}
igm_dict = InstanceGroupManager.to_dict(old_instance_group_manager)
if "versions" in igm_dict:
patch_body["versions"] = igm_dict["versions"]
if "instanceTemplate" in igm_dict:
patch_body["instanceTemplate"] = igm_dict["instanceTemplate"]
if self.update_policy:
patch_body["updatePolicy"] = self.update_policy
self._possibly_replace_template(patch_body)
if "versions" in patch_body:
for version in patch_body["versions"]:
self._possibly_replace_template(version)
if self._change_performed or self.update_policy:
self.log.info("Calling patch instance template with updated body: %s", patch_body)
ComputeInstanceGroupManagerDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return hook.patch_instance_group_manager(
zone=self.zone,
resource_id=self.resource_id,
body=patch_body,
request_id=self.request_id,
project_id=self.project_id,
)
else:
# Idempotence achieved
ComputeInstanceGroupManagerDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return True
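# --- Editor's illustrative sketch (not part of the upstream module) ---
# Swaps one template URL for another inside an existing Instance Group Manager; the URLs, zone
# and names below are hypothetical placeholders.
def _example_update_instance_group_template() -> ComputeEngineInstanceGroupUpdateManagerTemplateOperator:
    """Build a sample ComputeEngineInstanceGroupUpdateManagerTemplateOperator (illustration only)."""
    return ComputeEngineInstanceGroupUpdateManagerTemplateOperator(
        task_id="gce_igm_update_template",
        project_id="example-project",
        zone="europe-west1-b",
        resource_id="example-group-manager",
        source_template="global/instanceTemplates/example-template",
        destination_template="global/instanceTemplates/example-template-copy",
    )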
class ComputeEngineInsertInstanceGroupManagerOperator(ComputeEngineBaseOperator):
"""
Creates an Instance Group Managers using the body specified.
After the group is created, instances in the group are created using the specified Instance Template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInsertInstanceGroupManagerOperator`
:param body: Instance Group Managers representation as object.
:param project_id: Google Cloud project ID where the Compute Engine Instance Group Managers exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param request_id: Unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request id will not create a new Instance Group Manager again).
        It should be in UUID format as defined in RFC 4122.
:param resource_id: Name of the Instance Group Managers. If the name of Instance Group Managers is
not specified in body['name'], the name will be taken from 'resource_id' parameter.
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
operator_extra_links = (ComputeInstanceGroupManagerDetailsLink(),)
# [START gce_igm_insert_fields]
template_fields: Sequence[str] = (
"project_id",
"body",
"zone",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_igm_insert_fields]
def __init__(
self,
*,
body: dict,
zone: str,
project_id: str | None = None,
resource_id: str | None = None,
request_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version="v1",
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
validate_body: bool = True,
**kwargs,
) -> None:
self.body = body
self.zone = zone
self.request_id = request_id
self.resource_id = self.body["name"] if "name" in body else resource_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone=zone,
resource_id=self.resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def check_body_fields(self) -> None:
required_params = ["base_instance_name", "target_size", "instance_template"]
for param in required_params:
if param in self.body:
continue
readable_param = param.replace("_", " ")
raise AirflowException(
f"The body '{self.body}' should contain at least {readable_param} for the new operator "
f"in the '{param}' field. Check (google.cloud.compute_v1.types.Instance) "
f"for more details about body fields description."
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id and "name" not in self.body:
raise AirflowException(
"The required parameters 'resource_id' and body['name'] are missing. "
"Please, provide at least one of them."
)
def execute(self, context: Context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
self.check_body_fields()
try:
# Idempotence check (sort of) - we want to check if the new Instance Group Manager
            # is already created and, if it isn't, we create a new one
existing_instance_group_manager = hook.get_instance_group_manager(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
except exceptions.NotFound as e:
# We actually expect to get 404 / Not Found here as the Instance Group Manager should
# not yet exist
            if e.code != 404:
raise e
else:
self.log.info("The %s Instance Group Manager already exists", existing_instance_group_manager)
ComputeInstanceGroupManagerDetailsLink.persist(
context=context,
task_instance=self,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
location_id=self.zone,
)
return InstanceGroupManager.to_dict(existing_instance_group_manager)
self._field_sanitizer.sanitize(self.body)
self.log.info("Creating Instance Group Manager with specified body: %s", self.body)
hook.insert_instance_group_manager(
body=self.body,
request_id=self.request_id,
project_id=self.project_id,
zone=self.zone,
)
self.log.info("The specified Instance Group Manager has been created SUCCESSFULLY", self.body)
new_instance_group_manager = hook.get_instance_group_manager(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
ComputeInstanceGroupManagerDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.zone,
resource_id=self.resource_id,
project_id=self.project_id or hook.project_id,
)
return InstanceGroupManager.to_dict(new_instance_group_manager)
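# A minimal usage sketch for the operator above, showing how it could be wired into a DAG.
# Illustrative only: the project id, zone and body values are hypothetical placeholders, and
# the body carries the fields required by check_body_fields().
def _example_insert_instance_group_manager_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_gce_igm_insert", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        ComputeEngineInsertInstanceGroupManagerOperator(
            task_id="insert_igm",
            project_id="example-project",  # placeholder
            zone="europe-west1-b",  # placeholder
            body={
                "name": "example-igm",
                "base_instance_name": "example-instance",
                "instance_template": "global/instanceTemplates/example-template",
                "target_size": 1,
            },
        )
    return dag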
class ComputeEngineDeleteInstanceGroupManagerOperator(ComputeEngineBaseOperator):
"""
    Permanently and irrevocably deletes an Instance Group Manager.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineDeleteInstanceGroupManagerOperator`
    :param resource_id: Name of the Instance Group Manager.
:param project_id: Google Cloud project ID where the Compute Engine Instance Group Managers exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
    :param request_id: Unique request_id that you might add to achieve
        full idempotence (for example, when a client call times out, repeating the request
        with the same request_id will not create a new Instance Group Manager again).
        It should be in UUID format as defined in RFC 4122.
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
# [START gce_igm_delete_fields]
template_fields: Sequence[str] = (
"project_id",
"resource_id",
"zone",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gce_igm_delete_fields]
def __init__(
self,
*,
resource_id: str,
zone: str,
project_id: str | None = None,
request_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
api_version="v1",
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
impersonation_chain: str | Sequence[str] | None = None,
validate_body: bool = True,
**kwargs,
) -> None:
self.zone = zone
self.request_id = request_id
self.resource_id = resource_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone=zone,
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id:
            raise AirflowException("The required parameter 'resource_id' is missing.")
def execute(self, context: Context):
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
try:
            # Check if the specified Instance Group Manager exists and, if it does, delete it
hook.get_instance_group_manager(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
self.log.info("Successfully found Group Manager %s", self.resource_id)
hook.delete_instance_group_manager(
resource_id=self.resource_id,
project_id=self.project_id,
request_id=self.request_id,
zone=self.zone,
)
self.log.info("Successfully deleted Instance Group Managers")
except exceptions.NotFound as e:
            # Expecting a 404 error in case the Instance Group Manager does not exist.
            if e.code == 404:
                self.log.error("Instance Group Manager %s does not exist", self.resource_id)
raise e
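# A minimal usage sketch for the delete operator above; illustrative only, with the project id,
# zone and resource id being hypothetical placeholders.
def _example_delete_instance_group_manager_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_gce_igm_delete", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        ComputeEngineDeleteInstanceGroupManagerOperator(
            task_id="delete_igm",
            project_id="example-project",  # placeholder
            zone="europe-west1-b",  # placeholder
            resource_id="example-igm",  # placeholder
        )
    return dag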
| 74,371 | 42.981076 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/workflows.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import re
import uuid
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Sequence
import pytz
from google.api_core.exceptions import AlreadyExists
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.workflows.executions_v1beta import Execution
from google.cloud.workflows_v1beta import Workflow
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks.workflows import WorkflowsHook
from airflow.providers.google.cloud.links.workflows import (
WorkflowsExecutionLink,
WorkflowsListOfWorkflowsLink,
WorkflowsWorkflowDetailsLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
try:
from airflow.utils.hashlib_wrapper import md5
except ModuleNotFoundError:
# Remove when Airflow providers min Airflow version is "2.7.0"
from hashlib import md5
class WorkflowsCreateWorkflowOperator(GoogleCloudBaseOperator):
"""
Creates a new workflow.
If a workflow with the specified name already exists in the specified
project and location, the long running operation will return
[ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS] error.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsCreateWorkflowOperator`
:param workflow: Required. Workflow to be created.
:param workflow_id: Required. The ID of the workflow to be created.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow", "workflow_id")
template_fields_renderers = {"workflow": "json"}
operator_extra_links = (WorkflowsWorkflowDetailsLink(),)
def __init__(
self,
*,
workflow: dict,
workflow_id: str,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
force_rerun: bool = False,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow = workflow
self.workflow_id = workflow_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.force_rerun = force_rerun
def _workflow_id(self, context):
if self.workflow_id and not self.force_rerun:
            # If the user provides a workflow id, then ensuring idempotency
            # is on their side
return self.workflow_id
if self.force_rerun:
hash_base = str(uuid.uuid4())
else:
hash_base = json.dumps(self.workflow, sort_keys=True)
        # We are limited by the allowed length of workflow_id, so
        # we use a hash of the whole information
exec_date = context["execution_date"].isoformat()
base = f"airflow_{self.dag_id}_{self.task_id}_{exec_date}_{hash_base}"
workflow_id = md5(base.encode()).hexdigest()
return re.sub(r"[:\-+.]", "_", workflow_id)
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
workflow_id = self._workflow_id(context)
self.log.info("Creating workflow")
try:
operation = hook.create_workflow(
workflow=self.workflow,
workflow_id=workflow_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
workflow = operation.result()
except AlreadyExists:
workflow = hook.get_workflow(
workflow_id=workflow_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsWorkflowDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.location,
workflow_id=self.workflow_id,
project_id=self.project_id or hook.project_id,
)
return Workflow.to_dict(workflow)
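# A minimal usage sketch for the create-workflow operator above; illustrative only. The ids are
# hypothetical placeholders and the inline YAML is a trivial Workflows program, assumed here to
# be passed via the ``source_contents`` field of the Workflow message.
def _example_create_workflow_dag():
    from datetime import datetime
    from airflow import DAG
    example_source = """
    - returnGreeting:
        return: "Hello from Airflow"
    """
    with DAG(
        dag_id="example_workflows_create", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        WorkflowsCreateWorkflowOperator(
            task_id="create_workflow",
            workflow={"source_contents": example_source},
            workflow_id="example-workflow",  # placeholder
            location="us-central1",  # placeholder
            project_id="example-project",  # placeholder
        )
    return dag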
class WorkflowsUpdateWorkflowOperator(GoogleCloudBaseOperator):
"""
Updates an existing workflow.
Running this method has no impact on already running
executions of the workflow. A new revision of the
workflow may be created as a result of a successful
update operation. In that case, such revision will be
used in new workflow executions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsUpdateWorkflowOperator`
:param workflow_id: Required. The ID of the workflow to be updated.
:param location: Required. The GCP region in which to handle the request.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param update_mask: List of fields to be updated. If not present,
the entire workflow will be updated.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("workflow_id", "update_mask")
template_fields_renderers = {"update_mask": "json"}
operator_extra_links = (WorkflowsWorkflowDetailsLink(),)
def __init__(
self,
*,
workflow_id: str,
location: str,
project_id: str | None = None,
update_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.location = location
self.project_id = project_id
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
workflow = hook.get_workflow(
workflow_id=self.workflow_id,
project_id=self.project_id,
location=self.location,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Updating workflow")
operation = hook.update_workflow(
workflow=workflow,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
workflow = operation.result()
WorkflowsWorkflowDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.location,
workflow_id=self.workflow_id,
project_id=self.project_id or hook.project_id,
)
return Workflow.to_dict(workflow)
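# A minimal usage sketch for the update operator above, restricting the update to selected
# fields with a FieldMask; illustrative only, the ids and mask paths are hypothetical
# placeholders.
def _example_update_workflow_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_workflows_update", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        WorkflowsUpdateWorkflowOperator(
            task_id="update_workflow",
            workflow_id="example-workflow",  # placeholder
            location="us-central1",  # placeholder
            project_id="example-project",  # placeholder
            update_mask=FieldMask(paths=["description"]),
        )
    return dag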
class WorkflowsDeleteWorkflowOperator(GoogleCloudBaseOperator):
"""
Delete a workflow with the specified name and all running executions of the workflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsDeleteWorkflowOperator`
    :param workflow_id: Required. The ID of the workflow to be deleted.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id")
def __init__(
self,
*,
workflow_id: str,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Deleting workflow %s", self.workflow_id)
operation = hook.delete_workflow(
workflow_id=self.workflow_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
operation.result()
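# A minimal usage sketch for the delete operator above; illustrative only, ids are hypothetical
# placeholders.
def _example_delete_workflow_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_workflows_delete", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        WorkflowsDeleteWorkflowOperator(
            task_id="delete_workflow",
            workflow_id="example-workflow",  # placeholder
            location="us-central1",  # placeholder
            project_id="example-project",  # placeholder
        )
    return dag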
class WorkflowsListWorkflowsOperator(GoogleCloudBaseOperator):
"""
Lists Workflows in a given project and location; the default order is not specified.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsListWorkflowsOperator`
:param filter_: Filter to restrict results to specific workflows.
:param order_by: Comma-separated list of fields that
specifies the order of the results. Default sorting order for a field is ascending.
To specify descending order for a field, append a "desc" suffix.
If not specified, the results will be returned in an unspecified order.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "order_by", "filter_")
operator_extra_links = (WorkflowsListOfWorkflowsLink(),)
def __init__(
self,
*,
location: str,
project_id: str | None = None,
filter_: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.filter_ = filter_
self.order_by = order_by
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving workflows")
workflows_iter = hook.list_workflows(
filter_=self.filter_,
order_by=self.order_by,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsListOfWorkflowsLink.persist(
context=context,
task_instance=self,
project_id=self.project_id or hook.project_id,
)
return [Workflow.to_dict(w) for w in workflows_iter]
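# A minimal usage sketch for the list operator above, using the documented ``filter_`` and
# ``order_by`` parameters; illustrative only, the filter expression and ids are hypothetical
# placeholders.
def _example_list_workflows_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_workflows_list", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        WorkflowsListWorkflowsOperator(
            task_id="list_workflows",
            location="us-central1",  # placeholder
            project_id="example-project",  # placeholder
            filter_="name:example",  # placeholder filter expression
            order_by="name desc",
        )
    return dag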
class WorkflowsGetWorkflowOperator(GoogleCloudBaseOperator):
"""
Gets details of a single Workflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsGetWorkflowOperator`
    :param workflow_id: Required. The ID of the workflow to be retrieved.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id")
operator_extra_links = (WorkflowsWorkflowDetailsLink(),)
def __init__(
self,
*,
workflow_id: str,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving workflow")
workflow = hook.get_workflow(
workflow_id=self.workflow_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsWorkflowDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.location,
workflow_id=self.workflow_id,
project_id=self.project_id or hook.project_id,
)
return Workflow.to_dict(workflow)
class WorkflowsCreateExecutionOperator(GoogleCloudBaseOperator):
"""
Creates a new execution using the latest revision of the given workflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsCreateExecutionOperator`
:param execution: Required. Execution to be created.
:param workflow_id: Required. The ID of the workflow.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id", "execution")
template_fields_renderers = {"execution": "json"}
operator_extra_links = (WorkflowsExecutionLink(),)
def __init__(
self,
*,
workflow_id: str,
execution: dict,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.execution = execution
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Creating execution")
execution = hook.create_execution(
workflow_id=self.workflow_id,
execution=self.execution,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
execution_id = execution.name.split("/")[-1]
self.xcom_push(context, key="execution_id", value=execution_id)
WorkflowsExecutionLink.persist(
context=context,
task_instance=self,
location_id=self.location,
workflow_id=self.workflow_id,
execution_id=execution_id,
project_id=self.project_id or hook.project_id,
)
return Execution.to_dict(execution)
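# A minimal usage sketch for the create-execution operator above. Runtime arguments are passed
# as a JSON string in the execution's ``argument`` field; ids and values are hypothetical
# placeholders. Note that the operator also pushes the new ``execution_id`` to XCom.
def _example_create_execution_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_workflows_create_execution", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        WorkflowsCreateExecutionOperator(
            task_id="create_execution",
            workflow_id="example-workflow",  # placeholder
            location="us-central1",  # placeholder
            project_id="example-project",  # placeholder
            execution={"argument": json.dumps({"greeting": "hello"})},
        )
    return dag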
class WorkflowsCancelExecutionOperator(GoogleCloudBaseOperator):
"""
Cancels an execution using the given ``workflow_id`` and ``execution_id``.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsCancelExecutionOperator`
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id", "execution_id")
operator_extra_links = (WorkflowsExecutionLink(),)
def __init__(
self,
*,
workflow_id: str,
execution_id: str,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.execution_id = execution_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Canceling execution %s", self.execution_id)
execution = hook.cancel_execution(
workflow_id=self.workflow_id,
execution_id=self.execution_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsExecutionLink.persist(
context=context,
task_instance=self,
location_id=self.location,
workflow_id=self.workflow_id,
execution_id=self.execution_id,
project_id=self.project_id or hook.project_id,
)
return Execution.to_dict(execution)
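# A minimal usage sketch for the cancel operator above, reading the execution id from the
# ``execution_id`` XCom pushed by a preceding ``create_execution`` task (the field is
# templated); illustrative only, ids are hypothetical placeholders.
def _example_cancel_execution_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_workflows_cancel_execution", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        WorkflowsCancelExecutionOperator(
            task_id="cancel_execution",
            workflow_id="example-workflow",  # placeholder
            execution_id="{{ ti.xcom_pull(task_ids='create_execution', key='execution_id') }}",
            location="us-central1",  # placeholder
            project_id="example-project",  # placeholder
        )
    return dag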
class WorkflowsListExecutionsOperator(GoogleCloudBaseOperator):
"""
Returns a list of executions which belong to the workflow with the given name.
The method returns executions of all workflow revisions. Returned
executions are ordered by their start time (newest first).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsListExecutionsOperator`
    :param workflow_id: Required. The ID of the workflow whose executions will be listed.
    :param start_date_filter: If passed, only executions that started after this date will be returned.
        By default, the operator returns executions from the last 60 minutes.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id")
operator_extra_links = (WorkflowsWorkflowDetailsLink(),)
def __init__(
self,
*,
workflow_id: str,
location: str,
start_date_filter: datetime | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.location = location
self.start_date_filter = start_date_filter or datetime.now(tz=pytz.UTC) - timedelta(minutes=60)
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving executions for workflow %s", self.workflow_id)
execution_iter = hook.list_executions(
workflow_id=self.workflow_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsWorkflowDetailsLink.persist(
context=context,
task_instance=self,
location_id=self.location,
workflow_id=self.workflow_id,
project_id=self.project_id or hook.project_id,
)
return [
Execution.to_dict(e)
for e in execution_iter
if e.start_time.ToDatetime(tzinfo=pytz.UTC) > self.start_date_filter
]
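# A minimal usage sketch for the list-executions operator above, widening the default 60-minute
# window by passing an explicit ``start_date_filter``; illustrative only, ids are hypothetical
# placeholders.
def _example_list_executions_dag():
    from datetime import datetime, timedelta
    import pytz
    from airflow import DAG
    with DAG(
        dag_id="example_workflows_list_executions", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        WorkflowsListExecutionsOperator(
            task_id="list_executions",
            workflow_id="example-workflow",  # placeholder
            location="us-central1",  # placeholder
            project_id="example-project",  # placeholder
            start_date_filter=datetime.now(tz=pytz.UTC) - timedelta(days=1),
        )
    return dag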
class WorkflowsGetExecutionOperator(GoogleCloudBaseOperator):
"""
Returns an execution for the given ``workflow_id`` and ``execution_id``.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsGetExecutionOperator`
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id", "execution_id")
operator_extra_links = (WorkflowsExecutionLink(),)
def __init__(
self,
*,
workflow_id: str,
execution_id: str,
location: str,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.execution_id = execution_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving execution %s for workflow %s", self.execution_id, self.workflow_id)
execution = hook.get_execution(
workflow_id=self.workflow_id,
execution_id=self.execution_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsExecutionLink.persist(
context=context,
task_instance=self,
location_id=self.location,
workflow_id=self.workflow_id,
execution_id=self.execution_id,
project_id=self.project_id or hook.project_id,
)
return Execution.to_dict(execution)
| 28,848 | 37.671582 | 104 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/hyperparameter_tuning_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators.
.. spelling:word-list::
irreproducible
codepoints
Tensorboard
aiplatform
myVPC
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.aiplatform import gapic, hyperparameter_tuning
from google.cloud.aiplatform_v1.types import HyperparameterTuningJob
from airflow.providers.google.cloud.hooks.vertex_ai.hyperparameter_tuning_job import (
HyperparameterTuningJobHook,
)
from airflow.providers.google.cloud.links.vertex_ai import (
VertexAIHyperparameterTuningJobListLink,
VertexAITrainingLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CreateHyperparameterTuningJobOperator(GoogleCloudBaseOperator):
"""
Create Hyperparameter Tuning job.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of the HyperparameterTuningJob. The name can be
        up to 128 characters long and can consist of any UTF-8 characters.
:param metric_spec: Required. Dictionary representing metrics to optimize. The dictionary key is the
metric_id, which is reported by your training job, and the dictionary value is the optimization
        goal of the metric ('minimize' or 'maximize').
        Example: metric_spec = {'loss': 'minimize', 'accuracy': 'maximize'}
:param parameter_spec: Required. Dictionary representing parameters to optimize. The dictionary key
is the metric_id, which is passed into your training job as a command line key word argument, and
the dictionary value is the parameter specification of the metric.
:param max_trial_count: Required. The desired total number of Trials.
:param parallel_trial_count: Required. The desired number of Trials to run in parallel.
:param worker_pool_specs: Required. The spec of the worker pools including machine type and Docker
        image. Can be provided as a list of dictionaries or a list of WorkerPoolSpec proto messages.
:param base_output_dir: Optional. GCS output directory of job. If not provided a timestamped
directory in the staging directory will be used.
:param custom_job_labels: Optional. The labels with user-defined metadata to organize CustomJobs.
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
    :param custom_job_encryption_spec_key_name: Optional. Customer-managed encryption key name for a
CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the
provided encryption key.
:param staging_bucket: Optional. Bucket for produced custom job artifacts. Overrides staging_bucket
set in aiplatform.init.
:param max_failed_trial_count: Optional. The number of failed Trials that need to be seen before
failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail
before the whole job fails.
:param search_algorithm: The search algorithm specified for the Study. Accepts one of the following:
`None` - If you do not specify an algorithm, your job uses the default Vertex AI algorithm. The
default algorithm applies Bayesian optimization to arrive at the optimal solution with a more
effective search over the parameter space.
'grid' - A simple grid search within the feasible space. This option is particularly useful if
you want to specify a quantity of trials that is greater than the number of points in the
feasible space. In such cases, if you do not specify a grid search, the Vertex AI default
algorithm may generate duplicate suggestions. To use grid search, all parameter specs must be of
        type `IntegerParameterSpec`, `CategoricalParameterSpec`, or `DiscreteParameterSpec`.
'random' - A simple random search within the feasible space.
:param measurement_selection: This indicates which measurement to use if/when the service
automatically selects the final measurement from previously reported intermediate measurements.
Accepts: 'best', 'last'
Choose this based on two considerations:
A) Do you expect your measurements to monotonically improve? If so, choose 'last'. On the other
hand, if you're in a situation where your system can "over-train" and you expect the performance
to get better for a while but then start declining, choose 'best'.
B) Are your measurements significantly noisy and/or irreproducible? If so, 'best' will tend to be
over-optimistic, and it may be better to choose 'last'.
If both or neither of (A) and (B) apply, it doesn't matter which selection type is chosen.
:param hyperparameter_tuning_job_labels: Optional. The labels with user-defined metadata to organize
HyperparameterTuningJobs. Label keys and values can be no longer than 64 characters (Unicode
codepoints), can only contain lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for more information and examples
of labels.
:param hyperparameter_tuning_job_encryption_spec_key_name: Optional. Customer-managed encryption key
options for a HyperparameterTuningJob. If this is set, then all resources created by the
HyperparameterTuningJob will be encrypted with the provided encryption key.
:param service_account: Optional. Specifies the service account for workload run-as account. Users
submitting jobs must have act-as permission on this run-as account.
:param network: Optional. The full name of the Compute Engine network to which the job should be
peered. For example, projects/12345/global/networks/myVPC. Private services access must already
be configured for the network. If left unspecified, the job is not peered with any network.
:param timeout: The maximum job running time in seconds. The default is 7 days.
:param restart_job_on_worker_restart: Restarts the entire CustomJob if a worker gets restarted. This
feature can be used by distributed training jobs that are not resilient to workers leaving and
joining a job.
:param enable_web_access: Whether you want Vertex AI to enable interactive shell access to training
containers. https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
:param tensorboard: Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob will
upload Tensorboard logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` The training script should
write Tensorboard to following Vertex AI environment variable: AIP_TENSORBOARD_LOG_DIR
`service_account` is required with provided `tensorboard`. For more information on configuring
your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute this method synchronously. If False, this method will unblock and it
will be executed in a concurrent Future.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = [
"region",
"project_id",
"impersonation_chain",
]
operator_extra_links = (VertexAITrainingLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
display_name: str,
metric_spec: dict[str, str],
parameter_spec: dict[str, hyperparameter_tuning._ParameterSpec],
max_trial_count: int,
parallel_trial_count: int,
# START: CustomJob param
worker_pool_specs: list[dict] | list[gapic.WorkerPoolSpec],
base_output_dir: str | None = None,
custom_job_labels: dict[str, str] | None = None,
custom_job_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# END: CustomJob param
max_failed_trial_count: int = 0,
search_algorithm: str | None = None,
measurement_selection: str | None = "best",
hyperparameter_tuning_job_labels: dict[str, str] | None = None,
hyperparameter_tuning_job_encryption_spec_key_name: str | None = None,
# START: run param
service_account: str | None = None,
network: str | None = None,
timeout: int | None = None, # seconds
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: str | None = None,
sync: bool = True,
# END: run param
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.display_name = display_name
self.metric_spec = metric_spec
self.parameter_spec = parameter_spec
self.max_trial_count = max_trial_count
self.parallel_trial_count = parallel_trial_count
self.worker_pool_specs = worker_pool_specs
self.base_output_dir = base_output_dir
self.custom_job_labels = custom_job_labels
self.custom_job_encryption_spec_key_name = custom_job_encryption_spec_key_name
self.staging_bucket = staging_bucket
self.max_failed_trial_count = max_failed_trial_count
self.search_algorithm = search_algorithm
self.measurement_selection = measurement_selection
self.hyperparameter_tuning_job_labels = hyperparameter_tuning_job_labels
self.hyperparameter_tuning_job_encryption_spec_key_name = (
hyperparameter_tuning_job_encryption_spec_key_name
)
self.service_account = service_account
self.network = network
self.timeout = timeout
self.restart_job_on_worker_restart = restart_job_on_worker_restart
self.enable_web_access = enable_web_access
self.tensorboard = tensorboard
self.sync = sync
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: HyperparameterTuningJobHook | None = None
def execute(self, context: Context):
self.log.info("Creating Hyperparameter Tuning job")
self.hook = HyperparameterTuningJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = self.hook.create_hyperparameter_tuning_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
metric_spec=self.metric_spec,
parameter_spec=self.parameter_spec,
max_trial_count=self.max_trial_count,
parallel_trial_count=self.parallel_trial_count,
worker_pool_specs=self.worker_pool_specs,
base_output_dir=self.base_output_dir,
custom_job_labels=self.custom_job_labels,
custom_job_encryption_spec_key_name=self.custom_job_encryption_spec_key_name,
staging_bucket=self.staging_bucket,
max_failed_trial_count=self.max_failed_trial_count,
search_algorithm=self.search_algorithm,
measurement_selection=self.measurement_selection,
hyperparameter_tuning_job_labels=self.hyperparameter_tuning_job_labels,
hyperparameter_tuning_job_encryption_spec_key_name=(
self.hyperparameter_tuning_job_encryption_spec_key_name
),
service_account=self.service_account,
network=self.network,
timeout=self.timeout,
restart_job_on_worker_restart=self.restart_job_on_worker_restart,
enable_web_access=self.enable_web_access,
tensorboard=self.tensorboard,
sync=self.sync,
)
hyperparameter_tuning_job = result.to_dict()
hyperparameter_tuning_job_id = self.hook.extract_hyperparameter_tuning_job_id(
hyperparameter_tuning_job
)
self.log.info("Hyperparameter Tuning job was created. Job id: %s", hyperparameter_tuning_job_id)
self.xcom_push(context, key="hyperparameter_tuning_job_id", value=hyperparameter_tuning_job_id)
VertexAITrainingLink.persist(
context=context, task_instance=self, training_id=hyperparameter_tuning_job_id
)
return hyperparameter_tuning_job
def on_kill(self) -> None:
"""Callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_hyperparameter_tuning_job()
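# A minimal usage sketch for the operator above: tuning two hyperparameters of a custom training
# container. Illustrative only; the project, region, bucket, container image and parameter
# ranges are hypothetical placeholders.
def _example_create_hyperparameter_tuning_job_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_vertex_ai_hpt_create", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        CreateHyperparameterTuningJobOperator(
            task_id="create_hpt_job",
            project_id="example-project",  # placeholder
            region="us-central1",  # placeholder
            display_name="example-hpt-job",
            metric_spec={"accuracy": "maximize"},
            parameter_spec={
                "learning_rate": hyperparameter_tuning.DoubleParameterSpec(
                    min=1e-4, max=1e-1, scale="log"
                ),
                "batch_size": hyperparameter_tuning.DiscreteParameterSpec(
                    values=[16, 32, 64], scale="linear"
                ),
            },
            max_trial_count=8,
            parallel_trial_count=2,
            worker_pool_specs=[
                {
                    "machine_spec": {"machine_type": "n1-standard-4"},
                    "replica_count": 1,
                    "container_spec": {"image_uri": "gcr.io/example-project/trainer:latest"},  # placeholder
                }
            ],
            staging_bucket="gs://example-staging-bucket",  # placeholder
        )
    return dag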
class GetHyperparameterTuningJobOperator(GoogleCloudBaseOperator):
"""
Gets a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job_id: Required. The name of the HyperparameterTuningJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "hyperparameter_tuning_job_id", "project_id", "impersonation_chain")
operator_extra_links = (VertexAITrainingLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
hyperparameter_tuning_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.hyperparameter_tuning_job_id = hyperparameter_tuning_job_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = HyperparameterTuningJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Get hyperparameter tuning job: %s", self.hyperparameter_tuning_job_id)
result = hook.get_hyperparameter_tuning_job(
project_id=self.project_id,
region=self.region,
hyperparameter_tuning_job=self.hyperparameter_tuning_job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAITrainingLink.persist(
context=context, task_instance=self, training_id=self.hyperparameter_tuning_job_id
)
self.log.info("Hyperparameter tuning job was gotten.")
return HyperparameterTuningJob.to_dict(result)
except NotFound:
self.log.info(
"The Hyperparameter tuning job %s does not exist.", self.hyperparameter_tuning_job_id
)
class DeleteHyperparameterTuningJobOperator(GoogleCloudBaseOperator):
"""
Deletes a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job_id: Required. The name of the HyperparameterTuningJob resource to be
deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
template_fields = ("region", "project_id", "hyperparameter_tuning_job_id", "impersonation_chain")
def __init__(
self,
*,
hyperparameter_tuning_job_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.hyperparameter_tuning_job_id = hyperparameter_tuning_job_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = HyperparameterTuningJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting Hyperparameter Tuning job: %s", self.hyperparameter_tuning_job_id)
operation = hook.delete_hyperparameter_tuning_job(
region=self.region,
project_id=self.project_id,
hyperparameter_tuning_job=self.hyperparameter_tuning_job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Hyperparameter Tuning job was deleted.")
except NotFound:
self.log.info(
"The Hyperparameter Tuning Job ID %s does not exist.", self.hyperparameter_tuning_job_id
)
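# A minimal usage sketch for the delete operator above, reading the job id from the
# ``hyperparameter_tuning_job_id`` XCom pushed by a preceding ``create_hpt_job`` task (the field
# is templated); illustrative only, ids are hypothetical placeholders.
def _example_delete_hyperparameter_tuning_job_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_vertex_ai_hpt_delete", start_date=datetime(2023, 1, 1), schedule=None
    ) as dag:
        DeleteHyperparameterTuningJobOperator(
            task_id="delete_hpt_job",
            project_id="example-project",  # placeholder
            region="us-central1",  # placeholder
            hyperparameter_tuning_job_id=(
                "{{ ti.xcom_pull(task_ids='create_hpt_job', key='hyperparameter_tuning_job_id') }}"
            ),
        )
    return dag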
class ListHyperparameterTuningJobOperator(GoogleCloudBaseOperator):
"""
Lists HyperparameterTuningJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
template_fields = [
"region",
"project_id",
"impersonation_chain",
]
operator_extra_links = (VertexAIHyperparameterTuningJobListLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.page_size = page_size
self.page_token = page_token
self.filter = filter
self.read_mask = read_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = HyperparameterTuningJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_hyperparameter_tuning_jobs(
region=self.region,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
filter=self.filter,
read_mask=self.read_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIHyperparameterTuningJobListLink.persist(context=context, task_instance=self)
return [HyperparameterTuningJob.to_dict(result) for result in results]
| 23,743 | 47.956701 | 105 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform.models import Model
from google.cloud.aiplatform_v1.types.training_pipeline import TrainingPipeline
from airflow.providers.google.cloud.hooks.vertex_ai.auto_ml import AutoMLHook
from airflow.providers.google.cloud.links.vertex_ai import (
VertexAIModelLink,
VertexAITrainingLink,
VertexAITrainingPipelinesLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class AutoMLTrainingJobBaseOperator(GoogleCloudBaseOperator):
"""The base class for operators that launch AutoML jobs on VertexAI."""
def __init__(
self,
*,
project_id: str,
region: str,
display_name: str,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
# RUN
training_fraction_split: float | None = None,
test_fraction_split: float | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
sync: bool = True,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.display_name = display_name
self.labels = labels
self.training_encryption_spec_key_name = training_encryption_spec_key_name
self.model_encryption_spec_key_name = model_encryption_spec_key_name
# START Run param
self.training_fraction_split = training_fraction_split
self.test_fraction_split = test_fraction_split
self.model_display_name = model_display_name
self.model_labels = model_labels
self.sync = sync
# END Run param
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: AutoMLHook | None = None
def on_kill(self) -> None:
"""Callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_auto_ml_job()
class CreateAutoMLForecastingTrainingJobOperator(AutoMLTrainingJobBaseOperator):
"""Create AutoML Forecasting Training job."""
template_fields = (
"dataset_id",
"region",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
target_column: str,
time_column: str,
time_series_identifier_column: str,
unavailable_at_forecast_columns: list[str],
available_at_forecast_columns: list[str],
forecast_horizon: int,
data_granularity_unit: str,
data_granularity_count: int,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
validation_fraction_split: float | None = None,
predefined_split_column_name: str | None = None,
weight_column: str | None = None,
time_series_attribute_columns: list[str] | None = None,
context_window: int | None = None,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: str | None = None,
export_evaluated_data_items_override_destination: bool = False,
quantiles: list[float] | None = None,
validation_options: str | None = None,
budget_milli_node_hours: int = 1000,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.target_column = target_column
self.time_column = time_column
self.time_series_identifier_column = time_series_identifier_column
self.unavailable_at_forecast_columns = unavailable_at_forecast_columns
self.available_at_forecast_columns = available_at_forecast_columns
self.forecast_horizon = forecast_horizon
self.data_granularity_unit = data_granularity_unit
self.data_granularity_count = data_granularity_count
self.optimization_objective = optimization_objective
self.column_specs = column_specs
self.column_transformations = column_transformations
self.validation_fraction_split = validation_fraction_split
self.predefined_split_column_name = predefined_split_column_name
self.weight_column = weight_column
self.time_series_attribute_columns = time_series_attribute_columns
self.context_window = context_window
self.export_evaluated_data_items = export_evaluated_data_items
self.export_evaluated_data_items_bigquery_destination_uri = (
export_evaluated_data_items_bigquery_destination_uri
)
self.export_evaluated_data_items_override_destination = (
export_evaluated_data_items_override_destination
)
self.quantiles = quantiles
self.validation_options = validation_options
self.budget_milli_node_hours = budget_milli_node_hours
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
model, training_id = self.hook.create_auto_ml_forecasting_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.TimeSeriesDataset(dataset_name=self.dataset_id),
target_column=self.target_column,
time_column=self.time_column,
time_series_identifier_column=self.time_series_identifier_column,
unavailable_at_forecast_columns=self.unavailable_at_forecast_columns,
available_at_forecast_columns=self.available_at_forecast_columns,
forecast_horizon=self.forecast_horizon,
data_granularity_unit=self.data_granularity_unit,
data_granularity_count=self.data_granularity_count,
optimization_objective=self.optimization_objective,
column_specs=self.column_specs,
column_transformations=self.column_transformations,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
predefined_split_column_name=self.predefined_split_column_name,
weight_column=self.weight_column,
time_series_attribute_columns=self.time_series_attribute_columns,
context_window=self.context_window,
export_evaluated_data_items=self.export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=(
self.export_evaluated_data_items_bigquery_destination_uri
),
export_evaluated_data_items_override_destination=(
self.export_evaluated_data_items_override_destination
),
quantiles=self.quantiles,
validation_options=self.validation_options,
budget_milli_node_hours=self.budget_milli_node_hours,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
sync=self.sync,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
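# A minimal usage sketch for CreateAutoMLForecastingTrainingJobOperator, assuming it is
# wired into a DAG. The DAG id, project, region, dataset id and column names below are
# hypothetical placeholders, not values defined in this module.
from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
    CreateAutoMLForecastingTrainingJobOperator,
)

with DAG(
    dag_id="example_automl_forecasting",  # hypothetical DAG id
    start_date=datetime(2024, 1, 1),
    schedule=None,
    catchup=False,
):
    create_forecasting_training_job = CreateAutoMLForecastingTrainingJobOperator(
        task_id="create_forecasting_training_job",
        project_id="my-project",        # placeholder
        region="us-central1",           # placeholder
        display_name="forecast-train",  # placeholder
        dataset_id="1234567890",        # placeholder TimeSeriesDataset id
        target_column="sales",
        time_column="date",
        time_series_identifier_column="store_id",
        unavailable_at_forecast_columns=["sales"],
        available_at_forecast_columns=["date"],
        forecast_horizon=30,
        data_granularity_unit="day",
        data_granularity_count=1,
    )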
class CreateAutoMLImageTrainingJobOperator(AutoMLTrainingJobBaseOperator):
"""Create Auto ML Image Training job."""
template_fields = (
"dataset_id",
"region",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: Model | None = None,
validation_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
budget_milli_node_hours: int | None = None,
disable_early_stopping: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.prediction_type = prediction_type
self.multi_label = multi_label
self.model_type = model_type
self.base_model = base_model
self.validation_fraction_split = validation_fraction_split
self.training_filter_split = training_filter_split
self.validation_filter_split = validation_filter_split
self.test_filter_split = test_filter_split
self.budget_milli_node_hours = budget_milli_node_hours
self.disable_early_stopping = disable_early_stopping
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
model, training_id = self.hook.create_auto_ml_image_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.ImageDataset(dataset_name=self.dataset_id),
prediction_type=self.prediction_type,
multi_label=self.multi_label,
model_type=self.model_type,
base_model=self.base_model,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
budget_milli_node_hours=self.budget_milli_node_hours,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
disable_early_stopping=self.disable_early_stopping,
sync=self.sync,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
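# A minimal sketch of CreateAutoMLImageTrainingJobOperator, assumed to be attached to a
# DAG elsewhere; project, region, dataset id and display name are hypothetical placeholders.
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
    CreateAutoMLImageTrainingJobOperator,
)

create_image_training_job = CreateAutoMLImageTrainingJobOperator(
    task_id="create_image_training_job",
    project_id="my-project",     # placeholder
    region="us-central1",        # placeholder
    display_name="image-train",  # placeholder
    dataset_id="1234567890",     # placeholder ImageDataset id
    prediction_type="classification",
    multi_label=False,
    model_type="CLOUD",
    budget_milli_node_hours=8000,
)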
class CreateAutoMLTabularTrainingJobOperator(AutoMLTrainingJobBaseOperator):
"""Create Auto ML Tabular Training job."""
template_fields = (
"dataset_id",
"region",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
target_column: str,
optimization_prediction_type: str,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
optimization_objective_recall_value: float | None = None,
optimization_objective_precision_value: float | None = None,
validation_fraction_split: float | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
weight_column: str | None = None,
budget_milli_node_hours: int = 1000,
disable_early_stopping: bool = False,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: str | None = None,
export_evaluated_data_items_override_destination: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.target_column = target_column
self.optimization_prediction_type = optimization_prediction_type
self.optimization_objective = optimization_objective
self.column_specs = column_specs
self.column_transformations = column_transformations
self.optimization_objective_recall_value = optimization_objective_recall_value
self.optimization_objective_precision_value = optimization_objective_precision_value
self.validation_fraction_split = validation_fraction_split
self.predefined_split_column_name = predefined_split_column_name
self.timestamp_split_column_name = timestamp_split_column_name
self.weight_column = weight_column
self.budget_milli_node_hours = budget_milli_node_hours
self.disable_early_stopping = disable_early_stopping
self.export_evaluated_data_items = export_evaluated_data_items
self.export_evaluated_data_items_bigquery_destination_uri = (
export_evaluated_data_items_bigquery_destination_uri
)
self.export_evaluated_data_items_override_destination = (
export_evaluated_data_items_override_destination
)
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
credentials, _ = self.hook.get_credentials_and_project_id()
model, training_id = self.hook.create_auto_ml_tabular_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.TabularDataset(
dataset_name=self.dataset_id,
project=self.project_id,
credentials=credentials,
),
target_column=self.target_column,
optimization_prediction_type=self.optimization_prediction_type,
optimization_objective=self.optimization_objective,
column_specs=self.column_specs,
column_transformations=self.column_transformations,
optimization_objective_recall_value=self.optimization_objective_recall_value,
optimization_objective_precision_value=self.optimization_objective_precision_value,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
predefined_split_column_name=self.predefined_split_column_name,
timestamp_split_column_name=self.timestamp_split_column_name,
weight_column=self.weight_column,
budget_milli_node_hours=self.budget_milli_node_hours,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
disable_early_stopping=self.disable_early_stopping,
export_evaluated_data_items=self.export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=(
self.export_evaluated_data_items_bigquery_destination_uri
),
export_evaluated_data_items_override_destination=(
self.export_evaluated_data_items_override_destination
),
sync=self.sync,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
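# A minimal sketch of CreateAutoMLTabularTrainingJobOperator, assumed to run inside a DAG;
# all identifiers below are hypothetical placeholders. Only the dataset id is passed here,
# since the operator builds the TabularDataset with the hook's own credentials.
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
    CreateAutoMLTabularTrainingJobOperator,
)

create_tabular_training_job = CreateAutoMLTabularTrainingJobOperator(
    task_id="create_tabular_training_job",
    project_id="my-project",       # placeholder
    region="us-central1",          # placeholder
    display_name="tabular-train",  # placeholder
    dataset_id="1234567890",       # placeholder TabularDataset id
    target_column="churned",
    optimization_prediction_type="classification",
    budget_milli_node_hours=1000,
)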
class CreateAutoMLTextTrainingJobOperator(AutoMLTrainingJobBaseOperator):
"""Create Auto ML Text Training job."""
    template_fields = (
        "dataset_id",
        "region",
        "impersonation_chain",
    )
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
prediction_type: str,
multi_label: bool = False,
sentiment_max: int = 10,
validation_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.prediction_type = prediction_type
self.multi_label = multi_label
self.sentiment_max = sentiment_max
self.validation_fraction_split = validation_fraction_split
self.training_filter_split = training_filter_split
self.validation_filter_split = validation_filter_split
self.test_filter_split = test_filter_split
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
model, training_id = self.hook.create_auto_ml_text_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.TextDataset(dataset_name=self.dataset_id),
prediction_type=self.prediction_type,
multi_label=self.multi_label,
sentiment_max=self.sentiment_max,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
sync=self.sync,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
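# A minimal sketch of CreateAutoMLTextTrainingJobOperator; identifiers are hypothetical
# placeholders and the task is assumed to be attached to a DAG elsewhere.
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
    CreateAutoMLTextTrainingJobOperator,
)

create_text_training_job = CreateAutoMLTextTrainingJobOperator(
    task_id="create_text_training_job",
    project_id="my-project",    # placeholder
    region="us-central1",       # placeholder
    display_name="text-train",  # placeholder
    dataset_id="1234567890",    # placeholder TextDataset id
    prediction_type="classification",
    multi_label=False,
)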
class CreateAutoMLVideoTrainingJobOperator(AutoMLTrainingJobBaseOperator):
"""Create Auto ML Video Training job."""
template_fields = (
"dataset_id",
"region",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
prediction_type: str = "classification",
model_type: str = "CLOUD",
training_filter_split: str | None = None,
test_filter_split: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dataset_id = dataset_id
self.prediction_type = prediction_type
self.model_type = model_type
self.training_filter_split = training_filter_split
self.test_filter_split = test_filter_split
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
model, training_id = self.hook.create_auto_ml_video_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.VideoDataset(dataset_name=self.dataset_id),
prediction_type=self.prediction_type,
model_type=self.model_type,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
test_filter_split=self.test_filter_split,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
sync=self.sync,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
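# A minimal sketch of CreateAutoMLVideoTrainingJobOperator; identifiers are hypothetical
# placeholders and the task is assumed to be attached to a DAG elsewhere.
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
    CreateAutoMLVideoTrainingJobOperator,
)

create_video_training_job = CreateAutoMLVideoTrainingJobOperator(
    task_id="create_video_training_job",
    project_id="my-project",     # placeholder
    region="us-central1",        # placeholder
    display_name="video-train",  # placeholder
    dataset_id="1234567890",     # placeholder VideoDataset id
    prediction_type="classification",
    model_type="CLOUD",
)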
class DeleteAutoMLTrainingJobOperator(GoogleCloudBaseOperator):
"""
Delete an AutoML training job.
Can be used with AutoMLForecastingTrainingJob, AutoMLImageTrainingJob,
AutoMLTabularTrainingJob, AutoMLTextTrainingJob, or AutoMLVideoTrainingJob.
"""
template_fields = ("training_pipeline", "region", "project_id", "impersonation_chain")
def __init__(
self,
*,
training_pipeline_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.training_pipeline = training_pipeline_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting Auto ML training pipeline: %s", self.training_pipeline)
training_pipeline_operation = hook.delete_training_pipeline(
training_pipeline=self.training_pipeline,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=training_pipeline_operation)
self.log.info("Training pipeline was deleted.")
except NotFound:
self.log.info("The Training Pipeline ID %s does not exist.", self.training_pipeline)
class ListAutoMLTrainingJobOperator(GoogleCloudBaseOperator):
"""
List an AutoML training job.
Can be used with AutoMLForecastingTrainingJob, AutoMLImageTrainingJob, AutoMLTabularTrainingJob,
AutoMLTextTrainingJob, or AutoMLVideoTrainingJob in a Location.
"""
template_fields = (
"region",
"project_id",
"impersonation_chain",
)
    operator_extra_links = (VertexAITrainingPipelinesLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.page_size = page_size
self.page_token = page_token
self.filter = filter
self.read_mask = read_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_training_pipelines(
region=self.region,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
filter=self.filter,
read_mask=self.read_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAITrainingPipelinesLink.persist(context=context, task_instance=self)
return [TrainingPipeline.to_dict(result) for result in results]
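# A minimal sketch of ListAutoMLTrainingJobOperator; project and region are hypothetical
# placeholders. The returned list of TrainingPipeline dicts is pushed to XCom as the
# task's return value.
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
    ListAutoMLTrainingJobOperator,
)

list_training_pipelines = ListAutoMLTrainingJobOperator(
    task_id="list_training_pipelines",
    project_id="my-project",  # placeholder
    region="us-central1",     # placeholder
    page_size=50,
)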
| 27,511 | 41.067278 | 100 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/endpoint_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators.
.. spelling:word-list::
undeployed
undeploy
Undeploys
aiplatform
FieldMask
unassigns
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.aiplatform_v1.types import DeployedModel, Endpoint, endpoint_service
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks.vertex_ai.endpoint_service import EndpointServiceHook
from airflow.providers.google.cloud.links.vertex_ai import (
VertexAIEndpointLink,
VertexAIEndpointListLink,
VertexAIModelLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CreateEndpointOperator(GoogleCloudBaseOperator):
"""
Creates an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint: Required. The Endpoint to create.
:param endpoint_id: The ID of Endpoint. This value should be 1-10 characters, and valid characters
are /[0-9]/. If not provided, Vertex AI will generate a value for this ID.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIEndpointLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
endpoint: Endpoint | dict,
endpoint_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.endpoint = endpoint
self.endpoint_id = endpoint_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating endpoint")
operation = hook.create_endpoint(
project_id=self.project_id,
region=self.region,
endpoint=self.endpoint,
endpoint_id=self.endpoint_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = hook.wait_for_operation(timeout=self.timeout, operation=operation)
endpoint = Endpoint.to_dict(result)
endpoint_id = hook.extract_endpoint_id(endpoint)
self.log.info("Endpoint was created. Endpoint ID: %s", endpoint_id)
self.xcom_push(context, key="endpoint_id", value=endpoint_id)
VertexAIEndpointLink.persist(context=context, task_instance=self, endpoint_id=endpoint_id)
return endpoint
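# A minimal sketch of CreateEndpointOperator; per the type hint above, the endpoint can be
# passed either as an Endpoint proto or as a plain dict. Project, region and display name
# are hypothetical placeholders.
from airflow.providers.google.cloud.operators.vertex_ai.endpoint_service import (
    CreateEndpointOperator,
)

create_endpoint = CreateEndpointOperator(
    task_id="create_endpoint",
    project_id="my-project",                   # placeholder
    region="us-central1",                      # placeholder
    endpoint={"display_name": "my-endpoint"},  # placeholder display name
)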
class DeleteEndpointOperator(GoogleCloudBaseOperator):
"""
Deletes an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint_id: Required. The Endpoint ID to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "endpoint_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
endpoint_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.endpoint_id = endpoint_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting endpoint: %s", self.endpoint_id)
operation = hook.delete_endpoint(
project_id=self.project_id,
region=self.region,
endpoint=self.endpoint_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Endpoint was deleted.")
except NotFound:
self.log.info("The Endpoint ID %s does not exist.", self.endpoint_id)
class DeployModelOperator(GoogleCloudBaseOperator):
"""
Deploys a Model into this Endpoint, creating a DeployedModel within it.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint_id: Required. The name of the Endpoint resource into which to deploy a Model. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
:param deployed_model: Required. The DeployedModel to be created within the Endpoint. Note that
[Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] must be updated for
the DeployedModel to start receiving traffic, either as part of this call, or via
[EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
:param traffic_split: A map from a DeployedModel's ID to the percentage of this Endpoint's traffic
that should be forwarded to that DeployedModel.
If this field is non-empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. To
        refer to the ID of the Model that is being deployed by this request, a "0" should be used, and
        the actual ID of the new DeployedModel will be filled in its place by this method. The traffic
        percentage values must add up to 100.
If this field is empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] is not updated.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "endpoint_id", "project_id", "deployed_model", "impersonation_chain")
operator_extra_links = (VertexAIModelLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
endpoint_id: str,
deployed_model: DeployedModel | dict,
traffic_split: Sequence | dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.endpoint_id = endpoint_id
self.deployed_model = deployed_model
self.traffic_split = traffic_split
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deploying model")
operation = hook.deploy_model(
project_id=self.project_id,
region=self.region,
endpoint=self.endpoint_id,
deployed_model=self.deployed_model,
traffic_split=self.traffic_split,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = hook.wait_for_operation(timeout=self.timeout, operation=operation)
deploy_model = endpoint_service.DeployModelResponse.to_dict(result)
deployed_model_id = hook.extract_deployed_model_id(deploy_model)
self.log.info("Model was deployed. Deployed Model ID: %s", deployed_model_id)
self.xcom_push(context, key="deployed_model_id", value=deployed_model_id)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=deployed_model_id)
return deploy_model
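# A minimal sketch of DeployModelOperator. The deployed_model dict mirrors the
# google.cloud.aiplatform_v1 DeployedModel message; the model resource name, endpoint id
# and machine type are hypothetical placeholders.
from airflow.providers.google.cloud.operators.vertex_ai.endpoint_service import (
    DeployModelOperator,
)

deploy_model = DeployModelOperator(
    task_id="deploy_model",
    project_id="my-project",   # placeholder
    region="us-central1",      # placeholder
    endpoint_id="1234567890",  # placeholder endpoint id
    deployed_model={
        # placeholder model resource name
        "model": "projects/my-project/locations/us-central1/models/9876543210",
        "display_name": "my-deployed-model",
        "dedicated_resources": {
            "machine_spec": {"machine_type": "n1-standard-2"},
            "min_replica_count": 1,
            "max_replica_count": 1,
        },
    },
    traffic_split={"0": 100},  # route all traffic to the model deployed by this call
)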
class GetEndpointOperator(GoogleCloudBaseOperator):
"""
Gets an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint_id: Required. The Endpoint ID to get.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "endpoint_id", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIEndpointLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
endpoint_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.endpoint_id = endpoint_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Get endpoint: %s", self.endpoint_id)
endpoint_obj = hook.get_endpoint(
project_id=self.project_id,
region=self.region,
endpoint=self.endpoint_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIEndpointLink.persist(context=context, task_instance=self, endpoint_id=self.endpoint_id)
self.log.info("Endpoint was gotten.")
return Endpoint.to_dict(endpoint_obj)
except NotFound:
self.log.info("The Endpoint ID %s does not exist.", self.endpoint_id)
class ListEndpointsOperator(GoogleCloudBaseOperator):
"""
Lists Endpoints in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for
descending. Supported fields:
- ``display_name``
- ``create_time``
- ``update_time``
Example: ``display_name, create_time desc``.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIEndpointListLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.filter = filter
self.page_size = page_size
self.page_token = page_token
self.read_mask = read_mask
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_endpoints(
project_id=self.project_id,
region=self.region,
filter=self.filter,
page_size=self.page_size,
page_token=self.page_token,
read_mask=self.read_mask,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIEndpointListLink.persist(context=context, task_instance=self)
return [Endpoint.to_dict(result) for result in results]
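# A minimal sketch of ListEndpointsOperator; project and region are hypothetical
# placeholders, and the filter string follows the syntax described in the docstring above.
from airflow.providers.google.cloud.operators.vertex_ai.endpoint_service import (
    ListEndpointsOperator,
)

list_endpoints = ListEndpointsOperator(
    task_id="list_endpoints",
    project_id="my-project",  # placeholder
    region="us-central1",     # placeholder
    filter='display_name="my-endpoint"',
    order_by="create_time desc",
)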
class UndeployModelOperator(GoogleCloudBaseOperator):
"""
Undeploys a Model from an Endpoint, removing a DeployedModel from it, and freeing all used resources.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint_id: Required. The name of the Endpoint resource from which to undeploy a Model. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
:param deployed_model_id: Required. The ID of the DeployedModel to be undeployed from the Endpoint.
:param traffic_split: If this field is provided, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. If
        the last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will
        always end up empty when this call returns. A DeployedModel will be successfully undeployed only
        if it doesn't have any traffic assigned to it when this method executes, or if this field
        unassigns any traffic from it.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "endpoint_id", "deployed_model_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
endpoint_id: str,
deployed_model_id: str,
traffic_split: Sequence | dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.endpoint_id = endpoint_id
self.deployed_model_id = deployed_model_id
self.traffic_split = traffic_split
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Removing a DeployedModel %s", self.deployed_model_id)
operation = hook.undeploy_model(
project_id=self.project_id,
region=self.region,
endpoint=self.endpoint_id,
deployed_model_id=self.deployed_model_id,
traffic_split=self.traffic_split,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("DeployedModel was removed successfully")
class UpdateEndpointOperator(GoogleCloudBaseOperator):
"""
Updates an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint_id: Required. The ID of the Endpoint to update.
:param endpoint: Required. The Endpoint which replaces the resource on the server.
:param update_mask: Required. The update mask applies to the resource. See
[google.protobuf.FieldMask][google.protobuf.FieldMask].
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "endpoint_id", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIEndpointLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
endpoint_id: str,
endpoint: Endpoint | dict,
update_mask: FieldMask | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.endpoint_id = endpoint_id
self.endpoint = endpoint
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Updating endpoint: %s", self.endpoint_id)
result = hook.update_endpoint(
project_id=self.project_id,
region=self.region,
endpoint_id=self.endpoint_id,
endpoint=self.endpoint,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Endpoint was updated")
VertexAIEndpointLink.persist(context=context, task_instance=self, endpoint_id=self.endpoint_id)
return Endpoint.to_dict(result)
| 26,923 | 43.065466 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/batch_prediction_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators.
.. spelling:word-list::
jsonl
codepoints
aiplatform
gapic
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.aiplatform import Model, explain
from google.cloud.aiplatform_v1.types import BatchPredictionJob
from airflow.providers.google.cloud.hooks.vertex_ai.batch_prediction_job import BatchPredictionJobHook
from airflow.providers.google.cloud.links.vertex_ai import (
VertexAIBatchPredictionJobLink,
VertexAIBatchPredictionJobListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CreateBatchPredictionJobOperator(GoogleCloudBaseOperator):
"""
    Creates a BatchPredictionJob. Once created, the BatchPredictionJob is attempted to start right away.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param batch_prediction_job: Required. The BatchPredictionJob to create.
:param job_display_name: Required. The user-defined name of the BatchPredictionJob. The name can be
        up to 128 characters long and can consist of any UTF-8 characters.
:param model_name: Required. A fully-qualified model resource name or model ID.
:param instances_format: Required. The format in which instances are provided. Must be one of the
formats listed in `Model.supported_input_storage_formats`. Default is "jsonl" when using
`gcs_source`. If a `bigquery_source` is provided, this is overridden to "bigquery".
:param predictions_format: Required. The format in which Vertex AI outputs the predictions, must be
one of the formats specified in `Model.supported_output_storage_formats`. Default is "jsonl" when
using `gcs_destination_prefix`. If a `bigquery_destination_prefix` is provided, this is
overridden to "bigquery".
:param gcs_source: Google Cloud Storage URI(-s) to your instances to run batch prediction on. They
must match `instances_format`. May contain wildcards. For more information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
:param bigquery_source: BigQuery URI to a table, up to 2000 characters long.
For example: `bq://projectId.bqDatasetId.bqTableId`
:param gcs_destination_prefix: The Google Cloud Storage location of the directory where the output is
to be written to. In the given directory a new directory is created. Its name is
``prediction-<model-display-name>-<job-create-time>``, where timestamp is in
YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files ``predictions_0001.<extension>``,
``predictions_0002.<extension>``, ..., ``predictions_N.<extension>`` are created where
``<extension>`` depends on chosen ``predictions_format``, and N may equal 0001 and depends on the
total number of successfully predicted instances. If the Model has both ``instance`` and
``prediction`` schemata defined then each such file contains predictions as per the
``predictions_format``. If prediction for any instance failed (partially or completely), then an
additional ``errors_0001.<extension>``, ``errors_0002.<extension>``,..., ``errors_N.<extension>``
files are created (N depends on total number of failed predictions). These files contain the
failed instances, as per their schema, followed by an additional ``error`` field which as value
has ```google.rpc.Status`` <Status>`__ containing only ``code`` and ``message`` fields.
:param bigquery_destination_prefix: The BigQuery project location where the output is to be written
to. In the given project a new dataset is created with name
        ``prediction_<model-display-name>_<job-create-time>``, where ``<model-display-name>`` is made
        BigQuery-dataset-name compatible (for example, most special characters become underscores), and
        timestamp is in
YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created,
``predictions``, and ``errors``. If the Model has both ``instance`` and ``prediction`` schemata
defined then the tables have columns as follows: The ``predictions`` table contains instances for
which the prediction succeeded, it has columns as per a concatenation of the Model's instance and
prediction schemata. The ``errors`` table contains rows for which the prediction has failed, it
has instance columns, as per the instance schema, followed by a single "errors" column, which as
values has ```google.rpc.Status`` <Status>`__ represented as a STRUCT, and containing only
``code`` and ``message``.
:param model_parameters: The parameters that govern the predictions. The schema of the parameters may
be specified via the Model's `parameters_schema_uri`.
:param machine_type: The type of machine for running batch prediction on dedicated resources. Not
specifying machine type will result in batch prediction job being run with automatic resources.
:param accelerator_type: The type of accelerator(s) that may be attached to the machine as per
`accelerator_count`. Only used if `machine_type` is set.
:param accelerator_count: The number of accelerators to attach to the `machine_type`. Only used if
`machine_type` is set.
:param starting_replica_count: The number of machine replicas used at the start of the batch
operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`.
Only used if `machine_type` is set.
:param max_replica_count: The maximum number of machine replicas the batch operation may be scaled
to. Only used if `machine_type` is set. Default is 10.
:param generate_explanation: Optional. Generate explanation along with the batch prediction results.
This will cause the batch prediction output to include explanations based on the
`prediction_format`:
- `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to
the [aiplatform.gapic.Explanation] object.
- `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value
of the entry is a JSON object that conforms to the [aiplatform.gapic.Explanation] object.
- `csv`: Generating explanations for CSV format is not supported.
:param explanation_metadata: Optional. Explanation metadata configuration for this
BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`.
This value overrides the value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the `explanation_metadata`
object is not populated, the corresponding field of the `Model.explanation_metadata` object is
inherited. For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
:param explanation_parameters: Optional. Parameters to configure explaining for Model's predictions.
Can be specified only if `generate_explanation` is set to `True`.
This value overrides the value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of the `explanation_parameters`
object is not populated, the corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
:param labels: Optional. The labels with user-defined metadata to organize your BatchPredictionJobs.
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed
encryption key used to protect the job. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created.
If this is set, then all resources created by the BatchPredictionJob will be encrypted with the
provided encryption key.
Overrides encryption_spec_key_name set in aiplatform.init.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
:param create_request_timeout: Optional. The timeout for the create request in seconds.
    :param batch_size: Optional. The number of records (for example, instances) of the operation sent
        to a machine replica in each batch. The machine type and the size of a single record should be
        considered when setting this parameter: a higher value speeds up the batch operation's
        execution, but a value that is too high may cause a whole batch not to fit in a machine's
        memory, in which case the whole operation will fail.
        The default value is the same as in aiplatform's BatchPredictionJob.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "model_name", "impersonation_chain")
operator_extra_links = (VertexAIBatchPredictionJobLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
job_display_name: str,
model_name: str | Model,
instances_format: str = "jsonl",
predictions_format: str = "jsonl",
gcs_source: str | Sequence[str] | None = None,
bigquery_source: str | None = None,
gcs_destination_prefix: str | None = None,
bigquery_destination_prefix: str | None = None,
model_parameters: dict | None = None,
machine_type: str | None = None,
accelerator_type: str | None = None,
accelerator_count: int | None = None,
starting_replica_count: int | None = None,
max_replica_count: int | None = None,
generate_explanation: bool | None = False,
explanation_metadata: explain.ExplanationMetadata | None = None,
explanation_parameters: explain.ExplanationParameters | None = None,
labels: dict[str, str] | None = None,
encryption_spec_key_name: str | None = None,
sync: bool = True,
create_request_timeout: float | None = None,
batch_size: int | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.job_display_name = job_display_name
self.model_name = model_name
self.instances_format = instances_format
self.predictions_format = predictions_format
self.gcs_source = gcs_source
self.bigquery_source = bigquery_source
self.gcs_destination_prefix = gcs_destination_prefix
self.bigquery_destination_prefix = bigquery_destination_prefix
self.model_parameters = model_parameters
self.machine_type = machine_type
self.accelerator_type = accelerator_type
self.accelerator_count = accelerator_count
self.starting_replica_count = starting_replica_count
self.max_replica_count = max_replica_count
self.generate_explanation = generate_explanation
self.explanation_metadata = explanation_metadata
self.explanation_parameters = explanation_parameters
self.labels = labels
self.encryption_spec_key_name = encryption_spec_key_name
self.sync = sync
self.create_request_timeout = create_request_timeout
self.batch_size = batch_size
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: BatchPredictionJobHook | None = None
def execute(self, context: Context):
self.log.info("Creating Batch prediction job")
self.hook = BatchPredictionJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = self.hook.create_batch_prediction_job(
region=self.region,
project_id=self.project_id,
job_display_name=self.job_display_name,
model_name=self.model_name,
instances_format=self.instances_format,
predictions_format=self.predictions_format,
gcs_source=self.gcs_source,
bigquery_source=self.bigquery_source,
gcs_destination_prefix=self.gcs_destination_prefix,
bigquery_destination_prefix=self.bigquery_destination_prefix,
model_parameters=self.model_parameters,
machine_type=self.machine_type,
accelerator_type=self.accelerator_type,
accelerator_count=self.accelerator_count,
starting_replica_count=self.starting_replica_count,
max_replica_count=self.max_replica_count,
generate_explanation=self.generate_explanation,
explanation_metadata=self.explanation_metadata,
explanation_parameters=self.explanation_parameters,
labels=self.labels,
encryption_spec_key_name=self.encryption_spec_key_name,
sync=self.sync,
create_request_timeout=self.create_request_timeout,
batch_size=self.batch_size,
)
batch_prediction_job = result.to_dict()
batch_prediction_job_id = self.hook.extract_batch_prediction_job_id(batch_prediction_job)
self.log.info("Batch prediction job was created. Job id: %s", batch_prediction_job_id)
self.xcom_push(context, key="batch_prediction_job_id", value=batch_prediction_job_id)
VertexAIBatchPredictionJobLink.persist(
context=context, task_instance=self, batch_prediction_job_id=batch_prediction_job_id
)
return batch_prediction_job
def on_kill(self) -> None:
"""Callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_batch_prediction_job()
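# A minimal sketch of CreateBatchPredictionJobOperator reading instances from GCS and
# writing predictions back to GCS; the project, region, model resource name and bucket
# paths are hypothetical placeholders.
from airflow.providers.google.cloud.operators.vertex_ai.batch_prediction_job import (
    CreateBatchPredictionJobOperator,
)

create_batch_prediction_job = CreateBatchPredictionJobOperator(
    task_id="create_batch_prediction_job",
    project_id="my-project",  # placeholder
    region="us-central1",     # placeholder
    job_display_name="my-batch-prediction",
    # placeholder model resource name; a Model object is also accepted
    model_name="projects/my-project/locations/us-central1/models/9876543210",
    instances_format="jsonl",
    predictions_format="jsonl",
    gcs_source="gs://my-bucket/input/instances.jsonl",  # placeholder
    gcs_destination_prefix="gs://my-bucket/output/",    # placeholder
    machine_type="n1-standard-2",
)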
class DeleteBatchPredictionJobOperator(GoogleCloudBaseOperator):
"""
Deletes a BatchPredictionJob. Can only be called on jobs that already finished.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param batch_prediction_job_id: The ID of the BatchPredictionJob resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "batch_prediction_job_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
batch_prediction_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.batch_prediction_job_id = batch_prediction_job_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = BatchPredictionJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting batch prediction job: %s", self.batch_prediction_job_id)
operation = hook.delete_batch_prediction_job(
project_id=self.project_id,
region=self.region,
batch_prediction_job=self.batch_prediction_job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Batch prediction job was deleted.")
except NotFound:
self.log.info("The Batch prediction job %s does not exist.", self.batch_prediction_job_id)
class GetBatchPredictionJobOperator(GoogleCloudBaseOperator):
"""
Gets a BatchPredictionJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param batch_prediction_job: Required. The name of the BatchPredictionJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIBatchPredictionJobLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
batch_prediction_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.batch_prediction_job = batch_prediction_job
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = BatchPredictionJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Get batch prediction job: %s", self.batch_prediction_job)
result = hook.get_batch_prediction_job(
project_id=self.project_id,
region=self.region,
batch_prediction_job=self.batch_prediction_job,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Batch prediction job was gotten.")
VertexAIBatchPredictionJobLink.persist(
context=context, task_instance=self, batch_prediction_job_id=self.batch_prediction_job
)
return BatchPredictionJob.to_dict(result)
except NotFound:
self.log.info("The Batch prediction job %s does not exist.", self.batch_prediction_job)
class ListBatchPredictionJobsOperator(GoogleCloudBaseOperator):
"""
Lists BatchPredictionJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIBatchPredictionJobListLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.filter = filter
self.page_size = page_size
self.page_token = page_token
self.read_mask = read_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = BatchPredictionJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_batch_prediction_jobs(
project_id=self.project_id,
region=self.region,
filter=self.filter,
page_size=self.page_size,
page_token=self.page_token,
read_mask=self.read_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIBatchPredictionJobListLink.persist(context=context, task_instance=self)
return [BatchPredictionJob.to_dict(result) for result in results]
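# Editor's note: a hypothetical listing task for the DAG sketch above, using the filter syntax
# documented on the operator (display_name, state and model_display_name with = / !=).
list_succeeded_jobs = ListBatchPredictionJobsOperator(
    task_id="list_succeeded_batch_prediction_jobs",
    project_id="my-project",  # placeholder
    region="us-central1",  # placeholder
    filter='state="JOB_STATE_SUCCEEDED"',
    page_size=50,
)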
| 26,520 | 52.148297 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/dataset.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.aiplatform_v1.types import Dataset, ExportDataConfig, ImportDataConfig
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.cloud.hooks.vertex_ai.dataset import DatasetHook
from airflow.providers.google.cloud.links.vertex_ai import VertexAIDatasetLink, VertexAIDatasetListLink
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CreateDatasetOperator(GoogleCloudBaseOperator):
"""
Creates a Dataset.
:param project_id: Required. The ID of the Google Cloud project the Dataset belongs to.
:param region: Required. The ID of the Google Cloud region in which to handle the request.
:param dataset: Required. The Dataset to create. This corresponds to the ``dataset`` field on the
``request`` instance; if ``request`` is provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIDatasetLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
dataset: Dataset | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.dataset = dataset
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating dataset")
operation = hook.create_dataset(
project_id=self.project_id,
region=self.region,
dataset=self.dataset,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = hook.wait_for_operation(timeout=self.timeout, operation=operation)
dataset = Dataset.to_dict(result)
dataset_id = hook.extract_dataset_id(dataset)
self.log.info("Dataset was created. Dataset id: %s", dataset_id)
self.xcom_push(context, key="dataset_id", value=dataset_id)
VertexAIDatasetLink.persist(context=context, task_instance=self, dataset_id=dataset_id)
return dataset
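# Editor's note: a minimal, hypothetical usage sketch for CreateDatasetOperator. The project id,
# region, DAG/task ids and the dataset payload are placeholders; the metadata schema URI shown
# is only an illustration of an image-dataset definition.
import pendulum
from google.protobuf.struct_pb2 import Value
from airflow import DAG
from airflow.providers.google.cloud.operators.vertex_ai.dataset import CreateDatasetOperator

with DAG(
    dag_id="example_vertex_ai_dataset",
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    schedule=None,
    catchup=False,
) as dag:
    create_image_dataset = CreateDatasetOperator(
        task_id="create_image_dataset",
        project_id="my-project",  # placeholder
        region="us-central1",  # placeholder
        dataset={
            "display_name": "example-image-dataset",
            "metadata_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml",
            "metadata": Value(string_value="example-image-dataset"),
        },
    )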
class GetDatasetOperator(GoogleCloudBaseOperator):
"""
Get a Dataset.
:param project_id: Required. The ID of the Google Cloud project the Dataset belongs to.
:param region: Required. The ID of the Google Cloud region in which to handle the request.
:param dataset_id: Required. The ID of the Dataset to get.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIDatasetLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
dataset_id: str,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.dataset_id = dataset_id
self.read_mask = read_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Get dataset: %s", self.dataset_id)
dataset_obj = hook.get_dataset(
project_id=self.project_id,
region=self.region,
dataset=self.dataset_id,
read_mask=self.read_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIDatasetLink.persist(context=context, task_instance=self, dataset_id=self.dataset_id)
self.log.info("Dataset was gotten.")
return Dataset.to_dict(dataset_obj)
except NotFound:
self.log.info("The Dataset ID %s does not exist.", self.dataset_id)
class DeleteDatasetOperator(GoogleCloudBaseOperator):
"""
Deletes a Dataset.
:param project_id: Required. The ID of the Google Cloud project the Dataset belongs to.
:param region: Required. The ID of the Google Cloud region in which to handle the request.
:param dataset_id: Required. The ID of the Dataset to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
dataset_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.dataset_id = dataset_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting dataset: %s", self.dataset_id)
operation = hook.delete_dataset(
project_id=self.project_id,
region=self.region,
dataset=self.dataset_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataset was deleted.")
except NotFound:
self.log.info("The Dataset ID %s does not exist.", self.dataset_id)
class ExportDataOperator(GoogleCloudBaseOperator):
"""
Exports data from a Dataset.
:param project_id: Required. The ID of the Google Cloud project the Dataset belongs to.
:param region: Required. The ID of the Google Cloud region in which to handle the request.
:param dataset_id: Required. The ID of the Dataset to export data from.
:param export_config: Required. The desired output location.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
dataset_id: str,
export_config: ExportDataConfig | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.dataset_id = dataset_id
self.export_config = export_config
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Exporting data: %s", self.dataset_id)
operation = hook.export_data(
project_id=self.project_id,
region=self.region,
dataset=self.dataset_id,
export_config=self.export_config,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Export was done successfully")
class ImportDataOperator(GoogleCloudBaseOperator):
"""
Imports data into a Dataset.
:param project_id: Required. The ID of the Google Cloud project the Dataset belongs to.
:param region: Required. The ID of the Google Cloud region in which to handle the request.
:param dataset_id: Required. The ID of the Dataset to import data into.
:param import_configs: Required. The desired input locations. The contents of all input locations will be
imported in one batch.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
dataset_id: str,
import_configs: Sequence[ImportDataConfig] | list,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.dataset_id = dataset_id
self.import_configs = import_configs
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Importing data: %s", self.dataset_id)
operation = hook.import_data(
project_id=self.project_id,
region=self.region,
dataset=self.dataset_id,
import_configs=self.import_configs,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Import was done successfully")
class ListDatasetsOperator(GoogleCloudBaseOperator):
"""
Lists Datasets in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIDatasetListLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.filter = filter
self.page_size = page_size
self.page_token = page_token
self.read_mask = read_mask
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_datasets(
project_id=self.project_id,
region=self.region,
filter=self.filter,
page_size=self.page_size,
page_token=self.page_token,
read_mask=self.read_mask,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIDatasetListLink.persist(context=context, task_instance=self)
return [Dataset.to_dict(result) for result in results]
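# Editor's note: continuing the hypothetical DAG sketch above; order_by follows the convention
# documented on the operator (append "desc" after a field name for descending order).
list_datasets = ListDatasetsOperator(
    task_id="list_datasets",
    project_id="my-project",  # placeholder
    region="us-central1",  # placeholder
    order_by="create_time desc",
)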
class UpdateDatasetOperator(GoogleCloudBaseOperator):
"""
Updates a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset_id: Required. The ID of the Dataset to update.
:param dataset: Required. The Dataset which replaces the resource on the server.
:param update_mask: Required. The update mask applies to the resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
project_id: str,
region: str,
dataset_id: str,
dataset: Dataset | dict,
update_mask: FieldMask | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.dataset_id = dataset_id
self.dataset = dataset
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Updating dataset: %s", self.dataset_id)
result = hook.update_dataset(
project_id=self.project_id,
region=self.region,
dataset_id=self.dataset_id,
dataset=self.dataset,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Dataset was updated")
return Dataset.to_dict(result)
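# Editor's note: continuing the hypothetical DAG sketch above. Only the fields named in
# update_mask are changed; a dict with a "paths" key is accepted in place of a FieldMask
# message. All ids and names remain placeholders.
update_image_dataset = UpdateDatasetOperator(
    task_id="update_image_dataset",
    project_id="my-project",  # placeholder
    region="us-central1",  # placeholder
    dataset_id="{{ task_instance.xcom_pull(task_ids='create_image_dataset', key='dataset_id') }}",
    dataset={"display_name": "example-image-dataset-renamed"},
    update_mask={"paths": ["display_name"]},
)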
| 23,041 | 41.512915 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/custom_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.aiplatform.models import Model
from google.cloud.aiplatform_v1.types.dataset import Dataset
from google.cloud.aiplatform_v1.types.training_pipeline import TrainingPipeline
from airflow.providers.google.cloud.hooks.vertex_ai.custom_job import CustomJobHook
from airflow.providers.google.cloud.links.vertex_ai import (
VertexAIModelLink,
VertexAITrainingLink,
VertexAITrainingPipelinesLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CustomTrainingJobBaseOperator(GoogleCloudBaseOperator):
"""The base class for operators that launch Custom jobs on VertexAI."""
def __init__(
self,
*,
project_id: str,
region: str,
display_name: str,
container_uri: str,
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# RUN
dataset_id: str | None = None,
annotation_schema_uri: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
base_output_dir: str | None = None,
service_account: str | None = None,
network: str | None = None,
bigquery_destination: str | None = None,
args: list[str | float | int] | None = None,
environment_variables: dict[str, str] | None = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
tensorboard: str | None = None,
sync: bool = True,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.display_name = display_name
# START Custom
self.container_uri = container_uri
self.model_serving_container_image_uri = model_serving_container_image_uri
self.model_serving_container_predict_route = model_serving_container_predict_route
self.model_serving_container_health_route = model_serving_container_health_route
self.model_serving_container_command = model_serving_container_command
self.model_serving_container_args = model_serving_container_args
self.model_serving_container_environment_variables = model_serving_container_environment_variables
self.model_serving_container_ports = model_serving_container_ports
self.model_description = model_description
self.model_instance_schema_uri = model_instance_schema_uri
self.model_parameters_schema_uri = model_parameters_schema_uri
self.model_prediction_schema_uri = model_prediction_schema_uri
self.labels = labels
self.training_encryption_spec_key_name = training_encryption_spec_key_name
self.model_encryption_spec_key_name = model_encryption_spec_key_name
self.staging_bucket = staging_bucket
# END Custom
# START Run param
self.dataset_id = dataset_id
self.annotation_schema_uri = annotation_schema_uri
self.model_display_name = model_display_name
self.model_labels = model_labels
self.base_output_dir = base_output_dir
self.service_account = service_account
self.network = network
self.bigquery_destination = bigquery_destination
self.args = args
self.environment_variables = environment_variables
self.replica_count = replica_count
self.machine_type = machine_type
self.accelerator_type = accelerator_type
self.accelerator_count = accelerator_count
self.boot_disk_type = boot_disk_type
self.boot_disk_size_gb = boot_disk_size_gb
self.training_fraction_split = training_fraction_split
self.validation_fraction_split = validation_fraction_split
self.test_fraction_split = test_fraction_split
self.training_filter_split = training_filter_split
self.validation_filter_split = validation_filter_split
self.test_filter_split = test_filter_split
self.predefined_split_column_name = predefined_split_column_name
self.timestamp_split_column_name = timestamp_split_column_name
self.tensorboard = tensorboard
self.sync = sync
# END Run param
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
class CreateCustomContainerTrainingJobOperator(CustomTrainingJobBaseOperator):
"""Create Custom Container Training job.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param command: The command to be invoked when the container is started.
It overrides the entrypoint instruction in the Dockerfile when provided.
:param container_uri: Required: URI of the training container image in the GCR.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
:param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, An
HTTP path to send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
:param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
HTTP path to send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
:param model_serving_container_ports: Declaration of ports that are exposed by the container. This
field is primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
:param dataset_id: The ID of the Vertex AI Dataset to fit this training against.
:param annotation_schema_uri: Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
:param model_display_name: If the script produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
:param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
:param replica_count: The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute the AI Platform job synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"region",
"command",
"dataset_id",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
command: Sequence[str] = [],
**kwargs,
) -> None:
super().__init__(**kwargs)
self.command = command
def execute(self, context: Context):
self.hook = CustomJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
model, training_id, custom_job_id = self.hook.create_custom_container_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
container_uri=self.container_uri,
command=self.command,
model_serving_container_image_uri=self.model_serving_container_image_uri,
model_serving_container_predict_route=self.model_serving_container_predict_route,
model_serving_container_health_route=self.model_serving_container_health_route,
model_serving_container_command=self.model_serving_container_command,
model_serving_container_args=self.model_serving_container_args,
model_serving_container_environment_variables=self.model_serving_container_environment_variables,
model_serving_container_ports=self.model_serving_container_ports,
model_description=self.model_description,
model_instance_schema_uri=self.model_instance_schema_uri,
model_parameters_schema_uri=self.model_parameters_schema_uri,
model_prediction_schema_uri=self.model_prediction_schema_uri,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
staging_bucket=self.staging_bucket,
# RUN
dataset=Dataset(name=self.dataset_id) if self.dataset_id else None,
annotation_schema_uri=self.annotation_schema_uri,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
base_output_dir=self.base_output_dir,
service_account=self.service_account,
network=self.network,
bigquery_destination=self.bigquery_destination,
args=self.args,
environment_variables=self.environment_variables,
replica_count=self.replica_count,
machine_type=self.machine_type,
accelerator_type=self.accelerator_type,
accelerator_count=self.accelerator_count,
boot_disk_type=self.boot_disk_type,
boot_disk_size_gb=self.boot_disk_size_gb,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
predefined_split_column_name=self.predefined_split_column_name,
timestamp_split_column_name=self.timestamp_split_column_name,
tensorboard=self.tensorboard,
sync=True,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
self.xcom_push(context, key="custom_job_id", value=custom_job_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
def on_kill(self) -> None:
"""Callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_job()
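# Editor's note: a minimal, hypothetical usage sketch for CreateCustomContainerTrainingJobOperator.
# All ids, image URIs, bucket names and the command are placeholders; the import path follows
# this provider package's layout.
import pendulum
from airflow import DAG
from airflow.providers.google.cloud.operators.vertex_ai.custom_job import (
    CreateCustomContainerTrainingJobOperator,
)

with DAG(
    dag_id="example_vertex_ai_custom_container_training",
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    schedule=None,
    catchup=False,
) as dag:
    create_training_job = CreateCustomContainerTrainingJobOperator(
        task_id="create_custom_container_training_job",
        project_id="my-project",  # placeholder
        region="us-central1",  # placeholder
        display_name="example-custom-container-training-job",
        container_uri="us-docker.pkg.dev/my-project/my-repo/trainer:latest",  # placeholder image
        command=["python3", "-m", "trainer.task"],  # placeholder entrypoint
        staging_bucket="gs://my-staging-bucket",
        model_serving_container_image_uri="us-docker.pkg.dev/my-project/my-repo/serving:latest",
        model_display_name="example-trained-model",
        replica_count=1,
        machine_type="n1-standard-4",
    )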
class CreateCustomPythonPackageTrainingJobOperator(CustomTrainingJobBaseOperator):
"""Create Custom Python Package Training job.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param python_package_gcs_uri: Required: GCS location of the training python package.
:param python_module_name: Required: The module name of the training python package.
:param container_uri: Required: URI of the training container image in the GCR.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
:param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, An
HTTP path to send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
:param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
HTTP path to send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
:param model_serving_container_ports: Declaration of ports that are exposed by the container. This
field is primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
:param dataset: Vertex AI Dataset to fit this training against.
:param annotation_schema_uri: Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
:param model_display_name: If the script produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
:param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
:param replica_count: The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute the AI Platform job synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
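
A minimal usage sketch is shown below, assuming this class is the provider's
``CreateCustomPythonPackageTrainingJobOperator``; the project, region, bucket, package and image
URIs are illustrative placeholders, and only parameters documented above are used.

.. code-block:: python

    # Illustrative values only: project, region, bucket, package and image URIs are placeholders.
    create_python_package_training_job = CreateCustomPythonPackageTrainingJobOperator(
        task_id="create_python_package_training_job",
        project_id="my-project",
        region="us-central1",
        display_name="my-training-pipeline",
        python_package_gcs_uri="gs://my-staging-bucket/trainer-0.1.tar.gz",
        python_module_name="trainer.task",
        container_uri="gcr.io/my-project/my-training-image:latest",
        model_serving_container_image_uri="gcr.io/my-project/my-serving-image:latest",
        model_display_name="my-model",
        staging_bucket="gs://my-staging-bucket",
        replica_count=1,
        machine_type="n1-standard-4",
    )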
"""
template_fields = (
"region",
"dataset_id",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
python_package_gcs_uri: str,
python_module_name: str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.python_package_gcs_uri = python_package_gcs_uri
self.python_module_name = python_module_name
def execute(self, context: Context):
self.hook = CustomJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
model, training_id, custom_job_id = self.hook.create_custom_python_package_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
python_package_gcs_uri=self.python_package_gcs_uri,
python_module_name=self.python_module_name,
container_uri=self.container_uri,
model_serving_container_image_uri=self.model_serving_container_image_uri,
model_serving_container_predict_route=self.model_serving_container_predict_route,
model_serving_container_health_route=self.model_serving_container_health_route,
model_serving_container_command=self.model_serving_container_command,
model_serving_container_args=self.model_serving_container_args,
model_serving_container_environment_variables=self.model_serving_container_environment_variables,
model_serving_container_ports=self.model_serving_container_ports,
model_description=self.model_description,
model_instance_schema_uri=self.model_instance_schema_uri,
model_parameters_schema_uri=self.model_parameters_schema_uri,
model_prediction_schema_uri=self.model_prediction_schema_uri,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
staging_bucket=self.staging_bucket,
# RUN
dataset=Dataset(name=self.dataset_id) if self.dataset_id else None,
annotation_schema_uri=self.annotation_schema_uri,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
base_output_dir=self.base_output_dir,
service_account=self.service_account,
network=self.network,
bigquery_destination=self.bigquery_destination,
args=self.args,
environment_variables=self.environment_variables,
replica_count=self.replica_count,
machine_type=self.machine_type,
accelerator_type=self.accelerator_type,
accelerator_count=self.accelerator_count,
boot_disk_type=self.boot_disk_type,
boot_disk_size_gb=self.boot_disk_size_gb,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
predefined_split_column_name=self.predefined_split_column_name,
timestamp_split_column_name=self.timestamp_split_column_name,
tensorboard=self.tensorboard,
sync=True,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
self.xcom_push(context, key="custom_job_id", value=custom_job_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
def on_kill(self) -> None:
"""Callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_job()
class CreateCustomTrainingJobOperator(CustomTrainingJobBaseOperator):
"""Create Custom Training job.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param script_path: Required. Local path to training script.
:param container_uri: Required. URI of the training container image in the GCR.
:param requirements: List of python packages dependencies of script.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
:param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an
HTTP path to send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
:param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
HTTP path to send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
:param model_serving_container_ports: Declaration of ports that are exposed by the container. This
field is primarily informational; it gives Vertex AI information about the
network connections the container uses. Whether or not a port is listed here has
no impact on whether the port is actually exposed; any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
:param dataset: Vertex AI Dataset to fit this training against.
:param annotation_schema_uri: Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
:param model_display_name: If the script produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
:param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
:param replica_count: The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute the AI Platform job synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
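
A minimal usage sketch (the project, region, script path, requirements and image URIs below are
illustrative placeholders; only parameters documented above are used):

.. code-block:: python

    # Illustrative values only: project, region, script path and image URIs are placeholders.
    create_custom_training_job = CreateCustomTrainingJobOperator(
        task_id="create_custom_training_job",
        project_id="my-project",
        region="us-central1",
        display_name="my-training-pipeline",
        script_path="/files/trainer/task.py",
        requirements=["pandas", "scikit-learn"],
        container_uri="gcr.io/my-project/my-training-image:latest",
        model_serving_container_image_uri="gcr.io/my-project/my-serving-image:latest",
        model_display_name="my-model",
        staging_bucket="gs://my-staging-bucket",
        replica_count=1,
    )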
"""
template_fields = (
"region",
"script_path",
"requirements",
"dataset_id",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
script_path: str,
requirements: Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.requirements = requirements
self.script_path = script_path
def execute(self, context: Context):
self.hook = CustomJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
model, training_id, custom_job_id = self.hook.create_custom_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
script_path=self.script_path,
container_uri=self.container_uri,
requirements=self.requirements,
model_serving_container_image_uri=self.model_serving_container_image_uri,
model_serving_container_predict_route=self.model_serving_container_predict_route,
model_serving_container_health_route=self.model_serving_container_health_route,
model_serving_container_command=self.model_serving_container_command,
model_serving_container_args=self.model_serving_container_args,
model_serving_container_environment_variables=self.model_serving_container_environment_variables,
model_serving_container_ports=self.model_serving_container_ports,
model_description=self.model_description,
model_instance_schema_uri=self.model_instance_schema_uri,
model_parameters_schema_uri=self.model_parameters_schema_uri,
model_prediction_schema_uri=self.model_prediction_schema_uri,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
staging_bucket=self.staging_bucket,
# RUN
dataset=Dataset(name=self.dataset_id) if self.dataset_id else None,
annotation_schema_uri=self.annotation_schema_uri,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
base_output_dir=self.base_output_dir,
service_account=self.service_account,
network=self.network,
bigquery_destination=self.bigquery_destination,
args=self.args,
environment_variables=self.environment_variables,
replica_count=self.replica_count,
machine_type=self.machine_type,
accelerator_type=self.accelerator_type,
accelerator_count=self.accelerator_count,
boot_disk_type=self.boot_disk_type,
boot_disk_size_gb=self.boot_disk_size_gb,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
predefined_split_column_name=self.predefined_split_column_name,
timestamp_split_column_name=self.timestamp_split_column_name,
tensorboard=self.tensorboard,
sync=True,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
else:
result = model # type: ignore
self.xcom_push(context, key="training_id", value=training_id)
self.xcom_push(context, key="custom_job_id", value=custom_job_id)
VertexAITrainingLink.persist(context=context, task_instance=self, training_id=training_id)
return result
def on_kill(self) -> None:
"""Callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_job()
class DeleteCustomTrainingJobOperator(GoogleCloudBaseOperator):
"""
Deletes a CustomTrainingJob, CustomPythonTrainingJob, or CustomContainerTrainingJob.
:param training_pipeline_id: Required. The name of the TrainingPipeline resource to be deleted.
:param custom_job_id: Required. The name of the CustomJob to delete.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
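
A minimal usage sketch. The upstream ``task_ids`` value is a placeholder for whichever task created
the training pipeline; the Jinja expressions rely on the ``training_id`` and ``custom_job_id`` XCom
keys pushed by the create operators in this module.

.. code-block:: python

    # Illustrative values only: the upstream task_id, project and region are placeholders.
    delete_custom_training_job = DeleteCustomTrainingJobOperator(
        task_id="delete_custom_training_job",
        training_pipeline_id="{{ task_instance.xcom_pull(task_ids='create_custom_training_job', key='training_id') }}",
        custom_job_id="{{ task_instance.xcom_pull(task_ids='create_custom_training_job', key='custom_job_id') }}",
        region="us-central1",
        project_id="my-project",
    )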
"""
template_fields = ("training_pipeline", "custom_job", "region", "project_id", "impersonation_chain")
def __init__(
self,
*,
training_pipeline_id: str,
custom_job_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.training_pipeline = training_pipeline_id
self.custom_job = custom_job_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CustomJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting custom training pipeline: %s", self.training_pipeline)
training_pipeline_operation = hook.delete_training_pipeline(
training_pipeline=self.training_pipeline,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=training_pipeline_operation)
self.log.info("Training pipeline was deleted.")
except NotFound:
self.log.info("The Training Pipeline ID %s does not exist.", self.training_pipeline)
try:
self.log.info("Deleting custom job: %s", self.custom_job)
custom_job_operation = hook.delete_custom_job(
custom_job=self.custom_job,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=custom_job_operation)
self.log.info("Custom job was deleted.")
except NotFound:
self.log.info("The Custom Job ID %s does not exist.", self.custom_job)
class ListCustomTrainingJobOperator(GoogleCloudBaseOperator):
"""Lists CustomTrainingJob, CustomPythonTrainingJob, or CustomContainerTrainingJob in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. The standard list filter. Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
Some examples of using the filter are:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token]
of the previous
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]
call.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
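
A minimal usage sketch with placeholder project and region values; the ``filter`` expression
follows the syntax described above.

.. code-block:: python

    # Illustrative values only: project and region are placeholders.
    list_custom_training_jobs = ListCustomTrainingJobOperator(
        task_id="list_custom_training_jobs",
        region="us-central1",
        project_id="my-project",
        filter='state="PIPELINE_STATE_SUCCEEDED"',
        page_size=100,
    )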
"""
template_fields = [
"region",
"project_id",
"impersonation_chain",
]
operator_extra_links = [
VertexAITrainingPipelinesLink(),
]
def __init__(
self,
*,
region: str,
project_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.page_size = page_size
self.page_token = page_token
self.filter = filter
self.read_mask = read_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CustomJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_training_pipelines(
region=self.region,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
filter=self.filter,
read_mask=self.read_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAITrainingPipelinesLink.persist(context=context, task_instance=self)
return [TrainingPipeline.to_dict(result) for result in results]
| 78,121 | 55.857351 | 129 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/model_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Vertex AI operators.
.. spelling:word-list::
aiplatform
camelCase
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.aiplatform_v1.types import Model, model_service
from airflow.providers.google.cloud.hooks.vertex_ai.model_service import ModelServiceHook
from airflow.providers.google.cloud.links.vertex_ai import (
VertexAIModelExportLink,
VertexAIModelLink,
VertexAIModelListLink,
)
from airflow.providers.google.cloud.operators.cloud_base import GoogleCloudBaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DeleteModelOperator(GoogleCloudBaseOperator):
"""
Deletes a Model.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model_id: Required. The name of the Model resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
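
A minimal usage sketch; the project, region and numeric model ID below are placeholders.

.. code-block:: python

    from airflow.providers.google.cloud.operators.vertex_ai.model_service import DeleteModelOperator

    # Illustrative values only: project, region and model ID are placeholders.
    delete_model = DeleteModelOperator(
        task_id="delete_model",
        project_id="my-project",
        region="us-central1",
        model_id="1234567890123456789",
    )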
"""
template_fields = ("region", "model_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
model_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.model_id = model_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = ModelServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting model: %s", self.model_id)
operation = hook.delete_model(
project_id=self.project_id,
region=self.region,
model=self.model_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Model was deleted.")
except NotFound:
self.log.info("The Model ID %s does not exist.", self.model_id)
class ExportModelOperator(GoogleCloudBaseOperator):
"""
Exports a trained, exportable Model to a location specified by the user.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model_id: Required. The resource name of the Model to export.
:param output_config: Required. The desired output location and configuration.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
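
A minimal usage sketch. The ``output_config`` dict mirrors
``model_service.ExportModelRequest.OutputConfig``; the export format id, destination bucket and
model ID below are placeholders that depend on how the model was produced.

.. code-block:: python

    from airflow.providers.google.cloud.operators.vertex_ai.model_service import ExportModelOperator

    # Illustrative values only: project, region, model ID, export format and bucket are placeholders.
    export_model = ExportModelOperator(
        task_id="export_model",
        project_id="my-project",
        region="us-central1",
        model_id="1234567890123456789",
        output_config={
            "export_format_id": "custom-trained",
            "artifact_destination": {"output_uri_prefix": "gs://my-bucket/model-export/"},
        },
    )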
"""
template_fields = ("region", "model_id", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIModelExportLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
model_id: str,
output_config: model_service.ExportModelRequest.OutputConfig | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.model_id = model_id
self.output_config = output_config
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = ModelServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Exporting model: %s", self.model_id)
operation = hook.export_model(
project_id=self.project_id,
region=self.region,
model=self.model_id,
output_config=self.output_config,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
VertexAIModelExportLink.persist(context=context, task_instance=self)
self.log.info("Model was exported.")
except NotFound:
self.log.info("The Model ID %s does not exist.", self.model_id)
class ListModelsOperator(GoogleCloudBaseOperator):
r"""
Lists Models in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param filter: An expression for filtering the results of the request. For field names both
snake_case and camelCase are supported.
- ``model`` supports = and !=. ``model`` represents the Model ID, i.e. the last segment of the
Model's [resource name][google.cloud.aiplatform.v1.Model.name].
- ``display_name`` supports = and !=
- ``labels`` supports general map functions that is:
-- ``labels.key=value`` - key:value equality
-- ``labels.key:*`` or ``labels:key`` - key existence
-- A key including a space must be quoted. ``labels."a key"``.
:param page_size: The standard list page size.
:param page_token: The standard list page token. Typically obtained via
[ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token]
of the previous
[ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]
call.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
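
A minimal usage sketch with placeholder values; ``filter`` and ``order_by`` follow the syntax
described above.

.. code-block:: python

    from airflow.providers.google.cloud.operators.vertex_ai.model_service import ListModelsOperator

    # Illustrative values only: project, region and the filter value are placeholders.
    list_models = ListModelsOperator(
        task_id="list_models",
        project_id="my-project",
        region="us-central1",
        filter='display_name="my-model"',
        order_by="create_time desc",
    )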
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIModelListLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.filter = filter
self.page_size = page_size
self.page_token = page_token
self.read_mask = read_mask
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = ModelServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_models(
project_id=self.project_id,
region=self.region,
filter=self.filter,
page_size=self.page_size,
page_token=self.page_token,
read_mask=self.read_mask,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIModelListLink.persist(context=context, task_instance=self)
return [Model.to_dict(result) for result in results]
class UploadModelOperator(GoogleCloudBaseOperator):
"""
Uploads a Model artifact into Vertex AI.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model: Required. The Model to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
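
A minimal usage sketch passing ``model`` as a dict; the display name, artifact URI and serving
image are placeholders, and the dict fields follow ``google.cloud.aiplatform_v1.types.Model``.

.. code-block:: python

    from airflow.providers.google.cloud.operators.vertex_ai.model_service import UploadModelOperator

    # Illustrative values only: project, region, URIs and names are placeholders.
    upload_model = UploadModelOperator(
        task_id="upload_model",
        project_id="my-project",
        region="us-central1",
        model={
            "display_name": "my-model",
            "artifact_uri": "gs://my-bucket/model-artifacts/",
            "container_spec": {"image_uri": "gcr.io/my-project/my-serving-image:latest"},
        },
    )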
"""
template_fields = ("region", "project_id", "model", "impersonation_chain")
operator_extra_links = (VertexAIModelLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
model: Model | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.model = model
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = ModelServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Upload model")
operation = hook.upload_model(
project_id=self.project_id,
region=self.region,
model=self.model,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = hook.wait_for_operation(timeout=self.timeout, operation=operation)
model_resp = model_service.UploadModelResponse.to_dict(result)
model_id = hook.extract_model_id(model_resp)
self.log.info("Model was uploaded. Model ID: %s", model_id)
self.xcom_push(context, key="model_id", value=model_id)
VertexAIModelLink.persist(context=context, task_instance=self, model_id=model_id)
return model_resp
| 15,053 | 41.888889 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/operators/vertex_ai/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/triggers/mlengine.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator, Sequence
from airflow.providers.google.cloud.hooks.mlengine import MLEngineAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class MLEngineStartTrainingJobTrigger(BaseTrigger):
"""
MLEngineStartTrainingJobTrigger runs on the trigger worker to perform the start training job operation.
:param conn_id: Reference to google cloud connection id
:param job_id: The ID of the job. It will be suffixed with hash of job configuration
:param project_id: Google Cloud Project where the job is running
:param poll_interval: polling period in seconds to check for the status
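
A minimal deferral sketch, as it might appear inside a deferrable operator's ``execute`` method;
the job ID, project, region and ``method_name`` target are placeholders.

.. code-block:: python

    # Illustrative values only: job ID, project, region and method_name are placeholders.
    self.defer(
        trigger=MLEngineStartTrainingJobTrigger(
            conn_id="google_cloud_default",
            job_id="my_training_job_abc123",
            project_id="my-project",
            region="us-central1",
            poll_interval=60,
        ),
        method_name="execute_complete",
    )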
"""
def __init__(
self,
conn_id: str,
job_id: str,
region: str,
poll_interval: float = 4.0,
package_uris: list[str] | None = None,
training_python_module: str | None = None,
training_args: list[str] | None = None,
runtime_version: str | None = None,
python_version: str | None = None,
job_dir: str | None = None,
project_id: str | None = None,
labels: dict[str, str] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
):
super().__init__()
self.log.info("Using the connection %s .", conn_id)
self.conn_id = conn_id
self.job_id = job_id
self._job_conn = None
self.project_id = project_id
self.region = region
self.poll_interval = poll_interval
self.runtime_version = runtime_version
self.python_version = python_version
self.job_dir = job_dir
self.package_uris = package_uris
self.training_python_module = training_python_module
self.training_args = training_args
self.labels = labels
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes MLEngineStartTrainingJobTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.mlengine.MLEngineStartTrainingJobTrigger",
{
"conn_id": self.conn_id,
"job_id": self.job_id,
"poll_interval": self.poll_interval,
"region": self.region,
"project_id": self.project_id,
"runtime_version": self.runtime_version,
"python_version": self.python_version,
"job_dir": self.job_dir,
"package_uris": self.package_uris,
"training_python_module": self.training_python_module,
"training_args": self.training_args,
"labels": self.labels,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current job execution status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
# Poll for job execution status
response_from_hook = await hook.get_job_status(job_id=self.job_id, project_id=self.project_id)
if response_from_hook == "success":
yield TriggerEvent(
{
"job_id": self.job_id,
"status": "success",
"message": "Job completed",
}
)
elif response_from_hook == "pending":
self.log.info("Job is still running...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent({"status": "error", "message": response_from_hook})
except Exception as e:
self.log.exception("Exception occurred while checking for query completion")
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_async_hook(self) -> MLEngineAsyncHook:
return MLEngineAsyncHook(
gcp_conn_id=self.conn_id,
impersonation_chain=self.impersonation_chain,
)
| 5,137 | 41.114754 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/triggers/gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import os
from datetime import datetime
from typing import Any, AsyncIterator
from aiohttp import ClientSession
from airflow.providers.google.cloud.hooks.gcs import GCSAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
from airflow.utils import timezone
class GCSBlobTrigger(BaseTrigger):
"""
A trigger that fires when it finds the requested file or folder present in the given bucket.
:param bucket: the bucket in Google Cloud Storage where the objects are residing.
:param object_name: the file or folder present in the bucket
:param google_cloud_conn_id: reference to the Google Connection
:param poke_interval: polling period in seconds to check for file/folder
:param hook_params: Extra config params to be passed to the underlying hook.
Should match the desired hook constructor params.
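
A minimal deferral sketch, as it might appear inside a deferrable sensor's ``execute`` method;
the bucket, object name and ``method_name`` target are placeholders.

.. code-block:: python

    # Illustrative values only: bucket, object name and method_name are placeholders.
    self.defer(
        trigger=GCSBlobTrigger(
            bucket="my-bucket",
            object_name="data/2023-01-01/file.csv",
            poke_interval=60,
            google_cloud_conn_id="google_cloud_default",
            hook_params={},
        ),
        method_name="execute_complete",
    )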
"""
def __init__(
self,
bucket: str,
object_name: str,
poke_interval: float,
google_cloud_conn_id: str,
hook_params: dict[str, Any],
):
super().__init__()
self.bucket = bucket
self.object_name = object_name
self.poke_interval = poke_interval
self.google_cloud_conn_id: str = google_cloud_conn_id
self.hook_params = hook_params
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes GCSBlobTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.gcs.GCSBlobTrigger",
{
"bucket": self.bucket,
"object_name": self.object_name,
"poke_interval": self.poke_interval,
"google_cloud_conn_id": self.google_cloud_conn_id,
"hook_params": self.hook_params,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until the relevant file/folder is found."""
try:
hook = self._get_async_hook()
while True:
res = await self._object_exists(
hook=hook, bucket_name=self.bucket, object_name=self.object_name
)
if res == "success":
yield TriggerEvent({"status": "success", "message": res})
return
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_async_hook(self) -> GCSAsyncHook:
return GCSAsyncHook(gcp_conn_id=self.google_cloud_conn_id, **self.hook_params)
async def _object_exists(self, hook: GCSAsyncHook, bucket_name: str, object_name: str) -> str:
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
"""
async with ClientSession() as s:
client = await hook.get_storage_client(s)
bucket = client.get_bucket(bucket_name)
object_response = await bucket.blob_exists(blob_name=object_name)
if object_response:
return "success"
return "pending"
class GCSCheckBlobUpdateTimeTrigger(BaseTrigger):
"""
A trigger that makes an async call to GCS to check whether the object is updated in a bucket.
    :param bucket: the Google Cloud Storage bucket name where the objects are residing.
:param object_name: the file or folder present in the bucket
:param target_date: context datetime to compare with blob object updated time
:param poke_interval: polling period in seconds to check for file/folder
:param google_cloud_conn_id: reference to the Google Connection
:param hook_params: dict object that has delegate_to and impersonation_chain
"""
def __init__(
self,
bucket: str,
object_name: str,
target_date: datetime,
poke_interval: float,
google_cloud_conn_id: str,
hook_params: dict[str, Any],
):
super().__init__()
self.bucket = bucket
self.object_name = object_name
self.target_date = target_date
self.poke_interval = poke_interval
self.google_cloud_conn_id: str = google_cloud_conn_id
self.hook_params = hook_params
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes GCSCheckBlobUpdateTimeTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.gcs.GCSCheckBlobUpdateTimeTrigger",
{
"bucket": self.bucket,
"object_name": self.object_name,
"target_date": self.target_date,
"poke_interval": self.poke_interval,
"google_cloud_conn_id": self.google_cloud_conn_id,
"hook_params": self.hook_params,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until the object updated time is greater than target datetime."""
try:
hook = self._get_async_hook()
while True:
status, res = await self._is_blob_updated_after(
hook=hook,
bucket_name=self.bucket,
object_name=self.object_name,
target_date=self.target_date,
)
if status:
yield TriggerEvent(res)
return
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_async_hook(self) -> GCSAsyncHook:
return GCSAsyncHook(gcp_conn_id=self.google_cloud_conn_id, **self.hook_params)
async def _is_blob_updated_after(
self, hook: GCSAsyncHook, bucket_name: str, object_name: str, target_date: datetime
) -> tuple[bool, dict[str, Any]]:
"""
Checks if the object in the bucket is updated.
:param hook: GCSAsyncHook Hook class
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:param target_date: context datetime to compare with blob object updated time
"""
async with ClientSession() as session:
client = await hook.get_storage_client(session)
bucket = client.get_bucket(bucket_name)
blob = await bucket.get_blob(blob_name=object_name)
if blob is None:
res = {
"message": f"Object ({object_name}) not found in Bucket ({bucket_name})",
"status": "error",
}
return True, res
blob_updated_date = blob.updated # type: ignore[attr-defined]
blob_updated_time = datetime.strptime(blob_updated_date, "%Y-%m-%dT%H:%M:%S.%fZ").replace(
tzinfo=timezone.utc
) # Blob updated time is in string format so converting the string format
# to datetime object to compare the last updated time
if blob_updated_time is not None:
if not target_date.tzinfo:
target_date = target_date.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s > %s", blob_updated_time, target_date)
if blob_updated_time > target_date:
return True, {"status": "success", "message": "success"}
return False, {"status": "pending", "message": "pending"}
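# Illustrative sketch (assumption): deferring until the blob is updated after a given
# point in time. The bucket, object and date below are placeholders; note that a naive
# target_date is treated as UTC by _is_blob_updated_after.
#
#     from datetime import datetime, timezone as dt_timezone
#
#     trigger = GCSCheckBlobUpdateTimeTrigger(
#         bucket="my-bucket",
#         object_name="exports/daily.csv",
#         target_date=datetime(2023, 1, 1, tzinfo=dt_timezone.utc),
#         poke_interval=60.0,
#         google_cloud_conn_id="google_cloud_default",
#         hook_params={},
#     )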
class GCSPrefixBlobTrigger(GCSBlobTrigger):
"""
Looks for objects in bucket matching a prefix.
If none found, sleep for interval and check again. Otherwise, return matches.
:param bucket: the bucket in the google cloud storage where the objects are residing.
:param prefix: The prefix of the blob_names to match in the Google cloud storage bucket
:param google_cloud_conn_id: reference to the Google Connection
:param poke_interval: polling period in seconds to check
:param hook_params: Extra config params to be passed to the underlying hook.
Should match the desired hook constructor params.
"""
def __init__(
self,
bucket: str,
prefix: str,
poke_interval: float,
google_cloud_conn_id: str,
hook_params: dict[str, Any],
):
super().__init__(
bucket=bucket,
object_name=prefix,
poke_interval=poke_interval,
google_cloud_conn_id=google_cloud_conn_id,
hook_params=hook_params,
)
self.prefix = prefix
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes GCSPrefixBlobTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.gcs.GCSPrefixBlobTrigger",
{
"bucket": self.bucket,
"prefix": self.prefix,
"poke_interval": self.poke_interval,
"google_cloud_conn_id": self.google_cloud_conn_id,
"hook_params": self.hook_params,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until the matches are found for the given prefix on the bucket."""
try:
hook = self._get_async_hook()
while True:
self.log.info(
"Checking for existence of blobs with prefix %s in bucket %s", self.prefix, self.bucket
)
res = await self._list_blobs_with_prefix(
hook=hook, bucket_name=self.bucket, prefix=self.prefix
)
if len(res) > 0:
yield TriggerEvent(
{"status": "success", "message": "Successfully completed", "matches": res}
)
return
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
async def _list_blobs_with_prefix(self, hook: GCSAsyncHook, bucket_name: str, prefix: str) -> list[str]:
"""
Returns names of blobs which match the given prefix for a given bucket.
:param hook: The async hook to use for listing the blobs
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param prefix: The prefix of the blob_names to match in the Google cloud
storage bucket.
"""
async with ClientSession() as session:
client = await hook.get_storage_client(session)
bucket = client.get_bucket(bucket_name)
object_response = await bucket.list_blobs(prefix=prefix)
return object_response
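# Illustrative sketch (assumption): the success event yielded above carries the matched
# blob names under the "matches" key, so a resuming operator might simply return them.
# The method name below is hypothetical.
#
#     def execute_complete(self, context, event):
#         if event["status"] == "error":
#             raise AirflowException(event["message"])
#         return event["matches"]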
class GCSUploadSessionTrigger(GCSPrefixBlobTrigger):
"""
Return Trigger Event if the inactivity period has passed with no increase in the number of objects.
:param bucket: The Google Cloud Storage bucket where the objects are expected.
:param prefix: The name of the prefix to check in the Google cloud storage bucket.
:param poke_interval: polling period in seconds to check
:param inactivity_period: The total seconds of inactivity to designate
        an upload session is over. Note that this mechanism is not real time and
        this trigger may not return until an interval after this period
has passed with no additional objects sensed.
:param min_objects: The minimum number of objects needed for upload session
to be considered valid.
:param previous_objects: The set of object ids found during the last poke.
:param allow_delete: Should this sensor consider objects being deleted
between intervals valid behavior. If true a warning message will be logged
when this happens. If false an error will be raised.
:param google_cloud_conn_id: The connection ID to use when connecting
to Google Cloud Storage.
"""
def __init__(
self,
bucket: str,
prefix: str,
poke_interval: float,
google_cloud_conn_id: str,
hook_params: dict[str, Any],
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: set[str] | None = None,
allow_delete: bool = True,
):
super().__init__(
bucket=bucket,
prefix=prefix,
poke_interval=poke_interval,
google_cloud_conn_id=google_cloud_conn_id,
hook_params=hook_params,
)
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects if previous_objects else set()
self.inactivity_seconds = 0.0
self.allow_delete = allow_delete
self.last_activity_time: datetime | None = None
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes GCSUploadSessionTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.gcs.GCSUploadSessionTrigger",
{
"bucket": self.bucket,
"prefix": self.prefix,
"poke_interval": self.poke_interval,
"google_cloud_conn_id": self.google_cloud_conn_id,
"hook_params": self.hook_params,
"inactivity_period": self.inactivity_period,
"min_objects": self.min_objects,
"previous_objects": self.previous_objects,
"allow_delete": self.allow_delete,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until no new files or deleted files in list blob for the inactivity_period."""
try:
hook = self._get_async_hook()
while True:
list_blobs = await self._list_blobs_with_prefix(
hook=hook, bucket_name=self.bucket, prefix=self.prefix
)
res = self._is_bucket_updated(set(list_blobs))
if res["status"] in ("success", "error"):
yield TriggerEvent(res)
return
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_time(self) -> datetime:
"""This is just a wrapper of datetime.datetime.now to simplify mocking in the unittests."""
return datetime.now()
def _is_bucket_updated(self, current_objects: set[str]) -> dict[str, str]:
"""
Check whether new objects have been uploaded and the inactivity_period has passed; update the state.
        :param current_objects: set of object ids found in the bucket during the current check.
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next check interval.
self.log.info(
"New objects found at %s resetting last_activity_time.",
os.path.join(self.bucket, self.prefix),
)
self.log.debug("New objects: %s", "\n".join(current_objects - self.previous_objects))
self.last_activity_time = self._get_time()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return {"status": "pending"}
if self.previous_objects - current_objects:
# During the last interval check objects were deleted.
if self.allow_delete:
self.previous_objects = current_objects
self.last_activity_time = self._get_time()
self.log.warning(
"%s Objects were deleted during the last interval."
" Updating the file counter and resetting last_activity_time.",
self.previous_objects - current_objects,
)
return {"status": "pending"}
return {
"status": "error",
"message": "Illegal behavior: objects were deleted in between check intervals",
}
if self.last_activity_time:
self.inactivity_seconds = (self._get_time() - self.last_activity_time).total_seconds()
else:
# Handles the first check where last inactivity time is None.
self.last_activity_time = self._get_time()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket, self.prefix)
if current_num_objects >= self.min_objects:
success_message = (
"SUCCESS: Sensor found %s objects at %s. Waited at least %s "
"seconds, with no new objects dropped."
)
self.log.info(success_message, current_num_objects, path, self.inactivity_seconds)
return {
"status": "success",
"message": success_message % (current_num_objects, path, self.inactivity_seconds),
}
error_message = "FAILURE: Inactivity Period passed, not enough objects found in %s"
self.log.error(error_message, path)
return {"status": "error", "message": error_message % path}
return {"status": "pending"}
| 18,508 | 41.745958 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/triggers/cloud_composer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, Sequence
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.cloud_composer import CloudComposerAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class CloudComposerExecutionTrigger(BaseTrigger):
"""The trigger handles the async communication with the Google Cloud Composer."""
def __init__(
self,
project_id: str,
region: str,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
pooling_period_seconds: int = 30,
):
super().__init__()
self.project_id = project_id
self.region = region
self.operation_name = operation_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.pooling_period_seconds = pooling_period_seconds
self.gcp_hook = CloudComposerAsyncHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.google.cloud.triggers.cloud_composer.CloudComposerExecutionTrigger",
{
"project_id": self.project_id,
"region": self.region,
"operation_name": self.operation_name,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"pooling_period_seconds": self.pooling_period_seconds,
},
)
async def run(self):
while True:
operation = await self.gcp_hook.get_operation(operation_name=self.operation_name)
if operation.done:
break
elif operation.error.message:
raise AirflowException(f"Cloud Composer Environment error: {operation.error.message}")
await asyncio.sleep(self.pooling_period_seconds)
yield TriggerEvent(
{
"operation_name": operation.name,
"operation_done": operation.done,
}
)
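# Illustrative sketch (assumption): an operator that starts a long-running Composer
# operation could defer on its name like this; the operator context, the values shown
# and ``method_name`` are hypothetical.
#
#     self.defer(
#         trigger=CloudComposerExecutionTrigger(
#             project_id="my-project",
#             region="us-central1",
#             operation_name=operation.name,
#             gcp_conn_id="google_cloud_default",
#             pooling_period_seconds=30,
#         ),
#         method_name="execute_complete",
#     )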
| 3,022 | 36.320988 | 102 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/triggers/cloud_build.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator, Sequence
from google.cloud.devtools.cloudbuild_v1.types import Build
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class CloudBuildCreateBuildTrigger(BaseTrigger):
"""
    CloudBuildCreateBuildTrigger runs on the trigger worker to perform the create Build operation.
:param id_: The ID of the build.
:param project_id: Google Cloud Project where the job is running
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param poll_interval: polling period in seconds to check for the status
:param location: The location of the project.
"""
def __init__(
self,
id_: str,
project_id: str | None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
poll_interval: float = 4.0,
location: str = "global",
):
super().__init__()
self.id_ = id_
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.poll_interval = poll_interval
self.location = location
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes CloudBuildCreateBuildTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.cloud_build.CloudBuildCreateBuildTrigger",
{
"id_": self.id_,
"project_id": self.project_id,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"poll_interval": self.poll_interval,
"location": self.location,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current build execution status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
# Poll for job execution status
cloud_build_instance = await hook.get_cloud_build(
id_=self.id_,
project_id=self.project_id,
location=self.location,
)
if cloud_build_instance._pb.status in (Build.Status.SUCCESS,):
yield TriggerEvent(
{
"instance": Build.to_dict(cloud_build_instance),
"id_": self.id_,
"status": "success",
"message": "Build completed",
}
)
return
elif cloud_build_instance._pb.status in (
Build.Status.WORKING,
Build.Status.PENDING,
Build.Status.QUEUED,
):
self.log.info("Build is still running...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
elif cloud_build_instance._pb.status in (
Build.Status.FAILURE,
Build.Status.INTERNAL_ERROR,
Build.Status.TIMEOUT,
Build.Status.CANCELLED,
Build.Status.EXPIRED,
):
yield TriggerEvent({"status": "error", "message": cloud_build_instance.status_detail})
return
else:
yield TriggerEvent(
{"status": "error", "message": "Unidentified status of Cloud Build instance"}
)
return
except Exception as e:
self.log.exception("Exception occurred while checking for Cloud Build completion")
yield TriggerEvent({"status": "error", "message": str(e)})
return
def _get_async_hook(self) -> CloudBuildAsyncHook:
return CloudBuildAsyncHook(gcp_conn_id=self.gcp_conn_id)
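# Illustrative sketch (assumption): resuming after the build finishes. On success the
# event yielded above carries the build serialized with Build.to_dict under "instance";
# the method name and the use of the build id are hypothetical.
#
#     def execute_complete(self, context, event):
#         if event["status"] != "success":
#             raise AirflowException(event["message"])
#         return event["instance"]["id"]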
| 5,665 | 42.922481 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/triggers/pubsub.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Pubsub triggers."""
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Sequence
from google.cloud.pubsub_v1.types import ReceivedMessage
from airflow.providers.google.cloud.hooks.pubsub import PubSubAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
if TYPE_CHECKING:
from airflow.utils.context import Context
class PubsubPullTrigger(BaseTrigger):
"""
Initialize the Pubsub Pull Trigger with needed parameters.
:param project_id: the Google Cloud project ID for the subscription (templated)
:param subscription: the Pub/Sub subscription name. Do not include the full subscription path.
:param max_messages: The maximum number of messages to retrieve per
PubSub pull request
:param ack_messages: If True, each message will be acknowledged
immediately rather than by any downstream tasks
:param gcp_conn_id: Reference to google cloud connection id
:param messages_callback: (Optional) Callback to process received messages.
        Its return value will be saved to XCom.
If you are pulling large messages, you probably want to provide a custom callback.
If not provided, the default implementation will convert `ReceivedMessage` objects
into JSON-serializable dicts using `google.protobuf.json_format.MessageToDict` function.
:param poke_interval: polling period in seconds to check for the status
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
project_id: str,
subscription: str,
max_messages: int,
ack_messages: bool,
gcp_conn_id: str,
messages_callback: Callable[[list[ReceivedMessage], Context], Any] | None = None,
poke_interval: float = 10.0,
impersonation_chain: str | Sequence[str] | None = None,
):
super().__init__()
self.project_id = project_id
self.subscription = subscription
self.max_messages = max_messages
self.ack_messages = ack_messages
self.messages_callback = messages_callback
self.poke_interval = poke_interval
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook = PubSubAsyncHook()
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes PubsubPullTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.pubsub.PubsubPullTrigger",
{
"project_id": self.project_id,
"subscription": self.subscription,
"max_messages": self.max_messages,
"ack_messages": self.ack_messages,
"messages_callback": self.messages_callback,
"poke_interval": self.poke_interval,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
try:
pulled_messages = None
while True:
if pulled_messages:
if self.ack_messages:
await self.message_acknowledgement(pulled_messages)
yield TriggerEvent({"status": "success", "message": pulled_messages})
else:
yield TriggerEvent({"status": "success", "message": pulled_messages})
else:
pulled_messages = await self.hook.pull(
project_id=self.project_id,
subscription=self.subscription,
max_messages=self.max_messages,
return_immediately=True,
)
self.log.info("Sleeping for %s seconds.", self.poke_interval)
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
return
async def message_acknowledgement(self, pulled_messages):
await self.hook.acknowledge(
project_id=self.project_id,
subscription=self.subscription,
messages=pulled_messages,
)
self.log.info("Acknowledged ack_ids from subscription %s", self.subscription)
| 5,818 | 44.818898 | 98 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/triggers/bigquery.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator, SupportsAbs
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientResponseError
from airflow.providers.google.cloud.hooks.bigquery import BigQueryAsyncHook, BigQueryTableAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class BigQueryInsertJobTrigger(BaseTrigger):
"""
    BigQueryInsertJobTrigger runs on the trigger worker to perform the insert operation.
:param conn_id: Reference to google cloud connection id
:param job_id: The ID of the job. It will be suffixed with hash of job configuration
:param project_id: Google Cloud Project where the job is running
:param dataset_id: The dataset ID of the requested table. (templated)
:param table_id: The table ID of the requested table. (templated)
:param poll_interval: polling period in seconds to check for the status
"""
def __init__(
self,
conn_id: str,
job_id: str | None,
project_id: str | None,
dataset_id: str | None = None,
table_id: str | None = None,
poll_interval: float = 4.0,
):
super().__init__()
self.log.info("Using the connection %s .", conn_id)
self.conn_id = conn_id
self.job_id = job_id
self._job_conn = None
self.dataset_id = dataset_id
self.project_id = project_id
self.table_id = table_id
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BigQueryInsertJobTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryInsertJobTrigger",
{
"conn_id": self.conn_id,
"job_id": self.job_id,
"dataset_id": self.dataset_id,
"project_id": self.project_id,
"table_id": self.table_id,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current job execution status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
# Poll for job execution status
response_from_hook = await hook.get_job_status(job_id=self.job_id, project_id=self.project_id)
self.log.debug("Response from hook: %s", response_from_hook)
if response_from_hook == "success":
yield TriggerEvent(
{
"job_id": self.job_id,
"status": "success",
"message": "Job completed",
}
)
return
elif response_from_hook == "pending":
self.log.info("Query is still running...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent({"status": "error", "message": response_from_hook})
return
except Exception as e:
self.log.exception("Exception occurred while checking for query completion")
yield TriggerEvent({"status": "error", "message": str(e)})
return
def _get_async_hook(self) -> BigQueryAsyncHook:
return BigQueryAsyncHook(gcp_conn_id=self.conn_id)
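# Illustrative sketch (assumption): a deferrable BigQuery operator might defer on a
# submitted job like this; the job id, project and ``method_name`` are placeholders.
#
#     self.defer(
#         trigger=BigQueryInsertJobTrigger(
#             conn_id="google_cloud_default",
#             job_id="airflow_1234_abcdef",
#             project_id="my-project",
#             poll_interval=4.0,
#         ),
#         method_name="execute_complete",
#     )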
class BigQueryCheckTrigger(BigQueryInsertJobTrigger):
"""BigQueryCheckTrigger run on the trigger worker."""
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BigQueryCheckTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryCheckTrigger",
{
"conn_id": self.conn_id,
"job_id": self.job_id,
"dataset_id": self.dataset_id,
"project_id": self.project_id,
"table_id": self.table_id,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current job execution status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
# Poll for job execution status
response_from_hook = await hook.get_job_status(job_id=self.job_id, project_id=self.project_id)
if response_from_hook == "success":
query_results = await hook.get_job_output(job_id=self.job_id, project_id=self.project_id)
records = hook.get_records(query_results)
# If empty list, then no records are available
if not records:
yield TriggerEvent(
{
"status": "success",
"records": None,
}
)
return
else:
# Extract only first record from the query results
first_record = records.pop(0)
yield TriggerEvent(
{
"status": "success",
"records": first_record,
}
)
return
elif response_from_hook == "pending":
self.log.info("Query is still running...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent({"status": "error", "message": response_from_hook})
return
except Exception as e:
self.log.exception("Exception occurred while checking for query completion")
yield TriggerEvent({"status": "error", "message": str(e)})
return
class BigQueryGetDataTrigger(BigQueryInsertJobTrigger):
"""
    BigQueryGetDataTrigger runs on the trigger worker and inherits from the BigQueryInsertJobTrigger class.
:param as_dict: if True returns the result as a list of dictionaries, otherwise as list of lists
(default: False).
"""
def __init__(self, as_dict: bool = False, **kwargs):
super().__init__(**kwargs)
self.as_dict = as_dict
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BigQueryInsertJobTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryGetDataTrigger",
{
"conn_id": self.conn_id,
"job_id": self.job_id,
"dataset_id": self.dataset_id,
"project_id": self.project_id,
"table_id": self.table_id,
"poll_interval": self.poll_interval,
"as_dict": self.as_dict,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current job execution status and yields a TriggerEvent with response data."""
hook = self._get_async_hook()
while True:
try:
# Poll for job execution status
response_from_hook = await hook.get_job_status(job_id=self.job_id, project_id=self.project_id)
if response_from_hook == "success":
query_results = await hook.get_job_output(job_id=self.job_id, project_id=self.project_id)
records = hook.get_records(query_results=query_results, as_dict=self.as_dict)
self.log.debug("Response from hook: %s", response_from_hook)
yield TriggerEvent(
{
"status": "success",
"message": response_from_hook,
"records": records,
}
)
return
elif response_from_hook == "pending":
self.log.info("Query is still running...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent({"status": "error", "message": response_from_hook})
return
except Exception as e:
self.log.exception("Exception occurred while checking for query completion")
yield TriggerEvent({"status": "error", "message": str(e)})
return
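# Illustrative sketch (assumption): with as_dict=True the success event's "records" is
# a list of dicts keyed by column name, otherwise a list of lists. A resuming operator
# might just hand the records back; the method name below is hypothetical.
#
#     def execute_complete(self, context, event):
#         if event["status"] == "error":
#             raise AirflowException(event["message"])
#         return event["records"]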
class BigQueryIntervalCheckTrigger(BigQueryInsertJobTrigger):
"""
    BigQueryIntervalCheckTrigger runs on the trigger worker and inherits from the BigQueryInsertJobTrigger class.
:param conn_id: Reference to google cloud connection id
:param first_job_id: The ID of the job 1 performed
:param second_job_id: The ID of the job 2 performed
:param project_id: Google Cloud Project where the job is running
:param dataset_id: The dataset ID of the requested table. (templated)
:param table: table name
:param metrics_thresholds: dictionary of ratios indexed by metrics
:param date_filter_column: column name
:param days_back: number of days between ds and the ds we want to check
against
    :param ratio_formula: the ratio formula to apply, for example max_over_min
    :param ignore_zero: whether to ignore zero metrics
:param table_id: The table ID of the requested table. (templated)
:param poll_interval: polling period in seconds to check for the status
"""
def __init__(
self,
conn_id: str,
first_job_id: str,
second_job_id: str,
project_id: str | None,
table: str,
metrics_thresholds: dict[str, int],
date_filter_column: str | None = "ds",
days_back: SupportsAbs[int] = -7,
ratio_formula: str = "max_over_min",
ignore_zero: bool = True,
dataset_id: str | None = None,
table_id: str | None = None,
poll_interval: float = 4.0,
):
super().__init__(
conn_id=conn_id,
job_id=first_job_id,
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
poll_interval=poll_interval,
)
self.conn_id = conn_id
self.first_job_id = first_job_id
self.second_job_id = second_job_id
self.project_id = project_id
self.table = table
self.metrics_thresholds = metrics_thresholds
self.date_filter_column = date_filter_column
self.days_back = days_back
self.ratio_formula = ratio_formula
self.ignore_zero = ignore_zero
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BigQueryCheckTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryIntervalCheckTrigger",
{
"conn_id": self.conn_id,
"first_job_id": self.first_job_id,
"second_job_id": self.second_job_id,
"project_id": self.project_id,
"table": self.table,
"metrics_thresholds": self.metrics_thresholds,
"date_filter_column": self.date_filter_column,
"days_back": self.days_back,
"ratio_formula": self.ratio_formula,
"ignore_zero": self.ignore_zero,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current job execution status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
first_job_response_from_hook = await hook.get_job_status(
job_id=self.first_job_id, project_id=self.project_id
)
second_job_response_from_hook = await hook.get_job_status(
job_id=self.second_job_id, project_id=self.project_id
)
if first_job_response_from_hook == "success" and second_job_response_from_hook == "success":
first_query_results = await hook.get_job_output(
job_id=self.first_job_id, project_id=self.project_id
)
second_query_results = await hook.get_job_output(
job_id=self.second_job_id, project_id=self.project_id
)
first_records = hook.get_records(first_query_results)
second_records = hook.get_records(second_query_results)
# If empty list, then no records are available
if not first_records:
first_job_row: str | None = None
else:
# Extract only first record from the query results
first_job_row = first_records.pop(0)
# If empty list, then no records are available
if not second_records:
second_job_row: str | None = None
else:
# Extract only first record from the query results
second_job_row = second_records.pop(0)
hook.interval_check(
first_job_row,
second_job_row,
self.metrics_thresholds,
self.ignore_zero,
self.ratio_formula,
)
yield TriggerEvent(
{
"status": "success",
"message": "Job completed",
"first_row_data": first_job_row,
"second_row_data": second_job_row,
}
)
return
elif first_job_response_from_hook == "pending" or second_job_response_from_hook == "pending":
self.log.info("Query is still running...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent(
{"status": "error", "message": second_job_response_from_hook, "data": None}
)
return
except Exception as e:
self.log.exception("Exception occurred while checking for query completion")
yield TriggerEvent({"status": "error", "message": str(e)})
return
class BigQueryValueCheckTrigger(BigQueryInsertJobTrigger):
"""
    BigQueryValueCheckTrigger runs on the trigger worker and inherits from the BigQueryInsertJobTrigger class.
:param conn_id: Reference to google cloud connection id
:param sql: the sql to be executed
    :param pass_value: the value to compare the first row of the query result against
:param job_id: The ID of the job
:param project_id: Google Cloud Project where the job is running
    :param tolerance: the tolerance allowed when comparing the result to pass_value
:param dataset_id: The dataset ID of the requested table. (templated)
:param table_id: The table ID of the requested table. (templated)
:param poll_interval: polling period in seconds to check for the status
"""
def __init__(
self,
conn_id: str,
sql: str,
pass_value: int | float | str,
job_id: str | None,
project_id: str | None,
tolerance: Any = None,
dataset_id: str | None = None,
table_id: str | None = None,
poll_interval: float = 4.0,
):
super().__init__(
conn_id=conn_id,
job_id=job_id,
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
poll_interval=poll_interval,
)
self.sql = sql
self.pass_value = pass_value
self.tolerance = tolerance
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BigQueryValueCheckTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryValueCheckTrigger",
{
"conn_id": self.conn_id,
"pass_value": self.pass_value,
"job_id": self.job_id,
"dataset_id": self.dataset_id,
"project_id": self.project_id,
"sql": self.sql,
"table_id": self.table_id,
"tolerance": self.tolerance,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current job execution status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
# Poll for job execution status
response_from_hook = await hook.get_job_status(job_id=self.job_id, project_id=self.project_id)
if response_from_hook == "success":
query_results = await hook.get_job_output(job_id=self.job_id, project_id=self.project_id)
records = hook.get_records(query_results)
records = records.pop(0) if records else None
hook.value_check(self.sql, self.pass_value, records, self.tolerance)
yield TriggerEvent({"status": "success", "message": "Job completed", "records": records})
return
elif response_from_hook == "pending":
self.log.info("Query is still running...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent({"status": "error", "message": response_from_hook, "records": None})
return
except Exception as e:
self.log.exception("Exception occurred while checking for query completion")
yield TriggerEvent({"status": "error", "message": str(e)})
return
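# Illustrative sketch (assumption): a value-check deferral. hook.value_check raises if
# the first result row does not match pass_value within the given tolerance, which the
# run() loop above surfaces as an error event. All values below are placeholders.
#
#     trigger = BigQueryValueCheckTrigger(
#         conn_id="google_cloud_default",
#         sql="SELECT COUNT(*) FROM my_dataset.my_table",
#         pass_value=100,
#         job_id="airflow_1234_abcdef",
#         project_id="my-project",
#         tolerance=0.1,
#     )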
class BigQueryTableExistenceTrigger(BaseTrigger):
"""
Initialize the BigQuery Table Existence Trigger with needed parameters.
:param project_id: Google Cloud Project where the job is running
:param dataset_id: The dataset ID of the requested table.
:param table_id: The table ID of the requested table.
:param gcp_conn_id: Reference to google cloud connection id
:param hook_params: params for hook
:param poll_interval: polling period in seconds to check for the status
"""
def __init__(
self,
project_id: str,
dataset_id: str,
table_id: str,
gcp_conn_id: str,
hook_params: dict[str, Any],
poll_interval: float = 4.0,
):
self.dataset_id = dataset_id
self.project_id = project_id
self.table_id = table_id
self.gcp_conn_id: str = gcp_conn_id
self.poll_interval = poll_interval
self.hook_params = hook_params
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BigQueryTableExistenceTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryTableExistenceTrigger",
{
"dataset_id": self.dataset_id,
"project_id": self.project_id,
"table_id": self.table_id,
"gcp_conn_id": self.gcp_conn_id,
"poll_interval": self.poll_interval,
"hook_params": self.hook_params,
},
)
def _get_async_hook(self) -> BigQueryTableAsyncHook:
return BigQueryTableAsyncHook(gcp_conn_id=self.gcp_conn_id)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Will run until the table exists in the Google Big Query."""
while True:
try:
hook = self._get_async_hook()
response = await self._table_exists(
hook=hook, dataset=self.dataset_id, table_id=self.table_id, project_id=self.project_id
)
if response:
yield TriggerEvent({"status": "success", "message": "success"})
return
await asyncio.sleep(self.poll_interval)
except Exception as e:
self.log.exception("Exception occurred while checking for Table existence")
yield TriggerEvent({"status": "error", "message": str(e)})
return
async def _table_exists(
self, hook: BigQueryTableAsyncHook, dataset: str, table_id: str, project_id: str
) -> bool:
"""
        Create a session, call BigQueryTableAsyncHook, and check for the table in Google BigQuery.
:param hook: BigQueryTableAsyncHook Hook class
        :param dataset: The name of the dataset in which to look for the table.
:param table_id: The name of the table to check the existence of.
:param project_id: The Google cloud project in which to look for the table.
The connection supplied to the hook must provide
access to the specified project.
"""
async with ClientSession() as session:
try:
client = await hook.get_table_client(
dataset=dataset, table_id=table_id, project_id=project_id, session=session
)
response = await client.get()
                return bool(response)
except ClientResponseError as err:
if err.status == 404:
return False
raise err
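# Illustrative sketch (assumption): deferring until a table exists; all parameter
# values below are placeholders.
#
#     trigger = BigQueryTableExistenceTrigger(
#         project_id="my-project",
#         dataset_id="my_dataset",
#         table_id="my_table",
#         gcp_conn_id="google_cloud_default",
#         hook_params={},
#         poll_interval=10.0,
#     )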
class BigQueryTablePartitionExistenceTrigger(BigQueryTableExistenceTrigger):
"""
Initialize the BigQuery Table Partition Existence Trigger with needed parameters.
:param partition_id: The name of the partition to check the existence of.
:param project_id: Google Cloud Project where the job is running
:param dataset_id: The dataset ID of the requested table.
:param table_id: The table ID of the requested table.
:param gcp_conn_id: Reference to google cloud connection id
:param hook_params: params for hook
:param poll_interval: polling period in seconds to check for the status
"""
def __init__(self, partition_id: str, **kwargs):
super().__init__(**kwargs)
self.partition_id = partition_id
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BigQueryTablePartitionExistenceTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery.BigQueryTablePartitionExistenceTrigger",
{
"partition_id": self.partition_id,
"dataset_id": self.dataset_id,
"project_id": self.project_id,
"table_id": self.table_id,
"gcp_conn_id": self.gcp_conn_id,
"poll_interval": self.poll_interval,
"hook_params": self.hook_params,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Will run until the table exists in the Google Big Query."""
hook = BigQueryAsyncHook(gcp_conn_id=self.gcp_conn_id)
job_id = None
while True:
if job_id is not None:
status = await hook.get_job_status(job_id=job_id, project_id=self.project_id)
if status == "success":
is_partition = await self._partition_exists(
hook=hook, job_id=job_id, project_id=self.project_id
)
if is_partition:
yield TriggerEvent(
{
"status": "success",
"message": f"Partition: {self.partition_id} in table: {self.table_id}",
}
)
return
job_id = None
elif status == "error":
yield TriggerEvent({"status": "error", "message": status})
return
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
job_id = await hook.create_job_for_partition_get(self.dataset_id, project_id=self.project_id)
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
async def _partition_exists(self, hook: BigQueryAsyncHook, job_id: str | None, project_id: str):
query_results = await hook.get_job_output(job_id=job_id, project_id=project_id)
records = hook.get_records(query_results)
if records:
records = [row[0] for row in records]
return self.partition_id in records
| 26,394 | 41.918699 | 110 |
py
|