| repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
| airflow | airflow-main/airflow/providers/google/cloud/triggers/bigquery_dts.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator, Sequence
from google.cloud.bigquery_datatransfer_v1 import TransferRun, TransferState
from airflow.providers.google.cloud.hooks.bigquery_dts import AsyncBiqQueryDataTransferServiceHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class BigQueryDataTransferRunTrigger(BaseTrigger):
"""
Trigger class that watches the Transfer Run state to determine when the job is done.
:param project_id: The BigQuery project id where the transfer configuration should be created.
:param config_id: ID of the config of the Transfer Run which should be watched.
:param run_id: ID of the Transfer Run which should be watched.
:param poll_interval: Optional. Interval, in seconds, at which the trigger checks the status of the job.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param location: BigQuery Transfer Service location for regional transfers.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
project_id: str | None,
config_id: str,
run_id: str,
poll_interval: int = 10,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
):
super().__init__()
self.project_id = project_id
self.config_id = config_id
self.run_id = run_id
self.poll_interval = poll_interval
self.gcp_conn_id = gcp_conn_id
self.location = location
self.impersonation_chain = impersonation_chain
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes class arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.bigquery_dts.BigQueryDataTransferRunTrigger",
{
"project_id": self.project_id,
"config_id": self.config_id,
"run_id": self.run_id,
"poll_interval": self.poll_interval,
"gcp_conn_id": self.gcp_conn_id,
"location": self.location,
"impersonation_chain": self.impersonation_chain,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""If the Transfer Run is in a terminal state, then yield TriggerEvent object."""
hook = self._get_async_hook()
while True:
try:
transfer_run: TransferRun = await hook.get_transfer_run(
project_id=self.project_id,
config_id=self.config_id,
run_id=self.run_id,
location=self.location,
)
state = transfer_run.state
self.log.info("Current state is %s", state)
if state == TransferState.SUCCEEDED:
self.log.info("Job has completed it's work.")
yield TriggerEvent(
{
"status": "success",
"run_id": self.run_id,
"message": "Job completed",
"config_id": self.config_id,
}
)
return
elif state == TransferState.FAILED:
self.log.info("Job has failed")
yield TriggerEvent(
{
"status": "failed",
"run_id": self.run_id,
"message": "Job has failed",
}
)
return
if state == TransferState.CANCELLED:
self.log.info("Job has been cancelled.")
yield TriggerEvent(
{
"status": "cancelled",
"run_id": self.run_id,
"message": "Job was cancelled",
}
)
return
else:
self.log.info("Job is still working...")
self.log.info("Waiting for %s seconds", self.poll_interval)
await asyncio.sleep(self.poll_interval)
except Exception as e:
yield TriggerEvent(
{
"status": "failed",
"message": f"Trigger failed with exception: {str(e)}",
}
)
return
def _get_async_hook(self) -> AsyncBiqQueryDataTransferServiceHook:
return AsyncBiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
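# A minimal, hypothetical usage sketch (not part of the provider): how a deferrable
# operator could hand control over to BigQueryDataTransferRunTrigger and consume the
# payload yielded by run(). The operator class, method name and argument values are
# placeholders; only BaseOperator.defer() and the trigger constructor above are real.
from airflow.models import BaseOperator


class ExampleWaitForTransferRunOperator(BaseOperator):  # hypothetical operator
    def execute(self, context):
        self.defer(
            trigger=BigQueryDataTransferRunTrigger(
                project_id="example-project",
                config_id="example-transfer-config",
                run_id="example-run-id",
                poll_interval=30,
            ),
            method_name="execute_complete",
        )

    def execute_complete(self, context, event):
        # event is the dict passed to TriggerEvent in run(), e.g.
        # {"status": "success", "run_id": "...", "message": "Job completed", "config_id": "..."}
        if event["status"] != "success":
            raise RuntimeError(event["message"])
        return event["run_id"]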
| 6,277 | 41.707483 | 102 | py |
| airflow | airflow-main/airflow/providers/google/cloud/triggers/datafusion.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator, Sequence
from airflow.providers.google.cloud.hooks.datafusion import DataFusionAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class DataFusionStartPipelineTrigger(BaseTrigger):
"""
Trigger that checks the pipeline status until it reaches a terminal state.
:param pipeline_name: Your pipeline name.
:param instance_url: Endpoint on which the REST API is accessible for the instance.
:param pipeline_id: Unique pipeline ID associated with specific pipeline
:param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param gcp_conn_id: Reference to the Google Cloud connection ID.
:param poll_interval: polling period in seconds to check for the status
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
instance_url: str,
namespace: str,
pipeline_name: str,
pipeline_id: str,
poll_interval: float = 3.0,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
success_states: list[str] | None = None,
):
super().__init__()
self.instance_url = instance_url
self.namespace = namespace
self.pipeline_name = pipeline_name
self.pipeline_id = pipeline_id
self.poll_interval = poll_interval
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.success_states = success_states
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes DataFusionStartPipelineTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.datafusion.DataFusionStartPipelineTrigger",
{
"gcp_conn_id": self.gcp_conn_id,
"instance_url": self.instance_url,
"namespace": self.namespace,
"pipeline_name": self.pipeline_name,
"pipeline_id": self.pipeline_id,
"success_states": self.success_states,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current pipeline status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
# Poll for job execution status
response_from_hook = await hook.get_pipeline_status(
success_states=self.success_states,
instance_url=self.instance_url,
namespace=self.namespace,
pipeline_name=self.pipeline_name,
pipeline_id=self.pipeline_id,
)
if response_from_hook == "success":
yield TriggerEvent(
{
"pipeline_id": self.pipeline_id,
"status": "success",
"message": "Pipeline is running",
}
)
return
elif response_from_hook == "pending":
self.log.info("Pipeline is not still in running state...")
self.log.info("Sleeping for %s seconds.", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent({"status": "error", "message": response_from_hook})
return
except Exception as e:
self.log.exception("Exception occurred while checking for pipeline state")
yield TriggerEvent({"status": "error", "message": str(e)})
return
def _get_async_hook(self) -> DataFusionAsyncHook:
return DataFusionAsyncHook(
instance_url=self.instance_url,
namespace=self.namespace,
pipeline_name=self.pipeline_name,
pipeline_id=self.pipeline_id,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
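# A minimal, hypothetical sketch (not part of the provider): serialize() returns a
# (classpath, kwargs) tuple, which the triggerer process uses to rebuild an equivalent
# trigger instance. The round trip is shown here with importlib directly; the helper
# name and example values are placeholders, and the real triggerer has its own machinery.
import importlib


def rehydrate_trigger(classpath: str, kwargs: dict):
    # Split "package.module.ClassName" into module path and class name, import, instantiate.
    module_path, _, class_name = classpath.rpartition(".")
    trigger_cls = getattr(importlib.import_module(module_path), class_name)
    return trigger_cls(**kwargs)


# Example usage (values are hypothetical):
# classpath, kwargs = DataFusionStartPipelineTrigger(
#     instance_url="https://example-instance-datafusion.example.com/api",
#     namespace="default",
#     pipeline_name="example_pipeline",
#     pipeline_id="0123-4567",
# ).serialize()
# trigger = rehydrate_trigger(classpath, kwargs)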
| 5,682 | 44.103175 | 96 | py |
| airflow | airflow-main/airflow/providers/google/cloud/triggers/cloud_storage_transfer_service.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator
from google.api_core.exceptions import GoogleAPIError
from google.cloud.storage_transfer_v1.types import TransferOperation
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import (
CloudDataTransferServiceAsyncHook,
)
from airflow.triggers.base import BaseTrigger, TriggerEvent
class CloudStorageTransferServiceCreateJobsTrigger(BaseTrigger):
"""
CloudStorageTransferServiceCreateJobsTrigger runs on the trigger worker to track the status of Cloud Storage Transfer jobs.
:param job_names: List of transfer jobs names.
:param project_id: GCP project id.
:param poll_interval: Interval in seconds between polls.
"""
def __init__(self, job_names: list[str], project_id: str | None = None, poll_interval: int = 10) -> None:
super().__init__()
self.project_id = project_id
self.job_names = job_names
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes StorageTransferJobsTrigger arguments and classpath."""
return (
f"{self.__class__.__module__ }.{self.__class__.__qualname__}",
{
"project_id": self.project_id,
"job_names": self.job_names,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current data storage transfer jobs and yields a TriggerEvent."""
async_hook: CloudDataTransferServiceAsyncHook = self.get_async_hook()
while True:
self.log.info("Attempting to request jobs statuses")
jobs_completed_successfully = 0
try:
jobs_pager = await async_hook.get_jobs(job_names=self.job_names)
jobs, awaitable_operations = [], []
async for job in jobs_pager:
operation = async_hook.get_latest_operation(job)
jobs.append(job)
awaitable_operations.append(operation)
operations: list[TransferOperation] = await asyncio.gather(*awaitable_operations)
for job, operation in zip(jobs, operations):
if operation is None:
yield TriggerEvent(
{
"status": "error",
"message": f"Transfer job {job.name} has no latest operation.",
}
)
return
elif operation.status == TransferOperation.Status.SUCCESS:
jobs_completed_successfully += 1
elif operation.status in (
TransferOperation.Status.FAILED,
TransferOperation.Status.ABORTED,
):
yield TriggerEvent(
{
"status": "error",
"message": f"Transfer operation {operation.name} failed with status "
f"{TransferOperation.Status(operation.status).name}",
}
)
return
except (GoogleAPIError, AirflowException) as ex:
yield TriggerEvent(dict(status="error", message=str(ex)))
return
jobs_total = len(self.job_names)
self.log.info("Transfer jobs completed: %s of %s", jobs_completed_successfully, jobs_total)
if jobs_completed_successfully == jobs_total:
s = "s" if jobs_total > 1 else ""
job_names = ", ".join(j for j in self.job_names)
yield TriggerEvent(
{
"status": "success",
"message": f"Transfer job{s} {job_names} completed successfully",
}
)
return
self.log.info("Sleeping for %s seconds", self.poll_interval)
await asyncio.sleep(self.poll_interval)
def get_async_hook(self) -> CloudDataTransferServiceAsyncHook:
return CloudDataTransferServiceAsyncHook(project_id=self.project_id)
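# A minimal, hypothetical sketch (not part of the provider): run() is an async
# generator, so outside of the triggerer (for example in a quick local check) it can
# be driven directly with asyncio. The helper and the job/project values below are
# placeholders; running the example would perform real API calls.
import asyncio


async def _first_event(trigger: CloudStorageTransferServiceCreateJobsTrigger) -> TriggerEvent:
    # Pull the first TriggerEvent the trigger yields and stop iterating.
    async for event in trigger.run():
        return event
    raise RuntimeError("Trigger finished without yielding an event")


# Example usage (values are hypothetical):
# event = asyncio.run(
#     _first_event(
#         CloudStorageTransferServiceCreateJobsTrigger(
#             job_names=["transferJobs/example-job"],
#             project_id="example-project",
#             poll_interval=5,
#         )
#     )
# )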
| 5,185 | 41.859504 | 109 | py |
| airflow | airflow-main/airflow/providers/google/cloud/triggers/dataproc.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataproc triggers."""
from __future__ import annotations
import asyncio
import time
from typing import Any, AsyncIterator, Sequence
from google.api_core.exceptions import NotFound
from google.cloud.dataproc_v1 import Batch, ClusterStatus, JobStatus
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.dataproc import DataprocAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class DataprocBaseTrigger(BaseTrigger):
"""Base class for Dataproc triggers."""
def __init__(
self,
region: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
polling_interval_seconds: int = 30,
):
super().__init__()
self.region = region
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.polling_interval_seconds = polling_interval_seconds
def get_async_hook(self):
return DataprocAsyncHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
class DataprocSubmitTrigger(DataprocBaseTrigger):
"""
DataprocSubmitTrigger runs on the trigger worker and waits for a Dataproc job to reach a terminal state.
:param job_id: The ID of a Dataproc job.
:param project_id: Google Cloud Project where the job is running
:param region: The Cloud Dataproc region in which to handle the request.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param polling_interval_seconds: polling period in seconds to check for the status
"""
def __init__(self, job_id: str, **kwargs):
self.job_id = job_id
super().__init__(**kwargs)
def serialize(self):
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger",
{
"job_id": self.job_id,
"project_id": self.project_id,
"region": self.region,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self):
while True:
job = await self.get_async_hook().get_job(
project_id=self.project_id, region=self.region, job_id=self.job_id
)
state = job.status.state
self.log.info("Dataproc job: %s is in state: %s", self.job_id, state)
if state in (JobStatus.State.ERROR, JobStatus.State.DONE, JobStatus.State.CANCELLED):
if state in (JobStatus.State.DONE, JobStatus.State.CANCELLED):
break
elif state == JobStatus.State.ERROR:
raise AirflowException(f"Dataproc job execution failed {self.job_id}")
await asyncio.sleep(self.polling_interval_seconds)
yield TriggerEvent({"job_id": self.job_id, "job_state": state})
class DataprocClusterTrigger(DataprocBaseTrigger):
"""
DataprocClusterTrigger runs on the trigger worker and waits for a Dataproc cluster to reach the RUNNING or ERROR state.
:param cluster_name: The name of the cluster.
:param project_id: Google Cloud Project where the job is running
:param region: The Cloud Dataproc region in which to handle the request.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param polling_interval_seconds: polling period in seconds to check for the status
"""
def __init__(self, cluster_name: str, **kwargs):
super().__init__(**kwargs)
self.cluster_name = cluster_name
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger",
{
"cluster_name": self.cluster_name,
"project_id": self.project_id,
"region": self.region,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
while True:
cluster = await self.get_async_hook().get_cluster(
project_id=self.project_id, region=self.region, cluster_name=self.cluster_name
)
state = cluster.status.state
self.log.info("Dataproc cluster: %s is in state: %s", self.cluster_name, state)
if state in (
ClusterStatus.State.ERROR,
ClusterStatus.State.RUNNING,
):
break
self.log.info("Sleeping for %s seconds.", self.polling_interval_seconds)
await asyncio.sleep(self.polling_interval_seconds)
yield TriggerEvent({"cluster_name": self.cluster_name, "cluster_state": state, "cluster": cluster})
class DataprocBatchTrigger(DataprocBaseTrigger):
"""
DataprocBatchTrigger runs on the trigger worker and waits for a Dataproc batch workload to reach a terminal state.
:param batch_id: The ID of the Dataproc batch.
:param project_id: Google Cloud Project where the job is running
:param region: The Cloud Dataproc region in which to handle the request.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param polling_interval_seconds: polling period in seconds to check for the status
"""
def __init__(self, batch_id: str, **kwargs):
super().__init__(**kwargs)
self.batch_id = batch_id
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes DataprocBatchTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocBatchTrigger",
{
"batch_id": self.batch_id,
"project_id": self.project_id,
"region": self.region,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self):
while True:
batch = await self.get_async_hook().get_batch(
project_id=self.project_id, region=self.region, batch_id=self.batch_id
)
state = batch.state
if state in (Batch.State.FAILED, Batch.State.SUCCEEDED, Batch.State.CANCELLED):
break
self.log.info("Current state is %s", state)
self.log.info("Sleeping for %s seconds.", self.polling_interval_seconds)
await asyncio.sleep(self.polling_interval_seconds)
yield TriggerEvent({"batch_id": self.batch_id, "batch_state": state})
class DataprocDeleteClusterTrigger(DataprocBaseTrigger):
"""
DataprocDeleteClusterTrigger runs on the trigger worker and waits for a Dataproc cluster to be deleted.
:param cluster_name: The name of the cluster
:param end_time: The time (seconds since the epoch) until which the cluster status will be checked
:param project_id: The ID of the Google Cloud project the cluster belongs to
:param region: The Cloud Dataproc region in which to handle the request
:param metadata: Additional metadata that is provided to the method
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:param polling_interval_seconds: Time in seconds to sleep between checks of cluster status
"""
def __init__(
self,
cluster_name: str,
end_time: float,
metadata: Sequence[tuple[str, str]] = (),
**kwargs: Any,
):
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.end_time = end_time
self.metadata = metadata
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes DataprocDeleteClusterTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocDeleteClusterTrigger",
{
"cluster_name": self.cluster_name,
"end_time": self.end_time,
"project_id": self.project_id,
"region": self.region,
"metadata": self.metadata,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Wait until cluster is deleted completely."""
while self.end_time > time.time():
try:
cluster = await self.get_async_hook().get_cluster(
region=self.region, # type: ignore[arg-type]
cluster_name=self.cluster_name,
project_id=self.project_id, # type: ignore[arg-type]
metadata=self.metadata,
)
self.log.info(
"Cluster status is %s. Sleeping for %s seconds.",
cluster.status.state,
self.polling_interval_seconds,
)
await asyncio.sleep(self.polling_interval_seconds)
except NotFound:
yield TriggerEvent({"status": "success", "message": ""})
return
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
return
yield TriggerEvent({"status": "error", "message": "Timeout"})
class DataprocWorkflowTrigger(DataprocBaseTrigger):
"""
Trigger that periodically polls information from the Dataproc API to verify the operation status.
Implementation leverages asynchronous transport.
"""
def __init__(self, name: str, **kwargs: Any):
super().__init__(**kwargs)
self.name = name
def serialize(self):
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocWorkflowTrigger",
{
"name": self.name,
"project_id": self.project_id,
"region": self.region,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
hook = self.get_async_hook()
while True:
try:
operation = await hook.get_operation(region=self.region, operation_name=self.name)
if operation.done:
if operation.error.message:
yield TriggerEvent(
{
"operation_name": operation.name,
"operation_done": operation.done,
"status": "error",
"message": operation.error.message,
}
)
return
yield TriggerEvent(
{
"operation_name": operation.name,
"operation_done": operation.done,
"status": "success",
"message": "Operation is successfully ended.",
}
)
return
else:
self.log.info("Sleeping for %s seconds.", self.polling_interval_seconds)
await asyncio.sleep(self.polling_interval_seconds)
except Exception as e:
self.log.exception("Exception occurred while checking operation status.")
yield TriggerEvent(
{
"status": "failed",
"message": str(e),
}
)
return
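# A minimal, hypothetical sketch (not part of the provider): DataprocDeleteClusterTrigger
# only polls while end_time > time.time(), so the deferring operator is expected to turn
# its own timeout into an absolute deadline before handing off. The operator class,
# timeout and argument values below are placeholders; self.defer() is the standard
# BaseOperator API.
from airflow.models import BaseOperator


class ExampleDeleteClusterAndWaitOperator(BaseOperator):  # hypothetical operator
    def execute(self, context):
        timeout = 600  # seconds the trigger is allowed to keep polling
        self.defer(
            trigger=DataprocDeleteClusterTrigger(
                cluster_name="example-cluster",
                end_time=time.time() + timeout,
                project_id="example-project",
                region="us-central1",
            ),
            method_name="execute_complete",
        )

    def execute_complete(self, context, event):
        # event is {"status": "success"|"error", "message": ...} from run() above.
        if event["status"] != "success":
            raise RuntimeError(event["message"])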
| 15,541 | 43.405714 | 107 | py |
| airflow | airflow-main/airflow/providers/google/cloud/triggers/dataflow.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, Sequence
from google.cloud.dataflow_v1beta3 import JobState
from airflow.providers.google.cloud.hooks.dataflow import AsyncDataflowHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
DEFAULT_DATAFLOW_LOCATION = "us-central1"
class TemplateJobStartTrigger(BaseTrigger):
"""Dataflow trigger to check if templated job has been finished.
:param project_id: Required. The Google Cloud project ID in which the job was started.
:param job_id: Required. ID of the job.
:param location: Optional. The location where the job is executed. If set to None, then
the value of DEFAULT_DATAFLOW_LOCATION will be used.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param cancel_timeout: Optional. How long (in seconds) the operator should wait for the pipeline to be
successfully cancelled when the task is being killed.
"""
def __init__(
self,
job_id: str,
project_id: str | None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
cancel_timeout: int | None = 5 * 60,
):
super().__init__()
self.project_id = project_id
self.job_id = job_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.poll_sleep = poll_sleep
self.impersonation_chain = impersonation_chain
self.cancel_timeout = cancel_timeout
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes class arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.dataflow.TemplateJobStartTrigger",
{
"project_id": self.project_id,
"job_id": self.job_id,
"location": self.location,
"gcp_conn_id": self.gcp_conn_id,
"poll_sleep": self.poll_sleep,
"impersonation_chain": self.impersonation_chain,
"cancel_timeout": self.cancel_timeout,
},
)
async def run(self):
"""
Main loop of the class, in which it fetches the job status and yields the corresponding TriggerEvent.
If the job has succeeded, it yields a TriggerEvent with success status; if the job has failed, one with
error status; if the job was stopped, one with stopped status. In any other case the trigger waits for
the amount of time stored in self.poll_sleep before polling again.
"""
hook = self._get_async_hook()
while True:
try:
status = await hook.get_job_status(
project_id=self.project_id,
job_id=self.job_id,
location=self.location,
)
if status == JobState.JOB_STATE_DONE:
yield TriggerEvent(
{
"job_id": self.job_id,
"status": "success",
"message": "Job completed",
}
)
return
elif status == JobState.JOB_STATE_FAILED:
yield TriggerEvent(
{
"status": "error",
"message": f"Dataflow job with id {self.job_id} has failed its execution",
}
)
return
elif status == JobState.JOB_STATE_STOPPED:
yield TriggerEvent(
{
"status": "stopped",
"message": f"Dataflow job with id {self.job_id} was stopped",
}
)
return
else:
self.log.info("Job is still running...")
self.log.info("Current job status is: %s", status)
self.log.info("Sleeping for %s seconds.", self.poll_sleep)
await asyncio.sleep(self.poll_sleep)
except Exception as e:
self.log.exception("Exception occurred while checking for job completion.")
yield TriggerEvent({"status": "error", "message": str(e)})
return
def _get_async_hook(self) -> AsyncDataflowHook:
return AsyncDataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.cancel_timeout,
)
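# A minimal, hypothetical sketch (not part of the provider): the trigger yields one of
# three terminal payloads ("success", "error" or "stopped"), and a deferring operator's
# completion callback would typically branch on that status. The helper below only
# illustrates such handling; its name is a placeholder.


def handle_template_job_event(event: dict) -> str:
    # event comes from TemplateJobStartTrigger.run(), e.g.
    # {"job_id": "...", "status": "success", "message": "Job completed"}
    if event["status"] == "success":
        return event["job_id"]
    if event["status"] == "stopped":
        raise RuntimeError(f"Dataflow job was stopped: {event['message']}")
    raise RuntimeError(f"Dataflow job failed: {event['message']}")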
| 6,133 | 41.597222 | 102 | py |
| airflow | airflow-main/airflow/providers/google/cloud/triggers/__init__.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
| airflow | airflow-main/airflow/providers/google/cloud/triggers/cloud_sql.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud SQL triggers."""
from __future__ import annotations
import asyncio
from typing import Sequence
from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLAsyncHook, CloudSqlOperationStatus
from airflow.triggers.base import BaseTrigger, TriggerEvent
class CloudSQLExportTrigger(BaseTrigger):
"""
Trigger that periodically polls information from the Cloud SQL API to verify the job status.
Implementation leverages asynchronous transport.
"""
def __init__(
self,
operation_name: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
poke_interval: int = 20,
):
super().__init__()
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.operation_name = operation_name
self.project_id = project_id
self.poke_interval = poke_interval
self.hook = CloudSQLAsyncHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
def serialize(self):
return (
"airflow.providers.google.cloud.triggers.cloud_sql.CloudSQLExportTrigger",
{
"operation_name": self.operation_name,
"project_id": self.project_id,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"poke_interval": self.poke_interval,
},
)
async def run(self):
while True:
try:
operation = await self.hook.get_operation(
project_id=self.project_id, operation_name=self.operation_name
)
if operation["status"] == CloudSqlOperationStatus.DONE:
if "error" in operation:
yield TriggerEvent(
{
"operation_name": operation["name"],
"status": "error",
"message": operation["error"]["message"],
}
)
return
yield TriggerEvent(
{
"operation_name": operation["name"],
"status": "success",
}
)
return
else:
self.log.info(
"Operation status is %s, sleeping for %s seconds.",
operation["status"],
self.poke_interval,
)
await asyncio.sleep(self.poke_interval)
except Exception as e:
self.log.exception("Exception occurred while checking operation status.")
yield TriggerEvent(
{
"status": "failed",
"message": str(e),
}
)
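# A minimal, hypothetical sketch (not part of the provider): because the hook is built
# eagerly in __init__, a quick local check typically patches its get_operation coroutine
# and drives run() directly. The operation payload is a placeholder trimmed to the
# fields the trigger reads.
from unittest import mock


def _first_event_with_mocked_hook():
    trigger = CloudSQLExportTrigger(operation_name="example-operation", project_id="example-project")
    done_operation = {"name": "example-operation", "status": CloudSqlOperationStatus.DONE}
    with mock.patch.object(trigger.hook, "get_operation", mock.AsyncMock(return_value=done_operation)):
        # Returns a TriggerEvent whose payload is
        # {"operation_name": "example-operation", "status": "success"}.
        return asyncio.run(trigger.run().__anext__())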
| 3,939 | 36.884615 | 101 | py |
| airflow | airflow-main/airflow/providers/google/cloud/triggers/kubernetes_engine.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import warnings
from datetime import datetime
from typing import Any, AsyncIterator, Sequence
from google.cloud.container_v1.types import Operation
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.cncf.kubernetes.utils.pod_manager import OnFinishAction
try:
from airflow.providers.cncf.kubernetes.triggers.pod import KubernetesPodTrigger
except ImportError:
# preserve backward compatibility for older versions of cncf.kubernetes provider
from airflow.providers.cncf.kubernetes.triggers.kubernetes_pod import KubernetesPodTrigger
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEAsyncHook, GKEPodAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class GKEStartPodTrigger(KubernetesPodTrigger):
"""
Trigger for checking pod status until it finishes its job.
:param pod_name: The name of the pod.
:param pod_namespace: The namespace of the pod.
:param cluster_url: The URL pointed to the cluster.
:param ssl_ca_cert: SSL certificate that is used for authentication to the pod.
:param cluster_context: Context that points to kubernetes cluster.
:param poll_interval: Polling period in seconds to check for the status.
:param trigger_start_time: The time (as a datetime) when the trigger was started.
:param in_cluster: run kubernetes client with in_cluster configuration.
:param get_logs: get the stdout of the container as logs of the tasks.
:param startup_timeout: timeout in seconds to start up the pod.
:param base_container_name: The name of the base container in the pod. This container's logs
will appear as part of this task's logs if get_logs is True. Defaults to None. If None,
will consult the class variable BASE_CONTAINER_NAME (which defaults to "base") for the base
container name to use.
:param on_finish_action: What to do when the pod reaches its final state, or the execution is interrupted.
If "delete_pod", the pod will be deleted regardless it's state; if "delete_succeeded_pod",
only succeeded pod will be deleted. You can set to "keep_pod" to keep the pod.
:param should_delete_pod: What to do when the pod reaches its final
state, or the execution is interrupted. If True (default), delete the
pod; if False, leave the pod.
Deprecated - use `on_finish_action` instead.
"""
def __init__(
self,
pod_name: str,
pod_namespace: str,
cluster_url: str,
ssl_ca_cert: str,
base_container_name: str,
trigger_start_time: datetime,
cluster_context: str | None = None,
poll_interval: float = 2,
in_cluster: bool | None = None,
get_logs: bool = True,
startup_timeout: int = 120,
on_finish_action: str = "delete_pod",
should_delete_pod: bool | None = None,
*args,
**kwargs,
):
super().__init__(
pod_name,
pod_namespace,
trigger_start_time,
base_container_name,
*args,
**kwargs,
)
self.pod_name = pod_name
self.pod_namespace = pod_namespace
self.trigger_start_time = trigger_start_time
self.base_container_name = base_container_name
self.poll_interval = poll_interval
self.cluster_context = cluster_context
self.in_cluster = in_cluster
self.get_logs = get_logs
self.startup_timeout = startup_timeout
if should_delete_pod is not None:
warnings.warn(
"`should_delete_pod` parameter is deprecated, please use `on_finish_action`",
AirflowProviderDeprecationWarning,
)
self.on_finish_action = (
OnFinishAction.DELETE_POD if should_delete_pod else OnFinishAction.KEEP_POD
)
self.should_delete_pod = should_delete_pod
else:
self.on_finish_action = OnFinishAction(on_finish_action)
self.should_delete_pod = self.on_finish_action == OnFinishAction.DELETE_POD
self._cluster_url = cluster_url
self._ssl_ca_cert = ssl_ca_cert
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.google.cloud.triggers.kubernetes_engine.GKEStartPodTrigger",
{
"pod_name": self.pod_name,
"pod_namespace": self.pod_namespace,
"cluster_url": self._cluster_url,
"ssl_ca_cert": self._ssl_ca_cert,
"poll_interval": self.poll_interval,
"cluster_context": self.cluster_context,
"in_cluster": self.in_cluster,
"get_logs": self.get_logs,
"startup_timeout": self.startup_timeout,
"trigger_start_time": self.trigger_start_time,
"base_container_name": self.base_container_name,
"should_delete_pod": self.should_delete_pod,
"on_finish_action": self.on_finish_action.value,
},
)
def _get_async_hook(self) -> GKEPodAsyncHook: # type: ignore[override]
return GKEPodAsyncHook(
cluster_url=self._cluster_url,
ssl_ca_cert=self._ssl_ca_cert,
)
class GKEOperationTrigger(BaseTrigger):
"""Trigger which checks status of the operation."""
def __init__(
self,
operation_name: str,
project_id: str | None,
location: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
poll_interval: int = 10,
):
super().__init__()
self.operation_name = operation_name
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.poll_interval = poll_interval
self._hook: GKEAsyncHook | None = None
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes GKEOperationTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.kubernetes_engine.GKEOperationTrigger",
{
"operation_name": self.operation_name,
"project_id": self.project_id,
"location": self.location,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets operation status and yields corresponding event."""
hook = self._get_hook()
while True:
try:
operation = await hook.get_operation(
operation_name=self.operation_name,
project_id=self.project_id,
)
status = operation.status
if status == Operation.Status.DONE:
yield TriggerEvent(
{
"status": "success",
"message": "Operation is successfully ended.",
"operation_name": operation.name,
}
)
return
elif status in (Operation.Status.RUNNING, Operation.Status.PENDING):
self.log.info("Operation is still running.")
self.log.info("Sleeping for %ss...", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent(
{
"status": "failed",
"message": f"Operation has failed with status: {operation.status}",
}
)
return
except Exception as e:
self.log.exception("Exception occurred while checking operation status")
yield TriggerEvent(
{
"status": "error",
"message": str(e),
}
)
return
def _get_hook(self) -> GKEAsyncHook:
if self._hook is None:
self._hook = GKEAsyncHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
return self._hook
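# A minimal, hypothetical sketch (not part of the provider): a compact restatement of
# how the GKEStartPodTrigger constructor above maps the deprecated should_delete_pod
# flag onto OnFinishAction. The helper name is a placeholder and exists only to make
# the mapping explicit.


def _resolve_on_finish_action(should_delete_pod: bool | None, on_finish_action: str) -> OnFinishAction:
    if should_delete_pod is None:
        return OnFinishAction(on_finish_action)
    return OnFinishAction.DELETE_POD if should_delete_pod else OnFinishAction.KEEP_POD


# _resolve_on_finish_action(True, "keep_pod")              -> OnFinishAction.DELETE_POD
# _resolve_on_finish_action(None, "delete_succeeded_pod")  -> OnFinishAction.DELETE_SUCCEEDED_POD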
| 9,498 | 39.76824 | 110 | py |
| airflow | airflow-main/airflow/providers/google/cloud/hooks/life_sciences.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Google Cloud Life Sciences service."""
from __future__ import annotations
import time
from typing import Sequence
import google.api_core.path_template
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 5
class LifeSciencesHook(GoogleBaseHook):
"""
Hook for the Google Cloud Life Sciences APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param api_version: API version used (for example v1 or v1beta1).
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
_conn: build | None = None
def __init__(
self,
api_version: str = "v2beta",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
def get_conn(self) -> build:
"""
Retrieves the connection to Cloud Life Sciences.
:return: Google Cloud Life Sciences service object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("lifesciences", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def run_pipeline(self, body: dict, location: str, project_id: str) -> dict:
"""
Runs a pipeline.
:param body: The request body.
:param location: The location of the project. For example: "us-east1".
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
"""
parent = self._location_path(project_id=project_id, location=location)
service = self.get_conn()
request = service.projects().locations().pipelines().run(parent=parent, body=body)
response = request.execute(num_retries=self.num_retries)
# wait
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name)
return response
@GoogleBaseHook.fallback_to_default_project_id
def _location_path(self, project_id: str, location: str) -> str:
"""
Return a location string.
:param project_id: Optional, Google Cloud Project project_id where the
function belongs. If set to None or missing, the default project_id
from the Google Cloud connection is used.
:param location: The location of the project. For example: "us-east1".
"""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}",
project=project_id,
location=location,
)
def _wait_for_operation_to_complete(self, operation_name: str) -> None:
"""
Waits for the named operation to complete - checks status of the asynchronous call.
:param operation_name: The name of the operation.
:return: The response returned by the operation.
:exception: AirflowException in case error is returned.
"""
service = self.get_conn()
while True:
operation_response = (
service.projects()
.locations()
.operations()
.get(name=operation_name)
.execute(num_retries=self.num_retries)
)
self.log.info("Waiting for pipeline operation to complete")
if operation_response.get("done"):
response = operation_response.get("response")
error = operation_response.get("error")
# Note: according to the documentation, either response or error is always
# set when "done" == True.
if error:
raise AirflowException(str(error))
return response
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
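# A minimal, hypothetical sketch (not part of the provider): basic usage of the hook
# above. The request body is a placeholder example of a Life Sciences v2beta
# pipelines.run shape (a single action running a shell command); the project and
# location values are also placeholders.


def run_example_pipeline() -> dict:
    hook = LifeSciencesHook(gcp_conn_id="google_cloud_default")
    body = {
        "pipeline": {
            "actions": [
                {"imageUri": "bash", "commands": ["-c", "echo Hello"]},
            ],
        },
    }
    # Blocks until the operation finishes (see _wait_for_operation_to_complete).
    return hook.run_pipeline(body=body, location="us-central1", project_id="example-project")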
| 6,073 | 39.493333 | 109 | py |
| airflow | airflow-main/airflow/providers/google/cloud/hooks/text_to_speech.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Text to Speech Hook."""
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.texttospeech_v1 import TextToSpeechClient
from google.cloud.texttospeech_v1.types import (
AudioConfig,
SynthesisInput,
SynthesizeSpeechResponse,
VoiceSelectionParams,
)
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudTextToSpeechHook(GoogleBaseHook):
"""
Hook for Google Cloud Text to Speech API.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: TextToSpeechClient | None = None
def get_conn(self) -> TextToSpeechClient:
"""
Retrieves connection to Cloud Text to Speech.
:return: Google Cloud Text to Speech client object.
"""
if not self._client:
self._client = TextToSpeechClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.quota_retry()
def synthesize_speech(
self,
input_data: dict | SynthesisInput,
voice: dict | VoiceSelectionParams,
audio_config: dict | AudioConfig,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> SynthesizeSpeechResponse:
"""
Synthesizes text input.
:param input_data: text input to be synthesized. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput
:param voice: configuration of voice to be used in synthesis. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams
:param audio_config: configuration of the synthesized audio. See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:return: SynthesizeSpeechResponse See more:
https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse
"""
client = self.get_conn()
if isinstance(input_data, dict):
input_data = SynthesisInput(input_data)
if isinstance(voice, dict):
voice = VoiceSelectionParams(voice)
if isinstance(audio_config, dict):
audio_config = AudioConfig(audio_config)
self.log.info("Synthesizing input: %s", input_data)
return client.synthesize_speech(
input=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout
)
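# A minimal, hypothetical sketch (not part of the provider): basic usage of
# synthesize_speech with plain dicts, which the method converts to the corresponding
# proto types. The values are placeholders, and passing the audio encoding by its enum
# name assumes proto-plus accepts string names; pass the AudioEncoding enum instead if
# that assumption does not hold.


def synthesize_example() -> bytes:
    hook = CloudTextToSpeechHook(gcp_conn_id="google_cloud_default")
    response = hook.synthesize_speech(
        input_data={"text": "Hello from Airflow"},
        voice={"language_code": "en-US"},
        audio_config={"audio_encoding": "LINEAR16"},
    )
    # The synthesized audio bytes are on the response's audio_content field.
    return response.audio_content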
| 5,440 | 43.598361 | 160 | py |
| airflow | airflow-main/airflow/providers/google/cloud/hooks/cloud_memorystore.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Hooks for Cloud Memorystore service.
.. spelling:word-list::
DataProtectionMode
FieldMask
pb
memcache
"""
from __future__ import annotations
from typing import Sequence
from google.api_core import path_template
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.memcache_v1beta2 import CloudMemcacheClient
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.cloud.redis_v1 import (
CloudRedisClient,
FailoverInstanceRequest,
InputConfig,
Instance,
OutputConfig,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class CloudMemorystoreHook(GoogleBaseHook):
"""
Hook for Google Cloud Memorystore APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: CloudRedisClient | None = None
def get_conn(self) -> CloudRedisClient:
"""Retrieves client library object that allow access to Cloud Memorystore service."""
if not self._client:
self._client = CloudRedisClient(credentials=self.get_credentials())
return self._client
@staticmethod
def _append_label(instance: Instance, key: str, val: str) -> Instance:
"""
Append labels to provided Instance type.
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param instance: The Instance proto to which the resource label (airflow
version) is appended
:param key: The label key
:param val: The label value
:return: The instance proto updated with the new label
"""
val = val.replace(".", "-").replace("+", "-")
instance.labels.update({key: val})
return instance
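# Illustrative, hypothetical example (not part of the provider): with
# key="airflow-version" and val="v2.7.1+build", the label is stored as
# {"airflow-version": "v2-7-1-build"}, which satisfies the
# [a-z]([-a-z0-9]*[a-z0-9])? constraint mentioned in the docstring.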
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
location: str,
instance_id: str,
instance: dict | Instance,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Creates a Redis instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Redis instance in the customer project with the
following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Redis [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(instance, dict):
instance = Instance(**instance)
elif not isinstance(instance, Instance):
raise AirflowException("instance is not an instance of Instance type or python dict")
parent = f"projects/{project_id}/locations/{location}"
instance_name = f"projects/{project_id}/locations/{location}/instances/{instance_id}"
try:
self.log.info("Fetching instance: %s", instance_name)
instance = client.get_instance(
request={"name": instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
self.log.info("Instance exists. Skipping creation.")
return instance
except NotFound:
self.log.info("Instance does not exist.")
self._append_label(instance, "airflow-version", "v" + version.version)
result = client.create_instance(
request={"parent": parent, "instance_id": instance_id, "instance": instance},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance created.")
return client.get_instance(
request={"name": instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
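# A comment-only sketch of calling create_instance above. The field names come
# from google.cloud.redis_v1.types.Instance; the ids and project are hypothetical:
#
#   hook = CloudMemorystoreHook()
#   hook.create_instance(
#       location="europe-west1",
#       instance_id="example-redis",
#       instance={"tier": Instance.Tier.BASIC, "memory_size_gb": 1},
#       project_id="example-project",
#   )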
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
location: str,
instance: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Deletes a specific Redis instance. Instance stops serving and data is deleted.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Fetching Instance: %s", name)
instance = client.get_instance(
request={"name": name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
if not instance:
return
self.log.info("Deleting Instance: %s", name)
result = client.delete_instance(
request={"name": name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance deleted: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def export_instance(
self,
location: str,
instance: str,
output_config: dict | OutputConfig,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Export Redis instance data into a Redis RDB format file in Cloud Storage.
Redis will continue serving during this operation.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param output_config: Required. Specify data to be exported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.OutputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Exporting Instance: %s", name)
result = client.export_instance(
request={"name": name, "output_config": output_config},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance exported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def failover_instance(
self,
location: str,
instance: str,
data_protection_mode: FailoverInstanceRequest.DataProtectionMode,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Failover of the primary node to the current replica node.
Initiates a failover of the primary node to the current replica node for a specific STANDARD tier Cloud
Memorystore for Redis instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param data_protection_mode: Optional. Available data protection modes that the user can choose. If
it's unspecified, data protection mode will be LIMITED_DATA_LOSS by default.
Possible values are described by ``FailoverInstanceRequest.DataProtectionMode``.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Initiating failover for instance: %s", name)
result = client.failover_instance(
request={"name": name, "data_protection_mode": data_protection_mode},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance failover completed: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
location: str,
instance: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Gets the details of a specific Redis instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
result = client.get_instance(
request={"name": name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Fetched Instance: %s", name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def import_instance(
self,
location: str,
instance: str,
input_config: dict | InputConfig,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
Redis may stop serving during this operation. Instance state will be IMPORTING for entire operation.
When complete, the instance will contain only data from the imported file.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param input_config: Required. Specify data to be imported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.InputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Importing Instance: %s", name)
result = client.import_instance(
request={"name": name, "input_config": input_config},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance imported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def list_instances(
self,
location: str,
page_size: int,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
List Redis instances owned by a project at the specified location (region) or all locations.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:param page_size: The maximum number of resources contained in the underlying API response. If page
streaming is performed per-resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
result = client.list_instances(
request={"parent": parent, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Fetched instances")
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
update_mask: dict | FieldMask,
instance: dict | Instance,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
instance_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Updates the metadata and configuration of a specific Redis instance.
:param update_mask: Required. Mask of fields to update. At least one path must be supplied in this
field. The elements of the repeated paths field may only include these fields from ``Instance``:
- ``displayName``
- ``labels``
- ``memorySizeGb``
- ``redisConfig``
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param instance: Required. Update description. Only fields specified in ``update_mask`` are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(instance, dict):
instance = Instance(**instance)
elif not isinstance(instance, Instance):
raise AirflowException("instance is not an instance of Instance type or python dict")
if location and instance_id:
name = f"projects/{project_id}/locations/{location}/instances/{instance_id}"
instance.name = name
self.log.info("Updating instance: %s", instance.name)
result = client.update_instance(
request={"update_mask": update_mask, "instance": instance},
retry=retry,
timeout=timeout,
metadata=metadata,
)
updated_instance = result.result()
self.log.info("Instance updated: %s", instance.name)
return updated_instance
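# A comment-only usage sketch for the export/import pair defined in
# CloudMemorystoreHook above; the bucket, instance and project names are
# hypothetical placeholders:
#
#   hook = CloudMemorystoreHook()
#   hook.export_instance(
#       location="europe-west1",
#       instance="example-redis",
#       output_config={"gcs_destination": {"uri": "gs://example-bucket/backup.rdb"}},
#       project_id="example-project",
#   )
#   hook.import_instance(
#       location="europe-west1",
#       instance="example-redis",
#       input_config={"gcs_source": {"uri": "gs://example-bucket/backup.rdb"}},
#       project_id="example-project",
#   )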
class CloudMemorystoreMemcachedHook(GoogleBaseHook):
"""
Hook for Google Cloud Memorystore for Memcached service APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: CloudMemcacheClient | None = None
def get_conn(self):
"""Retrieves a client library object that allows access to the Cloud Memorystore Memcached service."""
if not self._client:
self._client = CloudMemcacheClient(credentials=self.get_credentials())
return self._client
@staticmethod
def _append_label(instance: cloud_memcache.Instance, key: str, val: str) -> cloud_memcache.Instance:
"""
Append labels to provided Instance type.
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param instance: The proto to which the resource_label airflow
version will be appended
:param key: The label key
:param val: The label value
:return: The instance proto updated with the new label
"""
val = val.replace(".", "-").replace("+", "-")
instance.labels.update({key: val})
return instance
@GoogleBaseHook.fallback_to_default_project_id
def apply_parameters(
self,
node_ids: Sequence[str],
apply_all: bool,
project_id: str,
location: str,
instance_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Update the current set of parameters on the specified nodes of the Memcached instance.
:param node_ids: Nodes to which we should apply the instance-level parameter group.
:param apply_all: Whether to apply instance-level parameter group to all nodes. If set to true,
will explicitly restrict users from specifying any nodes, and apply parameter group updates
to all nodes within the instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
self.log.info("Applying update to instance: %s", instance_id)
result = client.apply_parameters(
name=name,
node_ids=node_ids,
apply_all=apply_all,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance updated: %s", instance_id)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
location: str,
instance_id: str,
instance: dict | cloud_memcache.Instance,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Creates a Memcached instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Memcached instance in the customer project
with the following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Memcached [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
parent = path_template.expand(
"projects/{project}/locations/{location}", project=project_id, location=location
)
instance_name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
try:
instance = client.get_instance(
name=instance_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Instance exists. Skipping creation.")
return instance
except NotFound:
self.log.info("Instance does not exist.")
if isinstance(instance, dict):
instance = cloud_memcache.Instance(instance)
elif not isinstance(instance, cloud_memcache.Instance):
raise AirflowException("instance is not an instance of Instance type or python dict")
self._append_label(instance, "airflow-version", "v" + version.version)
result = client.create_instance(
parent=parent,
instance_id=instance_id,
resource=instance,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance created.")
return client.get_instance(
name=instance_name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Deletes a specific Memcached instance. Instance stops serving and data is deleted.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance)
self.log.info("Fetching Instance: %s", name)
instance = client.get_instance(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
if not instance:
return
self.log.info("Deleting Instance: %s", name)
result = client.delete_instance(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance deleted: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Gets the details of a specific Memcached instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance)
result = client.get_instance(name=name, retry=retry, timeout=timeout, metadata=metadata or ())
self.log.info("Fetched Instance: %s", name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_instances(
self,
location: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
List Memcached instances owned by a project at the specified location (region) or all locations.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
parent = path_template.expand(
"projects/{project}/locations/{location}", project=project_id, location=location
)
result = client.list_instances(
parent=parent,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Fetched instances")
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
update_mask: dict | FieldMask,
instance: dict | cloud_memcache.Instance,
project_id: str,
location: str | None = None,
instance_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Updates the metadata and configuration of a specific Memcached instance.
:param update_mask: Required. Mask of fields to update. At least one path must be supplied in this
field. The elements of the repeated paths field may only include these fields from ``Instance``:
- ``displayName``
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param instance: Required. Update description. Only fields specified in ``update_mask`` are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
if isinstance(instance, dict):
instance = cloud_memcache.Instance(instance)
elif not isinstance(instance, cloud_memcache.Instance):
raise AirflowException("instance is not an instance of Instance type or python dict")
if location and instance_id:
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
instance.name = name
self.log.info("Updating instance: %s", instance.name)
result = client.update_instance(
update_mask=update_mask, resource=instance, retry=retry, timeout=timeout, metadata=metadata or ()
)
updated_instance = result.result()
self.log.info("Instance updated: %s", instance.name)
return updated_instance
@GoogleBaseHook.fallback_to_default_project_id
def update_parameters(
self,
update_mask: dict | FieldMask,
parameters: dict | cloud_memcache.MemcacheParameters,
project_id: str,
location: str,
instance_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Update the defined Memcached Parameters for an existing Instance.
This method only stages the parameters; it must be followed by apply_parameters
to apply the parameters to the nodes of the Memcached instance.
:param update_mask: Required. Mask of fields to update.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param parameters: The parameters to apply to the instance.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.MemcacheParameters`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
if isinstance(parameters, dict):
parameters = cloud_memcache.MemcacheParameters(parameters)
elif not isinstance(parameters, cloud_memcache.MemcacheParameters):
raise AirflowException("parameters is not an instance of MemcacheParameters type or python dict")
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
self.log.info("Staging update to instance: %s", instance_id)
result = client.update_parameters(
name=name,
update_mask=update_mask,
parameters=parameters,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Update staged for instance: %s", instance_id)
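if __name__ == "__main__":
    # A minimal, hedged usage sketch for CloudMemorystoreMemcachedHook. The
    # project, location and instance ids are hypothetical placeholders; running
    # this module directly requires valid Google Cloud credentials and will call
    # the real Memorystore for Memcached API.
    hook = CloudMemorystoreMemcachedHook(gcp_conn_id="google_cloud_default")
    created = hook.create_instance(
        location="europe-west1",
        instance_id="example-memcached",
        instance={
            "node_count": 1,
            "node_config": {"cpu_count": 1, "memory_size_mb": 1024},
        },
        project_id="example-project",
    )
    print(created.name)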
| 40,409 | 44.353535 | 109 | py |
| airflow | airflow-main/airflow/providers/google/cloud/hooks/gdm.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Sequence
from googleapiclient.discovery import Resource, build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GoogleDeploymentManagerHook(GoogleBaseHook):
"""
Interact with Google Cloud Deployment Manager using the Google Cloud connection.
This allows for scheduled and programmatic inspection and deletion of resources managed by GDM.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter was deprecated and has been removed in this version"
" of the Google provider. You MUST convert it to `impersonation_chain`."
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
def get_conn(self) -> Resource:
"""Returns a Google Deployment Manager service object."""
http_authorized = self._authorize()
return build("deploymentmanager", "v2", http=http_authorized, cache_discovery=False)
@GoogleBaseHook.fallback_to_default_project_id
def list_deployments(
self,
project_id: str | None = None,
deployment_filter: str | None = None,
order_by: str | None = None,
) -> list[dict[str, Any]]:
"""
Lists deployments in a Google Cloud project.
:param project_id: The project ID for this request.
:param deployment_filter: A filter expression which limits resources returned in the response.
:param order_by: A field name to order by, ex: "creationTimestamp desc"
"""
deployments: list[dict] = []
conn = self.get_conn()
request = conn.deployments().list(project=project_id, filter=deployment_filter, orderBy=order_by)
while request is not None:
response = request.execute(num_retries=self.num_retries)
deployments.extend(response.get("deployments", []))
request = conn.deployments().list_next(previous_request=request, previous_response=response)
return deployments
@GoogleBaseHook.fallback_to_default_project_id
def delete_deployment(
self, project_id: str | None, deployment: str | None = None, delete_policy: str | None = None
) -> None:
"""
Deletes a deployment and all associated resources in a Google Cloud project.
:param project_id: The project ID for this request.
:param deployment: The name of the deployment for this request.
:param delete_policy: Sets the policy to use for deleting resources. (ABANDON | DELETE)
"""
conn = self.get_conn()
request = conn.deployments().delete(
project=project_id, deployment=deployment, deletePolicy=delete_policy
)
resp = request.execute()
if "error" in resp.keys():
raise AirflowException(
"Errors deleting deployment: " + ", ".join(err["message"] for err in resp["error"]["errors"])
)
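if __name__ == "__main__":
    # A hedged usage sketch for GoogleDeploymentManagerHook: list deployments in
    # a hypothetical project, ordered by creation time. Running this module
    # directly requires valid credentials and calls the real Deployment Manager API.
    hook = GoogleDeploymentManagerHook(gcp_conn_id="google_cloud_default")
    for deployment in hook.list_deployments(
        project_id="example-project", order_by="creationTimestamp desc"
    ):
        print(deployment.get("name"))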
| 4,099 | 38.805825 | 108 | py |
| airflow | airflow-main/airflow/providers/google/cloud/hooks/dataplex.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataplex hook."""
from __future__ import annotations
from typing import Any, Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.dataplex_v1 import DataplexServiceClient
from google.cloud.dataplex_v1.types import Lake, Task
from googleapiclient.discovery import Resource
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class DataplexHook(GoogleBaseHook):
"""
Hook for Google Dataplex.
:param api_version: The version of the API that will be requested, for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
_conn: Resource | None = None
def __init__(
self,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter was deprecated and has been removed in this version"
" of the Google provider. You MUST convert it to `impersonation_chain`."
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
def get_dataplex_client(self) -> DataplexServiceClient:
"""Returns DataplexServiceClient."""
client_options = ClientOptions(api_endpoint="dataplex.googleapis.com:443")
return DataplexServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def wait_for_operation(self, timeout: float | None, operation: Operation):
"""Waits for a long-running operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
self,
project_id: str,
region: str,
lake_id: str,
body: dict[str, Any] | Task,
dataplex_task_id: str,
validate_only: bool | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Creates a task resource within a lake.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param body: Required. The Request body contains an instance of Task.
:param dataplex_task_id: Required. Task identifier.
:param validate_only: Optional. Only validate the request, but do not perform mutations.
The default is false.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
parent = f"projects/{project_id}/locations/{region}/lakes/{lake_id}"
client = self.get_dataplex_client()
result = client.create_task(
request={
"parent": parent,
"task_id": dataplex_task_id,
"task": body,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
self,
project_id: str,
region: str,
lake_id: str,
dataplex_task_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Delete the task resource.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param dataplex_task_id: Required. The ID of the Google Cloud task to be deleted.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}/tasks/{dataplex_task_id}"
client = self.get_dataplex_client()
result = client.delete_task(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
self,
project_id: str,
region: str,
lake_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Lists tasks under the given lake.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param page_size: Optional. Maximum number of tasks to return. The service may return fewer than this
value. If unspecified, at most 10 tasks will be returned. The maximum value is 1000;
values above 1000 will be coerced to 1000.
:param page_token: Optional. Page token received from a previous ListTasks call. Provide this to
retrieve the subsequent page. When paginating, all other parameters provided to ListTasks must
match the call that provided the page token.
:param filter: Optional. Filter request.
:param order_by: Optional. Order by fields for the result.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
parent = f"projects/{project_id}/locations/{region}/lakes/{lake_id}"
client = self.get_dataplex_client()
result = client.list_tasks(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
self,
project_id: str,
region: str,
lake_id: str,
dataplex_task_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Get task resource.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param dataplex_task_id: Required. The ID of the Google Cloud task to be retrieved.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}/tasks/{dataplex_task_id}"
client = self.get_dataplex_client()
result = client.get_task(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_lake(
self,
project_id: str,
region: str,
lake_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Delete the lake resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param lake_id: Required. The ID of the Google Cloud lake to be deleted.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}"
client = self.get_dataplex_client()
result = client.delete_lake(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def create_lake(
self,
project_id: str,
region: str,
lake_id: str,
body: dict[str, Any] | Lake,
validate_only: bool | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Creates a lake resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param lake_id: Required. Lake identifier.
:param body: Required. The Request body contains an instance of Lake.
:param validate_only: Optional. Only validate the request, but do not perform mutations.
The default is false.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
parent = f"projects/{project_id}/locations/{region}"
client = self.get_dataplex_client()
result = client.create_lake(
request={
"parent": parent,
"lake_id": lake_id,
"lake": body,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_lake(
self,
project_id: str,
region: str,
lake_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Get lake resource.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param lake_id: Required. The ID of the Google Cloud lake to be retrieved.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}/"
client = self.get_dataplex_client()
result = client.get_lake(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
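if __name__ == "__main__":
    # A hedged usage sketch for DataplexHook: create a lake and block until the
    # long-running operation completes. The project, region and lake ids are
    # hypothetical placeholders; running this calls the real Dataplex API.
    hook = DataplexHook(gcp_conn_id="google_cloud_default")
    operation = hook.create_lake(
        project_id="example-project",
        region="europe-west1",
        lake_id="example-lake",
        body={"display_name": "Example lake"},
    )
    lake = hook.wait_for_operation(timeout=600.0, operation=operation)
    print(lake.name)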
| 15,387 | 41.274725 | 109 | py |
| airflow | airflow-main/airflow/providers/google/cloud/hooks/datastore.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Datastore hook."""
from __future__ import annotations
import time
from typing import Any, Sequence
from googleapiclient.discovery import Resource, build
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class DatastoreHook(GoogleBaseHook):
"""
Interact with Google Cloud Datastore. This hook uses the Google Cloud connection.
This object is not thread safe. If you want to make multiple requests
simultaneously, you will need to create a hook per thread.
:param api_version: The version of the API it is going to connect to.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter was deprecated and has been removed in this version"
" of the Google provider. You MUST convert it to `impersonation_chain`."
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.connection = None
self.api_version = api_version
def get_conn(self) -> Resource:
"""
Establishes a connection to the Google API.
:return: a Google Cloud Datastore service object.
"""
if not self.connection:
http_authorized = self._authorize()
self.connection = build(
"datastore", self.api_version, http=http_authorized, cache_discovery=False
)
return self.connection
@GoogleBaseHook.fallback_to_default_project_id
def allocate_ids(self, partial_keys: list, project_id: str) -> list:
"""
Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:param project_id: Google Cloud project ID against which to make the request.
:return: a list of full keys.
"""
conn = self.get_conn()
resp = (
conn.projects()
.allocateIds(projectId=project_id, body={"keys": partial_keys})
.execute(num_retries=self.num_retries)
)
return resp["keys"]
@GoogleBaseHook.fallback_to_default_project_id
def begin_transaction(self, project_id: str, transaction_options: dict[str, Any]) -> str:
"""
Begins a new transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:param project_id: Google Cloud project ID against which to make the request.
:param transaction_options: Options for a new transaction.
:return: a transaction handle.
"""
conn = self.get_conn()
resp = (
conn.projects()
.beginTransaction(projectId=project_id, body={"transactionOptions": transaction_options})
.execute(num_retries=self.num_retries)
)
return resp["transaction"]
@GoogleBaseHook.fallback_to_default_project_id
def commit(self, body: dict, project_id: str) -> dict:
"""
Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:param project_id: Google Cloud project ID against which to make the request.
:return: the response body of the commit request.
"""
conn = self.get_conn()
resp = conn.projects().commit(projectId=project_id, body=body).execute(num_retries=self.num_retries)
return resp
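# A comment-only sketch of the begin_transaction/commit flow above, given a
# DatastoreHook instance ``hook``. The kind, key name and property are
# hypothetical; the body layout follows the Datastore REST API (projects.commit):
#
#   txn = hook.begin_transaction(project_id="example-project", transaction_options={})
#   hook.commit(
#       project_id="example-project",
#       body={
#           "mode": "TRANSACTIONAL",
#           "transaction": txn,
#           "mutations": [
#               {
#                   "insert": {
#                       "key": {"path": [{"kind": "ExampleKind", "name": "example-id"}]},
#                       "properties": {"note": {"stringValue": "created by example"}},
#                   }
#               }
#           ],
#       },
#   )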
@GoogleBaseHook.fallback_to_default_project_id
def lookup(
self,
keys: list,
project_id: str,
read_consistency: str | None = None,
transaction: str | None = None,
) -> dict:
"""
Look up some entities by key.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup
:param keys: the keys to lookup.
:param read_consistency: the read consistency to use. default, strong or eventual.
Cannot be used with a transaction.
:param transaction: the transaction to use, if any.
:param project_id: Google Cloud project ID against which to make the request.
:return: the response body of the lookup request.
"""
conn = self.get_conn()
body: dict[str, Any] = {"keys": keys}
if read_consistency:
body["readConsistency"] = read_consistency
if transaction:
body["transaction"] = transaction
resp = conn.projects().lookup(projectId=project_id, body=body).execute(num_retries=self.num_retries)
return resp
@GoogleBaseHook.fallback_to_default_project_id
def rollback(self, transaction: str, project_id: str) -> None:
"""
Roll back a transaction.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:param project_id: Google Cloud project ID against which to make the request.
"""
conn: Any = self.get_conn()
conn.projects().rollback(projectId=project_id, body={"transaction": transaction}).execute(
num_retries=self.num_retries
)
@GoogleBaseHook.fallback_to_default_project_id
def run_query(self, body: dict, project_id: str) -> dict:
"""
Run a query for entities.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:param project_id: Google Cloud project ID against which to make the request.
:return: the batch of query results.
"""
conn = self.get_conn()
resp = conn.projects().runQuery(projectId=project_id, body=body).execute(num_retries=self.num_retries)
return resp["batch"]
def get_operation(self, name: str) -> dict:
"""
Gets the latest state of a long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:return: a resource operation instance.
"""
conn: Any = self.get_conn()
resp = conn.projects().operations().get(name=name).execute(num_retries=self.num_retries)
return resp
def delete_operation(self, name: str) -> dict:
"""
Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:return: none if successful.
"""
conn = self.get_conn()
resp = conn.projects().operations().delete(name=name).execute(num_retries=self.num_retries)
return resp
def poll_operation_until_done(self, name: str, polling_interval_in_seconds: float) -> dict:
"""
Poll backup operation state until it's completed.
:param name: the name of the operation resource
:param polling_interval_in_seconds: The number of seconds to wait before calling another request.
:return: a resource operation instance.
"""
while True:
result: dict = self.get_operation(name)
state: str = result["metadata"]["common"]["state"]
if state == "PROCESSING":
self.log.info(
"Operation is processing. Re-polling state in %s seconds", polling_interval_in_seconds
)
time.sleep(polling_interval_in_seconds)
else:
return result
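# A comment-only sketch tying the polling helper above to the Admin API export
# defined below, given a DatastoreHook instance ``hook``; the bucket and project
# names are hypothetical:
#
#   op = hook.export_to_storage_bucket(bucket="example-bucket", project_id="example-project")
#   hook.poll_operation_until_done(name=op["name"], polling_interval_in_seconds=10)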
@GoogleBaseHook.fallback_to_default_project_id
def export_to_storage_bucket(
self,
bucket: str,
project_id: str,
namespace: str | None = None,
entity_filter: dict | None = None,
labels: dict[str, str] | None = None,
) -> dict:
"""
Export entities from Cloud Datastore to Cloud Storage for backup.
.. note::
Keep in mind that this calls the Admin API, not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/export
:param bucket: The name of the Cloud Storage bucket.
:param namespace: The Cloud Storage namespace path.
:param entity_filter: Description of what data from the project is included in the export.
:param labels: Client-assigned labels.
:param project_id: Google Cloud project ID against which to make the request.
:return: a resource operation instance.
"""
admin_conn = self.get_conn()
output_url_prefix = f"gs://{'/'.join(filter(None, [bucket, namespace]))}"
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
"outputUrlPrefix": output_url_prefix,
"entityFilter": entity_filter,
"labels": labels,
}
resp = (
admin_conn.projects()
.export(projectId=project_id, body=body)
.execute(num_retries=self.num_retries)
)
return resp
@GoogleBaseHook.fallback_to_default_project_id
def import_from_storage_bucket(
self,
bucket: str,
file: str,
project_id: str,
namespace: str | None = None,
entity_filter: dict | None = None,
labels: dict | str | None = None,
) -> dict:
"""
Import a backup from Cloud Storage to Cloud Datastore.
.. note::
Keep in mind that this calls the Admin API, not the Data API.
.. seealso::
https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import
:param bucket: The name of the Cloud Storage bucket.
:param file: the metadata file written by the projects.export operation.
:param namespace: The Cloud Storage namespace path.
:param entity_filter: specify which kinds/namespaces are to be imported.
:param labels: Client-assigned labels.
:param project_id: Google Cloud project ID against which to make the request.
:return: a resource operation instance.
"""
admin_conn = self.get_conn()
input_url = f"gs://{'/'.join(filter(None, [bucket, namespace, file]))}"
if not entity_filter:
entity_filter = {}
if not labels:
labels = {}
body = {
"inputUrl": input_url,
"entityFilter": entity_filter,
"labels": labels,
}
resp = (
admin_conn.projects()
.import_(projectId=project_id, body=body)
.execute(num_retries=self.num_retries)
)
return resp
| 12,161 | 34.354651 | 110 | py |
| airflow | airflow-main/airflow/providers/google/cloud/hooks/mlengine.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google ML Engine Hook."""
from __future__ import annotations
import logging
import random
import time
from typing import Callable
from aiohttp import ClientSession
from gcloud.aio.auth import AioSession, Token
from googleapiclient.discovery import Resource, build
from googleapiclient.errors import HttpError
from httplib2 import Response
from requests import Session
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseAsyncHook, GoogleBaseHook
from airflow.version import version as airflow_version
log = logging.getLogger(__name__)
_AIRFLOW_VERSION = "v" + airflow_version.replace(".", "-").replace("+", "-")
def _poll_with_exponential_delay(
request, execute_num_retries, max_n, is_done_func, is_error_func
) -> Response:
"""
Execute request with exponential delay.
This method is intended to handle and retry in case of api-specific errors,
such as 429 "Too Many Requests", unlike the `request.execute` which handles
lower level errors like `ConnectionError`/`socket.timeout`/`ssl.SSLError`.
:param request: request to be executed.
:param execute_num_retries: num_retries for `request.execute` method.
:param max_n: number of times to retry request in this method.
:param is_done_func: callable to determine if operation is done.
:param is_error_func: callable to determine if operation is failed.
:return: response
"""
for i in range(0, max_n):
try:
response = request.execute(num_retries=execute_num_retries)
if is_error_func(response):
raise ValueError(f"The response contained an error: {response}")
if is_done_func(response):
log.info("Operation is done: %s", response)
return response
time.sleep((2**i) + (random.randint(0, 1000) / 1000))
except HttpError as e:
if e.resp.status != 429:
log.info("Something went wrong. Not retrying: %s", format(e))
raise
else:
time.sleep((2**i) + (random.randint(0, 1000) / 1000))
raise ValueError(f"Connection could not be established after {max_n} retries.")
class MLEngineHook(GoogleBaseHook):
"""
Hook for Google ML Engine APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
def get_conn(self) -> Resource:
"""
Retrieves the connection to MLEngine.
:return: Google MLEngine services object.
"""
authed_http = self._authorize()
return build("ml", "v1", http=authed_http, cache_discovery=False)
@GoogleBaseHook.fallback_to_default_project_id
def create_job(self, job: dict, project_id: str, use_existing_job_fn: Callable | None = None) -> dict:
"""
        Launches an MLEngine job and waits for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
        :param use_existing_job_fn: In case an MLEngine job with the same
            job_id already exists, this callable (if provided) decides whether
            to reuse the existing job, i.e. keep waiting for it to finish and
            return the job object. It should accept an MLEngine job
            object and return a boolean indicating whether it is OK to
            reuse the existing job. If 'use_existing_job_fn' is not provided,
            the existing MLEngine job is reused by default.
        :return: The MLEngine job object if the job successfully reaches a
            terminal state (which might be the FAILED or CANCELLED state).
"""
hook = self.get_conn()
self._append_label(job)
self.log.info("Creating job.")
request = hook.projects().jobs().create(parent=f"projects/{project_id}", body=job)
job_id = job["jobId"]
try:
request.execute(num_retries=self.num_retries)
except HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self.get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
self.log.error(
"Job with job_id %s already exist, but it does not match our expectation: %s",
job_id,
existing_job,
)
raise
self.log.info("Job with job_id %s already exist. Will waiting for it to finish", job_id)
else:
self.log.error("Failed to create MLEngine job: %s", e)
raise
return self._wait_for_job_done(project_id, job_id)
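    # Illustrative sketch (not part of the original module): a typical create_job call
    # with a reuse check. The job dict and the predicate below are hypothetical.
    #
    #     def _same_training_input(existing_job):
    #         return existing_job.get("trainingInput", {}).get("scaleTier") == "STANDARD_1"
    #
    #     finished_job = hook.create_job(
    #         project_id="my-project",
    #         job={"jobId": "my_job_id", "trainingInput": {"scaleTier": "STANDARD_1"}},
    #         use_existing_job_fn=_same_training_input,
    #     )
    #     # Blocks until the job reaches SUCCEEDED, FAILED or CANCELLED.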
@GoogleBaseHook.fallback_to_default_project_id
def create_job_without_waiting_result(
self,
body: dict,
project_id: str,
):
"""
        Launches an MLEngine job without waiting for it to finish.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param body: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
        :return: The job_id of the submitted MLEngine job. The method returns
            immediately after submission and does not wait for the job to complete.
"""
hook = self.get_conn()
self._append_label(body)
request = hook.projects().jobs().create(parent=f"projects/{project_id}", body=body)
job_id = body["jobId"]
request.execute(num_retries=self.num_retries)
return job_id
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
job_id: str,
project_id: str,
) -> dict:
"""
        Cancels an MLEngine job.
:param project_id: The Google Cloud project id within which MLEngine
job will be cancelled. If set to None or missing, the default project_id from the Google Cloud
connection is used.
        :param job_id: The unique id of the Google MLEngine training job to cancel.
:return: Empty dict if cancelled successfully
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
request = hook.projects().jobs().cancel(name=f"projects/{project_id}/jobs/{job_id}")
try:
return request.execute(num_retries=self.num_retries)
except HttpError as e:
if e.resp.status == 404:
self.log.error("Job with job_id %s does not exist. ", job_id)
raise
elif e.resp.status == 400:
self.log.info("Job with job_id %s is already complete, cancellation aborted.", job_id)
return {}
else:
self.log.error("Failed to cancel MLEngine job: %s", e)
raise
def get_job(self, project_id: str, job_id: str) -> dict:
"""
        Gets an MLEngine job based on the job id.
:param project_id: The project in which the Job is located. If set to None or missing, the default
project_id from the Google Cloud connection is used. (templated)
:param job_id: A unique id for the Google MLEngine job. (templated)
:return: MLEngine job object if succeed.
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
job_name = f"projects/{project_id}/jobs/{job_id}"
request = hook.projects().jobs().get(name=job_name)
while True:
try:
return request.execute(num_retries=self.num_retries)
except HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
self.log.error("Failed to get MLEngine job: %s", e)
raise
def _wait_for_job_done(self, project_id: str, job_id: str, interval: int = 30):
"""
Waits for the Job to reach a terminal state.
        This method will periodically check the job state until the job reaches
a terminal state.
:param project_id: The project in which the Job is located. If set to None or missing, the default
project_id from the Google Cloud connection is used. (templated)
:param job_id: A unique id for the Google MLEngine job. (templated)
:param interval: Time expressed in seconds after which the job status is checked again. (templated)
:raises: googleapiclient.errors.HttpError
"""
self.log.info("Waiting for job. job_id=%s", job_id)
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self.get_job(project_id, job_id)
if job["state"] in ["SUCCEEDED", "FAILED", "CANCELLED"]:
return job
time.sleep(interval)
@GoogleBaseHook.fallback_to_default_project_id
def create_version(
self,
model_name: str,
version_spec: dict,
project_id: str,
) -> dict:
"""
Creates the Version on Google Cloud ML Engine.
:param version_spec: A dictionary containing the information about the version. (templated)
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to.
(templated)
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
(templated)
:return: If the version was created successfully, returns the operation.
            Otherwise raises an error.
"""
hook = self.get_conn()
parent_name = f"projects/{project_id}/models/{model_name}"
self._append_label(version_spec)
create_request = hook.projects().models().versions().create(parent=parent_name, body=version_spec)
response = create_request.execute(num_retries=self.num_retries)
get_request = hook.projects().operations().get(name=response["name"])
return _poll_with_exponential_delay(
request=get_request,
execute_num_retries=self.num_retries,
max_n=9,
is_done_func=lambda resp: resp.get("done", False),
is_error_func=lambda resp: resp.get("error", None) is not None,
)
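    # Illustrative sketch (not part of the original module): a minimal version_spec.
    # Model, bucket and runtime values are hypothetical.
    #
    #     operation = hook.create_version(
    #         project_id="my-project",
    #         model_name="my_model",
    #         version_spec={
    #             "name": "v1",
    #             "deploymentUri": "gs://my-bucket/saved_model/",
    #             "runtimeVersion": "2.8",
    #         },
    #     )
    #     # Blocks via _poll_with_exponential_delay until the create operation is done.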
@GoogleBaseHook.fallback_to_default_project_id
def set_default_version(
self,
model_name: str,
version_name: str,
project_id: str,
) -> dict:
"""
Sets a version to be the default. Blocks until finished.
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to.
(templated)
:param version_name: A name to use for the version being operated upon. (templated)
:param project_id: The Google Cloud project name to which MLEngine model belongs. If set to None
or missing, the default project_id from the Google Cloud connection is used. (templated)
:return: If successful, return an instance of Version.
Otherwise raises an error.
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
full_version_name = f"projects/{project_id}/models/{model_name}/versions/{version_name}"
request = hook.projects().models().versions().setDefault(name=full_version_name, body={})
try:
response = request.execute(num_retries=self.num_retries)
self.log.info("Successfully set version: %s to default", response)
return response
except HttpError as e:
self.log.error("Something went wrong: %s", e)
raise
@GoogleBaseHook.fallback_to_default_project_id
def list_versions(
self,
model_name: str,
project_id: str,
) -> list[dict]:
"""
Lists all available versions of a model. Blocks until finished.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:param project_id: The Google Cloud project name to which MLEngine model belongs. If set to None or
missing, the default project_id from the Google Cloud connection is used. (templated)
        :return: A list of Version instances.
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
result: list[dict] = []
full_parent_name = f"projects/{project_id}/models/{model_name}"
request = hook.projects().models().versions().list(parent=full_parent_name, pageSize=100)
while request is not None:
response = request.execute(num_retries=self.num_retries)
result.extend(response.get("versions", []))
request = (
hook.projects()
.models()
.versions()
.list_next(previous_request=request, previous_response=response)
)
time.sleep(5)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_version(
self,
model_name: str,
version_name: str,
project_id: str,
) -> dict:
"""
Deletes the given version of a model. Blocks until finished.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:param project_id: The Google Cloud project name to which MLEngine
model belongs.
:param version_name: A name to use for the version being operated upon. (templated)
:return: If the version was deleted successfully, returns the operation.
Otherwise raises an error.
"""
hook = self.get_conn()
full_name = f"projects/{project_id}/models/{model_name}/versions/{version_name}"
delete_request = hook.projects().models().versions().delete(name=full_name)
response = delete_request.execute(num_retries=self.num_retries)
get_request = hook.projects().operations().get(name=response["name"])
return _poll_with_exponential_delay(
request=get_request,
execute_num_retries=self.num_retries,
max_n=9,
is_done_func=lambda resp: resp.get("done", False),
is_error_func=lambda resp: resp.get("error", None) is not None,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_model(
self,
model: dict,
project_id: str,
) -> dict:
"""
Create a Model. Blocks until finished.
:param model: A dictionary containing the information about the model.
:param project_id: The Google Cloud project name to which MLEngine model belongs. If set to None or
missing, the default project_id from the Google Cloud connection is used. (templated)
        :return: If the model was created successfully, returns the instance of Model.
Otherwise raises an error.
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
if "name" not in model or not model["name"]:
raise ValueError("Model name must be provided and could not be an empty string")
project = f"projects/{project_id}"
self._append_label(model)
try:
request = hook.projects().models().create(parent=project, body=model)
response = request.execute(num_retries=self.num_retries)
except HttpError as e:
if e.resp.status != 409:
raise e
str(e) # Fills in the error_details field
if not e.error_details or len(e.error_details) != 1:
raise e
error_detail = e.error_details[0]
if error_detail["@type"] != "type.googleapis.com/google.rpc.BadRequest":
raise e
if "fieldViolations" not in error_detail or len(error_detail["fieldViolations"]) != 1:
raise e
field_violation = error_detail["fieldViolations"][0]
if (
field_violation["field"] != "model.name"
or field_violation["description"] != "A model with the same name already exists."
):
raise e
response = self.get_model(model_name=model["name"], project_id=project_id)
return response
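    # Illustrative sketch (not part of the original module): creating a model, or fetching
    # it when it already exists. Names are hypothetical.
    #
    #     model = hook.create_model(
    #         project_id="my-project",
    #         model={"name": "my_model", "regions": ["us-central1"]},
    #     )
    #     # A 409 "model already exists" response is tolerated: the existing model
    #     # resource is returned via get_model instead of raising.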
@GoogleBaseHook.fallback_to_default_project_id
def get_model(
self,
model_name: str,
project_id: str,
) -> dict | None:
"""
Gets a Model. Blocks until finished.
:param model_name: The name of the model.
:param project_id: The Google Cloud project name to which MLEngine model belongs. If set to None
or missing, the default project_id from the Google Cloud connection is used. (templated)
:return: If the model exists, returns the instance of Model.
Otherwise return None.
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
if not model_name:
raise ValueError("Model name must be provided and it could not be an empty string")
full_model_name = f"projects/{project_id}/models/{model_name}"
request = hook.projects().models().get(name=full_model_name)
try:
return request.execute(num_retries=self.num_retries)
except HttpError as e:
if e.resp.status == 404:
self.log.error("Model was not found: %s", e)
return None
raise
@GoogleBaseHook.fallback_to_default_project_id
def delete_model(
self,
model_name: str,
project_id: str,
delete_contents: bool = False,
) -> None:
"""
Delete a Model. Blocks until finished.
:param model_name: The name of the model.
        :param delete_contents: Whether to force the deletion even if the model is not empty.
            Will delete all versions (if any) of the model if set to True.
            The default value is False.
:param project_id: The Google Cloud project name to which MLEngine model belongs. If set to None
or missing, the default project_id from the Google Cloud connection is used. (templated)
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
if not model_name:
raise ValueError("Model name must be provided and it could not be an empty string")
model_path = f"projects/{project_id}/models/{model_name}"
if delete_contents:
self._delete_all_versions(model_name, project_id)
request = hook.projects().models().delete(name=model_path)
try:
request.execute(num_retries=self.num_retries)
except HttpError as e:
if e.resp.status == 404:
self.log.error("Model was not found: %s", e)
return
raise
def _delete_all_versions(self, model_name: str, project_id: str):
versions = self.list_versions(project_id=project_id, model_name=model_name)
# The default version can only be deleted when it is the last one in the model
non_default_versions = (version for version in versions if not version.get("isDefault", False))
for version in non_default_versions:
_, _, version_name = version["name"].rpartition("/")
self.delete_version(project_id=project_id, model_name=model_name, version_name=version_name)
default_versions = (version for version in versions if version.get("isDefault", False))
for version in default_versions:
_, _, version_name = version["name"].rpartition("/")
self.delete_version(project_id=project_id, model_name=model_name, version_name=version_name)
def _append_label(self, model: dict) -> None:
model["labels"] = model.get("labels", {})
model["labels"]["airflow-version"] = _AIRFLOW_VERSION
class MLEngineAsyncHook(GoogleBaseAsyncHook):
"""Class to get asynchronous hook for MLEngine."""
sync_hook_class = MLEngineHook
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
    def _check_fields(
self,
job_id: str,
project_id: str | None = None,
):
if not project_id:
raise AirflowException("Google Cloud project id is required.")
if not job_id:
raise AirflowException("An unique job id is required for Google MLEngine training job.")
async def _get_link(self, url: str, session: Session):
async with Token(scopes=self.scopes) as token:
session_aio = AioSession(session)
headers = {
"Authorization": f"Bearer {await token.get()}",
}
            job = None
            try:
                job = await session_aio.get(url=url, headers=headers)
            except AirflowException:
                pass  # Because the job may not be visible in the system yet
return job
async def get_job(self, job_id: str, session: Session, project_id: str | None = None):
"""Get the specified job resource by job ID and project ID."""
        self._check_fields(project_id=project_id, job_id=job_id)
url = f"https://ml.googleapis.com/v1/projects/{project_id}/jobs/{job_id}"
return await self._get_link(url=url, session=session)
async def get_job_status(
self,
job_id: str,
project_id: str | None = None,
) -> str | None:
"""
Polls for job status asynchronously using gcloud-aio.
        Note that an OSError is raised when job results are still pending;
        any other exception means that the job finished with errors.
"""
        self._check_fields(project_id=project_id, job_id=job_id)
async with ClientSession() as session:
try:
job = await self.get_job(
project_id=project_id, job_id=job_id, session=session # type: ignore
)
job = await job.json(content_type=None)
self.log.info("Retrieving json_response: %s", job)
if job["state"] in ["SUCCEEDED", "FAILED", "CANCELLED"]:
job_status = "success"
elif job["state"] in ["PREPARING", "RUNNING"]:
job_status = "pending"
except OSError:
job_status = "pending"
except Exception as e:
self.log.info("Query execution finished with errors...")
job_status = str(e)
return job_status
| 24,910 | 40.107261 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/datacatalog.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud import datacatalog
from google.cloud.datacatalog import (
CreateTagRequest,
DataCatalogClient,
Entry,
EntryGroup,
SearchCatalogRequest,
Tag,
TagTemplate,
TagTemplateField,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class CloudDataCatalogHook(GoogleBaseHook):
"""
Hook for Google Cloud Data Catalog Service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: DataCatalogClient | None = None
def get_conn(self) -> DataCatalogClient:
"""Retrieves client library object that allow access to Cloud Data Catalog service."""
if not self._client:
self._client = DataCatalogClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def create_entry(
self,
location: str,
entry_group: str,
entry_id: str,
entry: dict | Entry,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
"""
Creates an entry.
Currently only entries of 'FILESET' type can be created.
:param location: Required. The location of the entry to create.
:param entry_group: Required. Entry group ID under which the entry is created.
:param entry_id: Required. The id of the entry to create.
:param entry: Required. The entry to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Entry`
:param project_id: The ID of the Google Cloud project that owns the entry.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If set to ``None`` or missing, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}"
self.log.info("Creating a new entry: parent=%s", parent)
result = client.create_entry(
request={"parent": parent, "entry_id": entry_id, "entry": entry},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Created a entry: name=%s", result.name)
return result
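    # Illustrative sketch (not part of the original module): creating a FILESET entry.
    # Location, IDs and the bucket pattern are hypothetical.
    #
    #     entry = hook.create_entry(
    #         location="europe-west3",
    #         entry_group="my_entry_group",
    #         entry_id="my_entry",
    #         entry={
    #             "display_name": "Sales files",
    #             "type_": "FILESET",
    #             "gcs_fileset_spec": {"file_patterns": ["gs://my-bucket/**"]},
    #         },
    #         project_id="my-project",
    #     )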
@GoogleBaseHook.fallback_to_default_project_id
def create_entry_group(
self,
location: str,
entry_group_id: str,
entry_group: dict | EntryGroup,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> EntryGroup:
"""
Creates an EntryGroup.
:param location: Required. The location of the entry group to create.
:param entry_group_id: Required. The id of the entry group to create. The id must begin with a letter
or underscore, contain only English letters, numbers and underscores, and be at most 64
characters.
:param entry_group: The entry group to create. Defaults to an empty entry group.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.EntryGroup`
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Creating a new entry group: parent=%s", parent)
result = client.create_entry_group(
request={"parent": parent, "entry_group_id": entry_group_id, "entry_group": entry_group},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Created a entry group: name=%s", result.name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def create_tag(
self,
location: str,
entry_group: str,
entry: str,
tag: dict | Tag,
project_id: str = PROVIDE_PROJECT_ID,
template_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Tag:
"""
Creates a tag on an entry.
:param location: Required. The location of the tag to create.
:param entry_group: Required. Entry group ID under which the tag is created.
        :param entry: Required. Entry ID under which the tag is created.
:param tag: Required. The tag to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Tag`
:param template_id: Required. Template ID used to create tag
:param project_id: The ID of the Google Cloud project that owns the tag.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if template_id:
template_path = f"projects/{project_id}/locations/{location}/tagTemplates/{template_id}"
if isinstance(tag, Tag):
tag.template = template_path
else:
tag["template"] = template_path
parent = f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}/entries/{entry}"
self.log.info("Creating a new tag: parent=%s", parent)
# HACK: google-cloud-datacatalog has problems with mapping messages where the value is not a
# primitive type, so we need to convert it manually.
# See: https://github.com/googleapis/python-datacatalog/issues/84
if isinstance(tag, dict):
tag = Tag(
name=tag.get("name"),
template=tag.get("template"),
template_display_name=tag.get("template_display_name"),
column=tag.get("column"),
fields={
k: datacatalog.TagField(**v) if isinstance(v, dict) else v
for k, v in tag.get("fields", {}).items()
},
)
request = CreateTagRequest(
parent=parent,
tag=tag,
)
result = client.create_tag(request=request, retry=retry, timeout=timeout, metadata=metadata or ())
self.log.info("Created a tag: name=%s", result.name)
return result
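    # Illustrative sketch (not part of the original module): attaching a tag built from an
    # existing template. All IDs and field names are hypothetical.
    #
    #     tag = hook.create_tag(
    #         location="europe-west3",
    #         entry_group="my_entry_group",
    #         entry="my_entry",
    #         template_id="my_template",
    #         tag={"fields": {"source": {"string_value": "gcs"}}},
    #         project_id="my-project",
    #     )
    #     # When template_id is given, the full template resource name is filled in on
    #     # the tag before the request is sent.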
@GoogleBaseHook.fallback_to_default_project_id
def create_tag_template(
self,
location,
tag_template_id: str,
tag_template: dict | TagTemplate,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TagTemplate:
"""
Creates a tag template.
:param location: Required. The location of the tag template to create.
:param tag_template_id: Required. The id of the tag template to create.
:param tag_template: Required. The tag template to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplate`
:param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Creating a new tag template: parent=%s", parent)
# HACK: google-cloud-datacatalog has problems with mapping messages where the value is not a
# primitive type, so we need to convert it manually.
# See: https://github.com/googleapis/python-datacatalog/issues/84
if isinstance(tag_template, dict):
tag_template = datacatalog.TagTemplate(
name=tag_template.get("name"),
display_name=tag_template.get("display_name"),
fields={
k: datacatalog.TagTemplateField(**v) if isinstance(v, dict) else v
for k, v in tag_template.get("fields", {}).items()
},
)
request = datacatalog.CreateTagTemplateRequest(
parent=parent, tag_template_id=tag_template_id, tag_template=tag_template
)
result = client.create_tag_template(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Created a tag template: name=%s", result.name)
return result
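    # Illustrative sketch (not part of the original module): a minimal tag template with a
    # single string field. IDs and display names are hypothetical.
    #
    #     template = hook.create_tag_template(
    #         location="europe-west3",
    #         tag_template_id="my_template",
    #         tag_template={
    #             "display_name": "Governance template",
    #             "fields": {
    #                 "source": {"display_name": "Source", "type_": {"primitive_type": "STRING"}},
    #             },
    #         },
    #         project_id="my-project",
    #     )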
@GoogleBaseHook.fallback_to_default_project_id
def create_tag_template_field(
self,
location: str,
tag_template: str,
tag_template_field_id: str,
tag_template_field: dict | TagTemplateField,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TagTemplateField:
r"""
Creates a field in a tag template.
:param location: Required. The location of the tag template field to create.
        :param tag_template: Required. The ID of the tag template in which the field is created.
:param tag_template_field_id: Required. The ID of the tag template field to create. Field ids can
contain letters (both uppercase and lowercase), numbers (0-9), underscores (\_) and dashes (-).
Field IDs must be at least 1 character long and at most 128 characters long. Field IDs must also
be unique within their template.
:param tag_template_field: Required. The tag template field to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplateField`
:param project_id: The ID of the Google Cloud project that owns the tag template field.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}/tagTemplates/{tag_template}"
self.log.info("Creating a new tag template field: parent=%s", parent)
result = client.create_tag_template_field(
request={
"parent": parent,
"tag_template_field_id": tag_template_field_id,
"tag_template_field": tag_template_field,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Created a tag template field: name=%s", result.name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_entry(
self,
location: str,
entry_group: str,
entry: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes an existing entry.
:param location: Required. The location of the entry to delete.
        :param entry_group: Required. Entry group ID of the entry that is deleted.
:param entry: Entry ID that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}/entries/{entry}"
self.log.info("Deleting a entry: name=%s", name)
client.delete_entry(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ())
self.log.info("Deleted a entry: name=%s", name)
@GoogleBaseHook.fallback_to_default_project_id
def delete_entry_group(
self,
location,
entry_group,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes an EntryGroup.
Only entry groups that do not contain entries can be deleted.
:param location: Required. The location of the entry group to delete.
:param entry_group: Entry group ID that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}"
self.log.info("Deleting a entry group: name=%s", name)
client.delete_entry_group(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
self.log.info("Deleted a entry group: name=%s", name)
@GoogleBaseHook.fallback_to_default_project_id
def delete_tag(
self,
location: str,
entry_group: str,
entry: str,
tag: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a tag.
:param location: Required. The location of the tag to delete.
        :param entry_group: Entry group ID of the tag that is deleted.
        :param entry: Entry ID of the tag that is deleted.
        :param tag: Identifier of the tag that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = (
f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}/entries/{entry}/tags/{tag}"
)
self.log.info("Deleting a tag: name=%s", name)
client.delete_tag(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ())
self.log.info("Deleted a tag: name=%s", name)
@GoogleBaseHook.fallback_to_default_project_id
def delete_tag_template(
self,
location,
tag_template,
force: bool,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a tag template and all tags using the template.
:param location: Required. The location of the tag template to delete.
:param tag_template: ID for tag template that is deleted.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param force: Required. Currently, this field must always be set to ``true``. This confirms the
deletion of any possible tags using this template. ``force = false`` will be supported in the
future.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/tagTemplates/{tag_template}"
self.log.info("Deleting a tag template: name=%s", name)
client.delete_tag_template(
request={"name": name, "force": force}, retry=retry, timeout=timeout, metadata=metadata or ()
)
self.log.info("Deleted a tag template: name=%s", name)
@GoogleBaseHook.fallback_to_default_project_id
def delete_tag_template_field(
self,
location: str,
tag_template: str,
field: str,
force: bool,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a field in a tag template and all uses of that field.
:param location: Required. The location of the tag template to delete.
:param tag_template: Tag Template ID for tag template field that is deleted.
:param field: Name of field that is deleted.
:param force: Required. This confirms the deletion of this field from any tags using this field.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/tagTemplates/{tag_template}/fields/{field}"
self.log.info("Deleting a tag template field: name=%s", name)
client.delete_tag_template_field(
request={"name": name, "force": force}, retry=retry, timeout=timeout, metadata=metadata or ()
)
self.log.info("Deleted a tag template field: name=%s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_entry(
self,
location: str,
entry_group: str,
entry: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
"""
Gets an entry.
:param location: Required. The location of the entry to get.
:param entry_group: Required. The entry group of the entry to get.
:param entry: The ID of the entry to get.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}/entries/{entry}"
self.log.info("Getting a entry: name=%s", name)
result = client.get_entry(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
self.log.info("Received a entry: name=%s", result.name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_entry_group(
self,
location: str,
entry_group: str,
project_id: str,
read_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> EntryGroup:
"""
Gets an entry group.
:param location: Required. The location of the entry group to get.
:param entry_group: The ID of the entry group to get.
:param read_mask: The fields to return. If not set or empty, all fields are returned.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}"
self.log.info("Getting a entry group: name=%s", name)
result = client.get_entry_group(
request={"name": name, "read_mask": read_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Received a entry group: name=%s", result.name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_tag_template(
self,
location: str,
tag_template: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TagTemplate:
"""
Gets a tag template.
:param location: Required. The location of the tag template to get.
:param tag_template: Required. The ID of the tag template to get.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/tagTemplates/{tag_template}"
self.log.info("Getting a tag template: name=%s", name)
result = client.get_tag_template(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
self.log.info("Received a tag template: name=%s", result.name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_tags(
self,
location: str,
entry_group: str,
entry: str,
project_id: str,
page_size: int = 100,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Lists the tags on an Entry.
:param location: Required. The location of the tags to get.
:param entry_group: Required. The entry group of the tags to get.
        :param entry: Required. The entry of the tags to get.
:param page_size: The maximum number of resources contained in the underlying API response. If page
            streaming is performed per-resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}/entries/{entry}"
self.log.info("Listing tag on entry: entry_name=%s", parent)
result = client.list_tags(
request={"parent": parent, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Received tags.")
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_tag_for_template_name(
self,
location: str,
entry_group: str,
entry: str,
template_name: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Tag:
"""
        Gets a tag with a specific template for a specific entry.
:param location: Required. The location which contains the entry to search for.
:param entry_group: The entry group ID which contains the entry to search for.
:param entry: The name of the entry to search for.
:param template_name: The name of the template that will be the search criterion.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
tags_list = self.list_tags(
location=location,
entry_group=entry_group,
entry=entry,
project_id=project_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
tag = next(t for t in tags_list if t.template == template_name)
return tag
def lookup_entry(
self,
linked_resource: str | None = None,
sql_resource: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
r"""
Get an entry by target resource name.
This method allows clients to use the resource name from the source Google Cloud service
to get the Data Catalog Entry.
:param linked_resource: The full name of the Google Cloud resource the Data Catalog entry
represents. See: https://cloud.google.com/apis/design/resource\_names#full\_resource\_name. Full
names are case-sensitive.
:param sql_resource: The SQL name of the entry. SQL names are case-sensitive.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if linked_resource and sql_resource:
raise AirflowException("Only one of linked_resource, sql_resource should be set.")
if not linked_resource and not sql_resource:
raise AirflowException("At least one of linked_resource, sql_resource should be set.")
if linked_resource:
self.log.info("Getting entry: linked_resource=%s", linked_resource)
result = client.lookup_entry(
request={"linked_resource": linked_resource},
retry=retry,
timeout=timeout,
metadata=metadata,
)
else:
self.log.info("Getting entry: sql_resource=%s", sql_resource)
result = client.lookup_entry(
request={"sql_resource": sql_resource},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Received entry. name=%s", result.name)
return result
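    # Illustrative sketch (not part of the original module): looking up the catalog entry
    # of a BigQuery table by its full resource name (project/dataset/table are hypothetical).
    #
    #     entry = hook.lookup_entry(
    #         linked_resource=(
    #             "//bigquery.googleapis.com/projects/my-project"
    #             "/datasets/my_dataset/tables/my_table"
    #         )
    #     )
    #     # Exactly one of linked_resource or sql_resource must be provided.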
@GoogleBaseHook.fallback_to_default_project_id
def rename_tag_template_field(
self,
location: str,
tag_template: str,
field: str,
new_tag_template_field_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TagTemplateField:
"""
Renames a field in a tag template.
:param location: Required. The location of the tag template field to rename.
:param tag_template: The tag template ID for field that is renamed.
:param field: Required. The old ID of this tag template field. For example,
``my_old_field``.
:param new_tag_template_field_id: Required. The new ID of this tag template field. For example,
``my_new_field``.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/tagTemplates/{tag_template}/fields/{field}"
self.log.info(
"Renaming field: old_name=%s, new_tag_template_field_id=%s", name, new_tag_template_field_id
)
result = client.rename_tag_template_field(
request={"name": name, "new_tag_template_field_id": new_tag_template_field_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Renamed tag template field.")
return result
def search_catalog(
self,
scope: dict | SearchCatalogRequest.Scope,
query: str,
page_size: int = 100,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
r"""
Searches Data Catalog for multiple resources like entries, tags that match a query.
This does not return the complete resource, only the resource identifier and high level fields.
Clients can subsequently call ``Get`` methods.
Note that searches do not have full recall. There may be results that match your query but are not
returned, even in subsequent pages of results. These missing results may vary across repeated calls to
search. Do not rely on this method if you need to guarantee full recall.
:param scope: Required. The scope of this search request.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Scope`
:param query: Required. The query string in search query syntax. The query must be non-empty.
            Query strings can be as simple as "x" or more qualified, such as:
- name:x
- column:x
- description:y
Note: Query tokens need to have a minimum of 3 characters for substring matching to work
            correctly. See `Data Catalog Search Syntax
            <https://cloud.google.com/data-catalog/docs/how-to/search-reference>`__ for more information.
:param page_size: The maximum number of resources contained in the underlying API response. If page
streaming is performed per-resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:param order_by: Specifies the ordering of results, currently supported case-sensitive choices are:
- ``relevance``, only supports descending
- ``last_access_timestamp [asc|desc]``, defaults to descending if not specified
- ``last_modified_timestamp [asc|desc]``, defaults to descending if not specified
If not specified, defaults to ``relevance`` descending.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
self.log.info(
"Searching catalog: scope=%s, query=%s, page_size=%s, order_by=%s",
scope,
query,
page_size,
order_by,
)
result = client.search_catalog(
request={"scope": scope, "query": query, "page_size": page_size, "order_by": order_by},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Received items.")
return result
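    # Illustrative sketch (not part of the original module): a search scoped to a single
    # project. The project ID and query string are hypothetical.
    #
    #     results = hook.search_catalog(
    #         scope={"include_project_ids": ["my-project"]},
    #         query="system=bigquery type=table",
    #     )
    #     for result in results:
    #         print(result.relative_resource_name)
    #     # The result is a pager; iterating it fetches subsequent pages transparently.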
@GoogleBaseHook.fallback_to_default_project_id
def update_entry(
self,
entry: dict | Entry,
update_mask: dict | FieldMask,
project_id: str,
location: str | None = None,
entry_group: str | None = None,
entry_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
"""
Updates an existing entry.
:param entry: Required. The updated entry. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Entry`
:param update_mask: The fields to update on the entry. If absent or empty, all modifiable fields are
updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param location: Required. The location of the entry to update.
:param entry_group: The entry group ID for the entry that is being updated.
:param entry_id: The entry ID that is being updated.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if project_id and location and entry_group and entry_id:
full_entry_name = (
f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}/entries/{entry_id}"
)
if isinstance(entry, Entry):
entry.name = full_entry_name
elif isinstance(entry, dict):
entry["name"] = full_entry_name
else:
raise AirflowException("Unable to set entry's name.")
elif location and entry_group and entry_id:
raise AirflowException(
"You must provide all the parameters (project_id, location, entry_group, entry_id) "
"contained in the name, or do not specify any parameters and pass the name on the object "
)
name = entry.name if isinstance(entry, Entry) else entry["name"]
self.log.info("Updating entry: name=%s", name)
# HACK: google-cloud-datacatalog has a problem with dictionaries for update methods.
if isinstance(entry, dict):
entry = Entry(**entry)
result = client.update_entry(
request={"entry": entry, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Updated entry.")
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_tag(
self,
tag: dict | Tag,
update_mask: dict | FieldMask,
project_id: str,
location: str | None = None,
entry_group: str | None = None,
entry: str | None = None,
tag_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Tag:
"""
Updates an existing tag.
:param tag: Required. The updated tag. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.Tag`
:param update_mask: The fields to update on the Tag. If absent or empty, all modifiable fields are
updated. Currently the only modifiable field is the field ``fields``.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.FieldMask`
        :param location: Required. The location of the tag to update.
:param entry_group: The entry group ID for the tag that is being updated.
:param entry: The entry ID for the tag that is being updated.
:param tag_id: The tag ID that is being updated.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if project_id and location and entry_group and entry and tag_id:
full_tag_name = (
f"projects/{project_id}/locations/{location}/entryGroups/{entry_group}/entries/{entry}"
f"/tags/{tag_id}"
)
if isinstance(tag, Tag):
tag.name = full_tag_name
elif isinstance(tag, dict):
tag["name"] = full_tag_name
else:
raise AirflowException("Unable to set tag's name.")
elif location and entry_group and entry and tag_id:
raise AirflowException(
"You must provide all the parameters (project_id, location, entry_group, entry, tag_id) "
"contained in the name, or do not specify any parameters and pass the name on the object "
)
name = tag.name if isinstance(tag, Tag) else tag["name"]
self.log.info("Updating tag: name=%s", name)
# HACK: google-cloud-datacatalog has a problem with dictionaries for update methods.
if isinstance(tag, dict):
tag = Tag(**tag)
result = client.update_tag(
request={"tag": tag, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Updated tag.")
return result
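    # Illustrative usage sketch (not part of the provider code): overwriting the ``fields`` of an
    # existing tag with ``update_tag``. ``hook`` is assumed to be an instance of the enclosing Data
    # Catalog hook; all identifiers are placeholders.
    #
    #   hook.update_tag(
    #       tag={"fields": {"owner": {"string_value": "data-platform"}}},
    #       update_mask={"paths": ["fields"]},
    #       location="europe-west1",
    #       entry_group="clickstream",
    #       entry="raw_events",
    #       tag_id="governance",
    #       project_id="my-gcp-project",
    #   )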
@GoogleBaseHook.fallback_to_default_project_id
def update_tag_template(
self,
tag_template: dict | TagTemplate,
update_mask: dict | FieldMask,
project_id: str,
location: str | None = None,
tag_template_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TagTemplate:
"""
Updates a tag template.
This method cannot be used to update the fields of a template. The tag
template fields are represented as separate resources and should be updated using their own
create/update/delete methods.
:param tag_template: Required. The template to update. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplate`
:param update_mask: The field mask specifies the parts of the template to overwrite.
If absent or empty, all of the allowed fields above will be updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
        :param location: Required. The location of the tag template to update.
        :param tag_template_id: Optional. The ID of the tag template that is being updated.
        :param project_id: The ID of the Google Cloud project that owns the tag template.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
        if project_id and location and tag_template_id:
full_tag_template_name = (
f"projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id}"
)
if isinstance(tag_template, TagTemplate):
tag_template.name = full_tag_template_name
elif isinstance(tag_template, dict):
tag_template["name"] = full_tag_template_name
else:
raise AirflowException("Unable to set name of tag template.")
        elif location and tag_template_id:
raise AirflowException(
"You must provide all the parameters (project_id, location, tag_template_id) "
"contained in the name, or do not specify any parameters and pass the name on the object "
)
name = tag_template.name if isinstance(tag_template, TagTemplate) else tag_template["name"]
self.log.info("Updating tag template: name=%s", name)
# HACK: google-cloud-datacatalog has a problem with dictionaries for update methods.
if isinstance(tag_template, dict):
tag_template = TagTemplate(**tag_template)
result = client.update_tag_template(
request={"tag_template": tag_template, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Updated tag template.")
return result
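    # Illustrative usage sketch (not part of the provider code): changing only the display name of a
    # tag template. Template fields themselves must be updated through the dedicated
    # tag-template-field methods, as noted in the docstring above. Identifiers are placeholders.
    #
    #   hook.update_tag_template(
    #       tag_template={"display_name": "Data governance"},
    #       update_mask={"paths": ["display_name"]},
    #       location="europe-west1",
    #       tag_template_id="governance",
    #       project_id="my-gcp-project",
    #   )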
@GoogleBaseHook.fallback_to_default_project_id
def update_tag_template_field(
self,
tag_template_field: dict | TagTemplateField,
update_mask: dict | FieldMask,
project_id: str,
tag_template_field_name: str | None = None,
location: str | None = None,
tag_template: str | None = None,
tag_template_field_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Updates a field in a tag template. This method cannot be used to update the field type.
        :param tag_template_field: Required. The tag template field to update.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplateField`
        :param update_mask: The field mask specifies the parts of the template to be updated. Allowed fields:

            - ``display_name``
            - ``type.enum_type``

            If ``update_mask`` is not set or empty, all of the allowed fields above will be updated.
When updating an enum type, the provided values will be merged with the existing values.
Therefore, enum values can only be added, existing enum values cannot be deleted nor renamed.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
        :param tag_template_field_name: Optional. The full resource name of the tag template field to update.
        :param location: Optional. The location of the tag template field to update.
        :param tag_template: Optional. The tag template ID of the tag template field to update.
        :param tag_template_field_id: Optional. The ID of the tag template field to update.
        :param project_id: The ID of the Google Cloud project that owns the tag template field.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if project_id and location and tag_template and tag_template_field_id:
tag_template_field_name = (
f"projects/{project_id}/locations/{location}/tagTemplates/{tag_template}"
f"/fields/{tag_template_field_id}"
)
self.log.info("Updating tag template field: name=%s", tag_template_field_name)
result = client.update_tag_template_field(
request={
"name": tag_template_field_name,
"tag_template_field": tag_template_field,
"update_mask": update_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Updated tag template field.")
return result
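    # Illustrative usage sketch (not part of the provider code): updating the display name of a
    # single tag template field, letting the hook assemble the full field name from its parts.
    # All identifiers are placeholders.
    #
    #   hook.update_tag_template_field(
    #       tag_template_field={"display_name": "Data owner"},
    #       update_mask={"paths": ["display_name"]},
    #       location="europe-west1",
    #       tag_template="governance",
    #       tag_template_field_id="owner",
    #       project_id="my-gcp-project",
    #   )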
| 54,262 | 45.577682 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/dlp.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a CloudDLPHook which allows you to connect to Google Cloud DLP service.
.. spelling:word-list::
ImageRedactionConfig
RedactImageRequest
"""
from __future__ import annotations
import re
import time
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.dlp import DlpServiceClient
from google.cloud.dlp_v2.types import (
ByteContentItem,
ContentItem,
DeidentifyConfig,
DeidentifyContentResponse,
DeidentifyTemplate,
DlpJob,
InspectConfig,
InspectContentResponse,
InspectJobConfig,
InspectTemplate,
JobTrigger,
ListInfoTypesResponse,
RedactImageRequest,
RedactImageResponse,
ReidentifyContentResponse,
RiskAnalysisJobConfig,
StoredInfoType,
StoredInfoTypeConfig,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
DLP_JOB_PATH_PATTERN = "^projects/[^/]+/dlpJobs/(?P<job>.*?)$"
class CloudDLPHook(GoogleBaseHook):
"""
Hook for Google Cloud Data Loss Prevention (DLP) APIs.
Cloud DLP allows clients to detect the presence of Personally Identifiable
Information (PII) and other privacy-sensitive data in user-supplied,
unstructured data streams, like text blocks or images. The service also
includes methods for sensitive data redaction and scheduling of data scans
on Google Cloud based data sets.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: DlpServiceClient | None = None
def get_conn(self) -> DlpServiceClient:
"""
Provides a client for interacting with the Cloud DLP API.
:return: Google Cloud DLP API Client
"""
if not self._client:
self._client = DlpServiceClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
def _project_deidentify_template_path(self, project_id, template_id):
return f"{DlpServiceClient.common_project_path(project_id)}/deidentifyTemplates/{template_id}"
def _project_stored_info_type_path(self, project_id, info_type_id):
return f"{DlpServiceClient.common_project_path(project_id)}/storedInfoTypes/{info_type_id}"
def _project_inspect_template_path(self, project_id, inspect_template_id):
return f"{DlpServiceClient.common_project_path(project_id)}/inspectTemplates/{inspect_template_id}"
@GoogleBaseHook.fallback_to_default_project_id
def cancel_dlp_job(
self,
dlp_job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Starts asynchronous cancellation on a long-running DLP job.
:param dlp_job_id: ID of the DLP job resource to be cancelled.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default project_id
from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not dlp_job_id:
raise AirflowException("Please provide the ID of the DLP job resource to be cancelled.")
name = DlpServiceClient.dlp_job_path(project_id, dlp_job_id)
client.cancel_dlp_job(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def create_deidentify_template(
self,
organization_id: str | None = None,
project_id: str | None = None,
deidentify_template: dict | DeidentifyTemplate | None = None,
template_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> DeidentifyTemplate:
"""
Create a deidentify template to reuse frequently-used configurations for content, images, and storage.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param deidentify_template: (Optional) The de-identify template to create.
:param template_id: (Optional) The template ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
parent = DlpServiceClient.common_organization_path(organization_id)
elif project_id:
parent = DlpServiceClient.common_project_path(project_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.create_deidentify_template(
request=dict(
parent=parent,
deidentify_template=deidentify_template,
template_id=template_id,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_dlp_job(
self,
project_id: str = PROVIDE_PROJECT_ID,
inspect_job: dict | InspectJobConfig | None = None,
risk_job: dict | RiskAnalysisJobConfig | None = None,
job_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
wait_until_finished: bool = True,
time_to_sleep_in_seconds: int = 60,
) -> DlpJob:
"""
Creates a new job to inspect storage or calculate risk metrics.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param inspect_job: (Optional) The configuration for the inspect job.
:param risk_job: (Optional) The configuration for the risk job.
:param job_id: (Optional) The job ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param wait_until_finished: (Optional) If true, it will keep polling the job state
until it is set to DONE.
:param time_to_sleep_in_seconds: (Optional) Time to sleep, in seconds, between active checks
of the operation results. Defaults to 60.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
job = client.create_dlp_job(
request=dict(
parent=parent,
inspect_job=inspect_job,
risk_job=risk_job,
job_id=job_id,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
if wait_until_finished:
pattern = re.compile(DLP_JOB_PATH_PATTERN, re.IGNORECASE)
match = pattern.match(job.name)
if match is not None:
job_name = match.groupdict()["job"]
else:
raise AirflowException(f"Unable to retrieve DLP job's ID from {job.name}.")
while wait_until_finished:
job = self.get_dlp_job(dlp_job_id=job_name, project_id=project_id)
self.log.info("DLP job %s state: %s.", job.name, job.state)
if job.state == DlpJob.JobState.DONE:
return job
elif job.state in [
DlpJob.JobState.PENDING,
DlpJob.JobState.RUNNING,
DlpJob.JobState.JOB_STATE_UNSPECIFIED,
]:
time.sleep(time_to_sleep_in_seconds)
else:
raise AirflowException(
"Stopped polling DLP job state. "
f"DLP job {job.name} state: {DlpJob.JobState.Name(job.state)}."
)
return job
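    # Illustrative usage sketch (not part of the provider code): starting an inspect job against a
    # BigQuery table and letting the hook poll until the job reaches DONE. The table reference,
    # job ID and project are hypothetical placeholders.
    #
    #   hook = CloudDLPHook(gcp_conn_id="google_cloud_default")
    #   job = hook.create_dlp_job(
    #       project_id="my-gcp-project",
    #       inspect_job={
    #           "storage_config": {
    #               "big_query_options": {
    #                   "table_reference": {
    #                       "project_id": "my-gcp-project",
    #                       "dataset_id": "raw",
    #                       "table_id": "users",
    #                   }
    #               }
    #           },
    #           "inspect_config": {"info_types": [{"name": "EMAIL_ADDRESS"}]},
    #       },
    #       job_id="airflow-inspect-users",
    #       time_to_sleep_in_seconds=30,
    #   )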
def create_inspect_template(
self,
organization_id: str | None = None,
project_id: str | None = None,
inspect_template: InspectTemplate | None = None,
template_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InspectTemplate:
"""
Create an inspect template to reuse frequently used configurations for content, images, and storage.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param inspect_template: (Optional) The inspect template to create.
:param template_id: (Optional) The template ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
parent = DlpServiceClient.common_organization_path(organization_id)
elif project_id:
parent = DlpServiceClient.common_project_path(project_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.create_inspect_template(
request=dict(
parent=parent,
inspect_template=inspect_template,
template_id=template_id,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
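    # Illustrative usage sketch (not part of the provider code): creating a project-scoped inspect
    # template that looks for e-mail addresses and phone numbers. Identifiers are placeholders and
    # the template is passed in dict form, which the underlying client converts to a proto message.
    #
    #   template = hook.create_inspect_template(
    #       project_id="my-gcp-project",
    #       inspect_template={
    #           "display_name": "pii-basic",
    #           "inspect_config": {
    #               "info_types": [{"name": "EMAIL_ADDRESS"}, {"name": "PHONE_NUMBER"}]
    #           },
    #       },
    #       template_id="pii_basic",
    #   )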
@GoogleBaseHook.fallback_to_default_project_id
def create_job_trigger(
self,
project_id: str = PROVIDE_PROJECT_ID,
job_trigger: dict | JobTrigger | None = None,
trigger_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> JobTrigger:
"""
Create a job trigger to run DLP actions such as scanning storage for sensitive info on a set schedule.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param job_trigger: (Optional) The job trigger to create.
:param trigger_id: (Optional) The job trigger ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
return client.create_job_trigger(
request=dict(
parent=parent,
job_trigger=job_trigger,
trigger_id=trigger_id,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
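    # Illustrative usage sketch (not part of the provider code): creating a trigger that re-runs an
    # inspect job roughly once a day; the schedule is expressed as a recurrence period in seconds.
    # The ``inspect_job`` dict has the same shape as in the ``create_dlp_job`` sketch above.
    #
    #   trigger = hook.create_job_trigger(
    #       project_id="my-gcp-project",
    #       job_trigger={
    #           "inspect_job": inspect_job,
    #           "triggers": [{"schedule": {"recurrence_period_duration": {"seconds": 86400}}}],
    #           "status": "HEALTHY",
    #       },
    #       trigger_id="daily-users-scan",
    #   )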
def create_stored_info_type(
self,
organization_id: str | None = None,
project_id: str | None = None,
config: dict | StoredInfoTypeConfig | None = None,
stored_info_type_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> StoredInfoType:
"""
Creates a pre-built stored info type to be used for inspection.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param config: (Optional) The config for the stored info type.
:param stored_info_type_id: (Optional) The stored info type ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
parent = DlpServiceClient.common_organization_path(organization_id)
elif project_id:
parent = DlpServiceClient.common_project_path(project_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.create_stored_info_type(
request=dict(
parent=parent,
config=config,
stored_info_type_id=stored_info_type_id,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def deidentify_content(
self,
project_id: str = PROVIDE_PROJECT_ID,
deidentify_config: dict | DeidentifyConfig | None = None,
inspect_config: dict | InspectConfig | None = None,
item: dict | ContentItem | None = None,
inspect_template_name: str | None = None,
deidentify_template_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> DeidentifyContentResponse:
"""
De-identifies potentially sensitive info from a content item; limits input size and output size.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param deidentify_config: (Optional) Configuration for the de-identification of the
content item. Items specified here will override the template referenced by the
deidentify_template_name argument.
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
:param item: (Optional) The item to de-identify. Will be treated as text.
:param inspect_template_name: (Optional) Optional template to use. Any configuration
directly specified in inspect_config will override those set in the template.
:param deidentify_template_name: (Optional) Optional template to use. Any
configuration directly specified in deidentify_config will override those set
in the template.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
return client.deidentify_content(
request=dict(
parent=parent,
deidentify_config=deidentify_config,
inspect_config=inspect_config,
item=item,
inspect_template_name=inspect_template_name,
deidentify_template_name=deidentify_template_name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
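    # Illustrative usage sketch (not part of the provider code): masking e-mail addresses found in
    # a plain-text item with ``#`` characters. All values are placeholders.
    #
    #   response = hook.deidentify_content(
    #       project_id="my-gcp-project",
    #       item={"value": "Contact me at jane.doe@example.com"},
    #       inspect_config={"info_types": [{"name": "EMAIL_ADDRESS"}]},
    #       deidentify_config={
    #           "info_type_transformations": {
    #               "transformations": [
    #                   {"primitive_transformation": {"character_mask_config": {"masking_character": "#"}}}
    #               ]
    #           }
    #       },
    #   )
    #   print(response.item.value)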
    def delete_deidentify_template(
        self,
        template_id: str,
        organization_id: str | None = None,
        project_id: str | None = None,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ) -> None:
"""
Deletes a deidentify template.
:param template_id: The ID of deidentify template to be deleted.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not template_id:
raise AirflowException("Please provide the ID of deidentify template to be deleted.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.deidentify_template_path(organization_id, template_id)
elif project_id:
name = self._project_deidentify_template_path(project_id, template_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
client.delete_deidentify_template(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_dlp_job(
self,
dlp_job_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a long-running DLP job.
This method indicates that the client is no longer interested in the DLP job result.
The job will be cancelled if possible.
        :param dlp_job_id: The ID of the DLP job resource to be deleted.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not dlp_job_id:
raise AirflowException("Please provide the ID of the DLP job resource to be cancelled.")
name = DlpServiceClient.dlp_job_path(project_id, dlp_job_id)
client.delete_dlp_job(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def delete_inspect_template(
self,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes an inspect template.
:param template_id: The ID of the inspect template to be deleted.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not template_id:
raise AirflowException("Please provide the ID of the inspect template to be deleted.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.inspect_template_path(organization_id, template_id)
elif project_id:
name = self._project_inspect_template_path(project_id, template_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
client.delete_inspect_template(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_job_trigger(
self,
job_trigger_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a job trigger.
:param job_trigger_id: The ID of the DLP job trigger to be deleted.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not job_trigger_id:
raise AirflowException("Please provide the ID of the DLP job trigger to be deleted.")
name = DlpServiceClient.job_trigger_path(project_id, job_trigger_id)
client.delete_job_trigger(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def delete_stored_info_type(
self,
stored_info_type_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a stored info type.
:param stored_info_type_id: The ID of the stored info type to be deleted.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not stored_info_type_id:
raise AirflowException("Please provide the ID of the stored info type to be deleted.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.stored_info_type_path(organization_id, stored_info_type_id)
elif project_id:
name = self._project_stored_info_type_path(project_id, stored_info_type_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
client.delete_stored_info_type(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_deidentify_template(
self,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> DeidentifyTemplate:
"""
Gets a deidentify template.
:param template_id: The ID of deidentify template to be read.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not template_id:
raise AirflowException("Please provide the ID of the deidentify template to be read.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.deidentify_template_path(organization_id, template_id)
elif project_id:
name = self._project_deidentify_template_path(project_id, template_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.get_deidentify_template(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_dlp_job(
self,
dlp_job_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> DlpJob:
"""
        Gets the latest state of a long-running DLP job.
:param dlp_job_id: The ID of the DLP job resource to be read.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not dlp_job_id:
raise AirflowException("Please provide the ID of the DLP job resource to be read.")
name = DlpServiceClient.dlp_job_path(project_id, dlp_job_id)
return client.get_dlp_job(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_inspect_template(
self,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InspectTemplate:
"""
Gets an inspect template.
:param template_id: The ID of inspect template to be read.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not template_id:
raise AirflowException("Please provide the ID of the inspect template to be read.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.inspect_template_path(organization_id, template_id)
elif project_id:
name = self._project_inspect_template_path(project_id, template_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.get_inspect_template(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_job_trigger(
self,
job_trigger_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> JobTrigger:
"""
Gets a DLP job trigger.
:param job_trigger_id: The ID of the DLP job trigger to be read.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not job_trigger_id:
raise AirflowException("Please provide the ID of the DLP job trigger to be read.")
name = DlpServiceClient.job_trigger_path(project_id, job_trigger_id)
return client.get_job_trigger(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_stored_info_type(
self,
stored_info_type_id: str,
organization_id: str | None = None,
project_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> StoredInfoType:
"""
Gets a stored info type.
:param stored_info_type_id: The ID of the stored info type to be read.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not stored_info_type_id:
raise AirflowException("Please provide the ID of the stored info type to be read.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.stored_info_type_path(organization_id, stored_info_type_id)
elif project_id:
name = self._project_stored_info_type_path(project_id, stored_info_type_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.get_stored_info_type(
request=dict(
name=name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def inspect_content(
self,
project_id: str,
inspect_config: dict | InspectConfig | None = None,
item: dict | ContentItem | None = None,
inspect_template_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InspectContentResponse:
"""
Finds potentially sensitive info in content; limits input size, processing time, and output size.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
        :param item: (Optional) The item to inspect. Will be treated as text.
:param inspect_template_name: (Optional) Optional template to use. Any configuration
directly specified in inspect_config will override those set in the template.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
return client.inspect_content(
request=dict(
parent=parent,
inspect_config=inspect_config,
item=item,
inspect_template_name=inspect_template_name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
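    # Illustrative usage sketch (not part of the provider code): inspecting a short text item and
    # iterating over the findings returned by the API.
    #
    #   response = hook.inspect_content(
    #       project_id="my-gcp-project",
    #       item={"value": "Call 555-0100 or mail jane.doe@example.com"},
    #       inspect_config={"info_types": [{"name": "EMAIL_ADDRESS"}, {"name": "PHONE_NUMBER"}]},
    #   )
    #   for finding in response.result.findings:
    #       print(finding.info_type.name, finding.likelihood)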
def list_deidentify_templates(
self,
organization_id: str | None = None,
project_id: str | None = None,
page_size: int | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[DeidentifyTemplate]:
"""
Lists deidentify templates.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
parent = DlpServiceClient.common_organization_path(organization_id)
elif project_id:
parent = DlpServiceClient.common_project_path(project_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
results = client.list_deidentify_templates(
request=dict(
parent=parent,
page_size=page_size,
order_by=order_by,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(results)
@GoogleBaseHook.fallback_to_default_project_id
def list_dlp_jobs(
self,
project_id: str,
results_filter: str | None = None,
page_size: int | None = None,
job_type: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[DlpJob]:
"""
Lists DLP jobs that match the specified filter in the request.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param job_type: (Optional) The type of job.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
results = client.list_dlp_jobs(
request=dict(
parent=parent,
filter=results_filter,
page_size=page_size,
type_=job_type,
order_by=order_by,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(results)
def list_info_types(
self,
language_code: str | None = None,
results_filter: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListInfoTypesResponse:
"""
Returns a list of the sensitive information types that the DLP API supports.
:param language_code: (Optional) Optional BCP-47 language code for localized info
type friendly names. If omitted, or if localized strings are not available,
en-US strings will be returned.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
return client.list_info_types(
request=dict(
language_code=language_code,
filter=results_filter,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def list_inspect_templates(
self,
organization_id: str | None = None,
project_id: str | None = None,
page_size: int | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[InspectTemplate]:
"""
Lists inspect templates.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
parent = DlpServiceClient.common_organization_path(organization_id)
elif project_id:
parent = DlpServiceClient.common_project_path(project_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
results = client.list_inspect_templates(
request=dict(
parent=parent,
page_size=page_size,
order_by=order_by,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(results)
@GoogleBaseHook.fallback_to_default_project_id
def list_job_triggers(
self,
project_id: str,
page_size: int | None = None,
order_by: str | None = None,
results_filter: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[JobTrigger]:
"""
Lists job triggers.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
results = client.list_job_triggers(
request=dict(
parent=parent,
page_size=page_size,
order_by=order_by,
filter=results_filter,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(results)
def list_stored_info_types(
self,
organization_id: str | None = None,
project_id: str | None = None,
page_size: int | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[StoredInfoType]:
"""
Lists stored info types.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
parent = DlpServiceClient.common_organization_path(organization_id)
elif project_id:
parent = DlpServiceClient.common_project_path(project_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
results = client.list_stored_info_types(
request=dict(
parent=parent,
page_size=page_size,
order_by=order_by,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(results)
@GoogleBaseHook.fallback_to_default_project_id
def redact_image(
self,
project_id: str,
inspect_config: dict | InspectConfig | None = None,
image_redaction_configs: None | (list[dict] | list[RedactImageRequest.ImageRedactionConfig]) = None,
include_findings: bool | None = None,
byte_item: dict | ByteContentItem | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> RedactImageResponse:
"""
Redacts potentially sensitive info from an image; limits input size, processing time, and output size.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
:param image_redaction_configs: (Optional) The configuration for specifying what
content to redact from images.
            Each element must be a dict or an instance of
            ``google.cloud.dlp_v2.types.RedactImageRequest.ImageRedactionConfig``.
:param include_findings: (Optional) Whether the response should include findings
along with the redacted image.
:param byte_item: (Optional) The content must be PNG, JPEG, SVG or BMP.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
return client.redact_image(
request=dict(
parent=parent,
inspect_config=inspect_config,
image_redaction_configs=image_redaction_configs,
include_findings=include_findings,
byte_item=byte_item,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
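    # Illustrative usage sketch (not part of the provider code): redacting e-mail addresses from a
    # PNG image and writing the redacted bytes back to disk. File paths are placeholders and
    # ``type_`` is the field name exposed by the proto-plus ``ByteContentItem`` message.
    #
    #   with open("/tmp/scan.png", "rb") as source:
    #       response = hook.redact_image(
    #           project_id="my-gcp-project",
    #           byte_item={"type_": "IMAGE_PNG", "data": source.read()},
    #           inspect_config={"info_types": [{"name": "EMAIL_ADDRESS"}]},
    #           image_redaction_configs=[{"info_type": {"name": "EMAIL_ADDRESS"}}],
    #       )
    #   with open("/tmp/scan_redacted.png", "wb") as target:
    #       target.write(response.redacted_image)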
@GoogleBaseHook.fallback_to_default_project_id
def reidentify_content(
self,
project_id: str,
reidentify_config: dict | DeidentifyConfig | None = None,
inspect_config: dict | InspectConfig | None = None,
item: dict | ContentItem | None = None,
inspect_template_name: str | None = None,
reidentify_template_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ReidentifyContentResponse:
"""
Re-identifies content that has been de-identified.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param reidentify_config: (Optional) Configuration for the re-identification of
the content item.
:param inspect_config: (Optional) Configuration for the inspector.
:param item: (Optional) The item to re-identify. Will be treated as text.
:param inspect_template_name: (Optional) Optional template to use. Any configuration
directly specified in inspect_config will override those set in the template.
:param reidentify_template_name: (Optional) Optional template to use. References an
instance of deidentify template. Any configuration directly specified in
reidentify_config or inspect_config will override those set in the template.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = DlpServiceClient.common_project_path(project_id)
return client.reidentify_content(
request=dict(
parent=parent,
reidentify_config=reidentify_config,
inspect_config=inspect_config,
item=item,
inspect_template_name=inspect_template_name,
reidentify_template_name=reidentify_template_name,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def update_deidentify_template(
self,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
deidentify_template: dict | DeidentifyTemplate | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> DeidentifyTemplate:
"""
Updates the deidentify template.
:param template_id: The ID of deidentify template to be updated.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param deidentify_template: New deidentify template value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not template_id:
raise AirflowException("Please provide the ID of deidentify template to be updated.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.deidentify_template_path(organization_id, template_id)
elif project_id:
name = self._project_deidentify_template_path(project_id, template_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.update_deidentify_template(
request=dict(
name=name,
deidentify_template=deidentify_template,
update_mask=update_mask,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
def update_inspect_template(
self,
template_id: str,
organization_id: str | None = None,
project_id: str | None = None,
inspect_template: dict | InspectTemplate | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InspectTemplate:
"""
Updates the inspect template.
:param template_id: The ID of the inspect template to be updated.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param inspect_template: New inspect template value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not template_id:
raise AirflowException("Please provide the ID of the inspect template to be updated.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.inspect_template_path(organization_id, template_id)
elif project_id:
name = self._project_inspect_template_path(project_id, template_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.update_inspect_template(
request=dict(
name=name,
inspect_template=inspect_template,
update_mask=update_mask,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_job_trigger(
self,
job_trigger_id: str,
project_id: str,
job_trigger: dict | JobTrigger | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> JobTrigger:
"""
Updates a job trigger.
:param job_trigger_id: The ID of the DLP job trigger to be updated.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param job_trigger: New job trigger value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(job_trigger, dict):
job_trigger = JobTrigger(**job_trigger)
if isinstance(update_mask, dict):
update_mask = FieldMask(**update_mask)
if not job_trigger_id:
raise AirflowException("Please provide the ID of the DLP job trigger to be updated.")
name = DlpServiceClient.job_trigger_path(project_id, job_trigger_id)
return client.update_job_trigger(
name=name,
job_trigger=job_trigger,
update_mask=update_mask,
retry=retry,
timeout=timeout,
metadata=metadata,
)
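    # Illustrative sketch (not part of the hook API): updating only the display name of an existing
    # DLP job trigger. ``hook``, the trigger ID and the project ID below are placeholders; dicts are
    # accepted because the method converts them to JobTrigger and FieldMask objects itself.
    #
    #   hook.update_job_trigger(
    #       job_trigger_id="example-trigger",
    #       project_id="example-project",
    #       job_trigger={"display_name": "Nightly scan"},
    #       update_mask={"paths": ["display_name"]},
    #   )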
def update_stored_info_type(
self,
stored_info_type_id: str,
organization_id: str | None = None,
project_id: str | None = None,
config: dict | StoredInfoTypeConfig | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> StoredInfoType:
"""
Updates the stored info type by creating a new version.
:param stored_info_type_id: The ID of the stored info type to be updated.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param config: Updated configuration for the stored info type. If not provided, a new
version of the stored info type will be created with the existing configuration.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if not stored_info_type_id:
raise AirflowException("Please provide the ID of the stored info type to be updated.")
# Handle project_id from connection configuration
project_id = project_id or self.project_id
if organization_id:
name = DlpServiceClient.stored_info_type_path(organization_id, stored_info_type_id)
elif project_id:
name = self._project_stored_info_type_path(project_id, stored_info_type_id)
else:
raise AirflowException("Please provide either organization_id or project_id.")
return client.update_stored_info_type(
request=dict(
name=name,
config=config,
update_mask=update_mask,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
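    # Illustrative sketch (not part of the hook API): creating a new version of a stored info type
    # backed by a large custom dictionary. All IDs and GCS paths are placeholders; the config layout
    # follows the StoredInfoTypeConfig message but should be checked against the DLP client version.
    #
    #   hook.update_stored_info_type(
    #       stored_info_type_id="example-stored-info-type",
    #       project_id="example-project",
    #       config={
    #           "display_name": "Customer IDs",
    #           "large_custom_dictionary": {
    #               "output_path": {"path": "gs://example-bucket/dlp/dictionary-output"},
    #               "cloud_storage_file_set": {"url": "gs://example-bucket/dlp/terms.txt"},
    #           },
    #       },
    #   )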
| 67,359 | 42.069054 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/compute_ssh.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import shlex
import time
from functools import cached_property
from io import StringIO
from typing import Any
from google.api_core.retry import exponential_sleep_generator
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.compute import ComputeEngineHook
from airflow.providers.google.cloud.hooks.os_login import OSLoginHook
from airflow.providers.ssh.hooks.ssh import SSHHook
from airflow.utils.types import NOTSET, ArgNotSet
# Paramiko should be imported after airflow.providers.ssh. Then the import will fail with
# cannot import "airflow.providers.ssh" and will be correctly discovered as optional feature
# TODO:(potiuk) We should add test harness detecting such cases shortly
import paramiko # isort:skip
CMD_TIMEOUT = 10
class _GCloudAuthorizedSSHClient(paramiko.SSHClient):
"""SSH Client that maintains the context for gcloud authorization during the connection."""
def __init__(self, google_hook, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ssh_client = paramiko.SSHClient()
self.google_hook = google_hook
self.decorator = None
def connect(self, *args, **kwargs):
self.decorator = self.google_hook.provide_authorized_gcloud()
self.decorator.__enter__()
return super().connect(*args, **kwargs)
def close(self):
if self.decorator:
self.decorator.__exit__(None, None, None)
self.decorator = None
return super().close()
def __exit__(self, type_, value, traceback):
if self.decorator:
self.decorator.__exit__(type_, value, traceback)
self.decorator = None
return super().__exit__(type_, value, traceback)
class ComputeEngineSSHHook(SSHHook):
"""
Hook to connect to a remote instance in compute engine.
:param instance_name: The name of the Compute Engine instance
:param zone: The zone of the Compute Engine instance
:param user: The name of the user on which the login attempt will be made
:param project_id: The project ID of the remote instance
:param gcp_conn_id: The connection id to use when fetching connection info
:param hostname: The hostname of the target instance. If it is not passed, it will be detected
automatically.
:param use_iap_tunnel: Whether to connect through IAP tunnel
:param use_internal_ip: Whether to connect using internal IP
:param use_oslogin: Whether to manage keys using OsLogin API. If false,
keys are managed using instance metadata
:param expire_time: The maximum amount of time in seconds before the private key expires
"""
conn_name_attr = "gcp_conn_id"
default_conn_name = "google_cloud_ssh_default"
conn_type = "gcpssh"
hook_name = "Google Cloud SSH"
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
return {
"hidden_fields": ["host", "schema", "login", "password", "port", "extra"],
"relabeling": {},
}
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
instance_name: str | None = None,
zone: str | None = None,
user: str | None = "root",
project_id: str | None = None,
hostname: str | None = None,
use_internal_ip: bool = False,
use_iap_tunnel: bool = False,
use_oslogin: bool = True,
expire_time: int = 300,
cmd_timeout: int | ArgNotSet = NOTSET,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
# Ignore original constructor
# super().__init__()
self.instance_name = instance_name
self.zone = zone
self.user = user
self.project_id = project_id
self.hostname = hostname
self.use_internal_ip = use_internal_ip
self.use_iap_tunnel = use_iap_tunnel
self.use_oslogin = use_oslogin
self.expire_time = expire_time
self.gcp_conn_id = gcp_conn_id
self.cmd_timeout = cmd_timeout
self._conn: Any | None = None
@cached_property
def _oslogin_hook(self) -> OSLoginHook:
return OSLoginHook(gcp_conn_id=self.gcp_conn_id)
@cached_property
def _compute_hook(self) -> ComputeEngineHook:
return ComputeEngineHook(gcp_conn_id=self.gcp_conn_id)
def _load_connection_config(self):
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == "false":
return False
elif value.lower() == "true":
return True
return False
def intify(key, value, default):
if value is None:
return default
if isinstance(value, str) and value.strip() == "":
return default
try:
return int(value)
except ValueError:
raise AirflowException(
f"The {key} field should be a integer. "
f'Current value: "{value}" (type: {type(value)}). '
f"Please check the connection configuration."
)
conn = self.get_connection(self.gcp_conn_id)
if conn and conn.conn_type == "gcpssh":
self.instance_name = self._compute_hook._get_field("instance_name", self.instance_name)
self.zone = self._compute_hook._get_field("zone", self.zone)
self.user = conn.login if conn.login else self.user
# self.project_id is skipped intentionally
self.hostname = conn.host if conn.host else self.hostname
self.use_internal_ip = _boolify(self._compute_hook._get_field("use_internal_ip"))
self.use_iap_tunnel = _boolify(self._compute_hook._get_field("use_iap_tunnel"))
self.use_oslogin = _boolify(self._compute_hook._get_field("use_oslogin"))
self.expire_time = intify(
"expire_time",
self._compute_hook._get_field("expire_time"),
self.expire_time,
)
if conn.extra is not None:
extra_options = conn.extra_dejson
if "cmd_timeout" in extra_options and self.cmd_timeout is NOTSET:
if extra_options["cmd_timeout"]:
self.cmd_timeout = int(extra_options["cmd_timeout"])
else:
self.cmd_timeout = None
if self.cmd_timeout is NOTSET:
self.cmd_timeout = CMD_TIMEOUT
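    # Illustrative sketch (not part of the hook): the connection extras read above could look like
    # the following when defined on a ``gcpssh`` Airflow connection. All values are placeholders,
    # and every field can also be passed directly to the constructor instead of the connection.
    #
    #   {
    #       "instance_name": "example-instance",
    #       "zone": "europe-west1-b",
    #       "use_iap_tunnel": "True",
    #       "use_oslogin": "False",
    #       "expire_time": "600",
    #       "cmd_timeout": 30
    #   }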
def get_conn(self) -> paramiko.SSHClient:
"""Return SSH connection."""
self._load_connection_config()
if not self.project_id:
self.project_id = self._compute_hook.project_id
missing_fields = [k for k in ["instance_name", "zone", "project_id"] if not getattr(self, k)]
if not self.instance_name or not self.zone or not self.project_id:
raise AirflowException(
f"Required parameters are missing: {missing_fields}. These parameters be passed either as "
"keyword parameter or as extra field in Airflow connection definition. Both are not set!"
)
self.log.info(
"Connecting to instance: instance_name=%s, user=%s, zone=%s, "
"use_internal_ip=%s, use_iap_tunnel=%s, use_os_login=%s",
self.instance_name,
self.user,
self.zone,
self.use_internal_ip,
self.use_iap_tunnel,
self.use_oslogin,
)
if not self.hostname:
hostname = self._compute_hook.get_instance_address(
zone=self.zone,
resource_id=self.instance_name,
project_id=self.project_id,
use_internal_ip=self.use_internal_ip or self.use_iap_tunnel,
)
else:
hostname = self.hostname
privkey, pubkey = self._generate_ssh_key(self.user)
if self.use_oslogin:
user = self._authorize_os_login(pubkey)
else:
user = self.user
self._authorize_compute_engine_instance_metadata(pubkey)
proxy_command = None
if self.use_iap_tunnel:
proxy_command_args = [
"gcloud",
"compute",
"start-iap-tunnel",
str(self.instance_name),
"22",
"--listen-on-stdin",
f"--project={self.project_id}",
f"--zone={self.zone}",
"--verbosity=warning",
]
proxy_command = " ".join(shlex.quote(arg) for arg in proxy_command_args)
sshclient = self._connect_to_instance(user, hostname, privkey, proxy_command)
return sshclient
def _connect_to_instance(self, user, hostname, pkey, proxy_command) -> paramiko.SSHClient:
self.log.info("Opening remote connection to host: username=%s, hostname=%s", user, hostname)
max_time_to_wait = 10
for time_to_wait in exponential_sleep_generator(initial=1, maximum=max_time_to_wait):
try:
client = _GCloudAuthorizedSSHClient(self._compute_hook)
# Default is RejectPolicy
# No known host checking since we are not storing privatekey
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
hostname=hostname,
username=user,
pkey=pkey,
sock=paramiko.ProxyCommand(proxy_command) if proxy_command else None,
look_for_keys=False,
)
return client
except paramiko.SSHException:
# exponential_sleep_generator is an infinite generator, so we need to
# check the end condition.
if time_to_wait == max_time_to_wait:
raise
self.log.info("Failed to connect. Waiting %ds to retry", time_to_wait)
time.sleep(time_to_wait)
raise AirflowException("Can not connect to instance")
def _authorize_compute_engine_instance_metadata(self, pubkey):
self.log.info("Appending SSH public key to instance metadata")
instance_info = self._compute_hook.get_instance_info(
zone=self.zone, resource_id=self.instance_name, project_id=self.project_id
)
keys = self.user + ":" + pubkey + "\n"
metadata = instance_info["metadata"]
items = metadata.get("items", [])
for item in items:
if item.get("key") == "ssh-keys":
keys += item["value"]
item["value"] = keys
break
else:
new_dict = dict(key="ssh-keys", value=keys)
metadata["items"] = [new_dict]
self._compute_hook.set_instance_metadata(
zone=self.zone, resource_id=self.instance_name, metadata=metadata, project_id=self.project_id
)
def _authorize_os_login(self, pubkey):
username = self._oslogin_hook._get_credentials_email()
self.log.info("Importing SSH public key using OSLogin: user=%s", username)
expiration = int((time.time() + self.expire_time) * 1000000)
ssh_public_key = {"key": pubkey, "expiration_time_usec": expiration}
response = self._oslogin_hook.import_ssh_public_key(
user=username, ssh_public_key=ssh_public_key, project_id=self.project_id
)
profile = response.login_profile
account = profile.posix_accounts[0]
user = account.username
return user
def _generate_ssh_key(self, user):
try:
self.log.info("Generating ssh keys...")
pkey_file = StringIO()
pkey_obj = paramiko.RSAKey.generate(2048)
pkey_obj.write_private_key(pkey_file)
pubkey = f"{pkey_obj.get_name()} {pkey_obj.get_base64()} {user}"
return pkey_obj, pubkey
except (OSError, paramiko.SSHException) as err:
raise AirflowException(f"Error encountered creating ssh keys, {err}")
| 13,347 | 40.197531 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage hook."""
from __future__ import annotations
import functools
import gzip as gz
import json
import os
import shutil
import time
import warnings
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from io import BytesIO
from os import path
from tempfile import NamedTemporaryFile
from typing import IO, Any, Callable, Generator, Sequence, TypeVar, cast, overload
from urllib.parse import urlsplit
from aiohttp import ClientSession
from gcloud.aio.storage import Storage
from google.api_core.exceptions import GoogleAPICallError, NotFound
from google.api_core.retry import Retry
# not sure why but mypy complains on missing `storage` but it is clearly there and is importable
from google.cloud import storage # type: ignore[attr-defined]
from google.cloud.exceptions import GoogleCloudError
from google.cloud.storage.retry import DEFAULT_RETRY
from requests import Session
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.utils.helpers import normalize_directory_path
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseAsyncHook, GoogleBaseHook
from airflow.utils import timezone
from airflow.version import version
try:
# Airflow 2.3 doesn't have this yet
from airflow.typing_compat import ParamSpec
except ImportError:
try:
from typing import ParamSpec # type: ignore[no-redef, attr-defined]
except ImportError:
from typing_extensions import ParamSpec
RT = TypeVar("RT")
T = TypeVar("T", bound=Callable)
FParams = ParamSpec("FParams")
# GCSHook has a method named 'list' (to junior devs: please don't do this), so
# we need to create an alias to prevent Mypy being confused.
List = list
# Use default timeout from google-cloud-storage
DEFAULT_TIMEOUT = 60
def _fallback_object_url_to_object_name_and_bucket_name(
object_url_keyword_arg_name="object_url",
bucket_name_keyword_arg_name="bucket_name",
object_name_keyword_arg_name="object_name",
) -> Callable[[T], T]:
"""
    Decorator factory that converts an object URL parameter into object name and bucket name parameters.
:param object_url_keyword_arg_name: Name of the object URL parameter
:param bucket_name_keyword_arg_name: Name of the bucket name parameter
:param object_name_keyword_arg_name: Name of the object name parameter
:return: Decorator
"""
def _wrapper(func: Callable[FParams, RT]) -> Callable[FParams, RT]:
@functools.wraps(func)
def _inner_wrapper(self, *args, **kwargs) -> RT:
if args:
raise AirflowException(
"You must use keyword arguments in this methods rather than positional"
)
object_url = kwargs.get(object_url_keyword_arg_name)
bucket_name = kwargs.get(bucket_name_keyword_arg_name)
object_name = kwargs.get(object_name_keyword_arg_name)
if object_url and bucket_name and object_name:
raise AirflowException(
"The mutually exclusive parameters. `object_url`, `bucket_name` together "
"with `object_name` parameters are present. "
"Please provide `object_url` or `bucket_name` and `object_name`."
)
if object_url:
bucket_name, object_name = _parse_gcs_url(object_url)
kwargs[bucket_name_keyword_arg_name] = bucket_name
kwargs[object_name_keyword_arg_name] = object_name
del kwargs[object_url_keyword_arg_name]
            if not object_name and not bucket_name:
raise TypeError(
f"{func.__name__}() missing 2 required positional arguments: "
f"'{bucket_name_keyword_arg_name}' and '{object_name_keyword_arg_name}' "
f"or {object_url_keyword_arg_name}"
)
if not object_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{object_name_keyword_arg_name}'"
)
if not bucket_name:
raise TypeError(
f"{func.__name__}() missing 1 required positional argument: "
f"'{bucket_name_keyword_arg_name}'"
)
return func(self, *args, **kwargs)
return cast(Callable[FParams, RT], _inner_wrapper)
return cast(Callable[[T], T], _wrapper)
# A fake bucket to use in functions decorated by _fallback_object_url_to_object_name_and_bucket_name.
# This allows the 'bucket' argument to be of type str instead of str | None,
# making it easier to type hint the function body without dealing with the None
# case that can never happen at runtime.
PROVIDE_BUCKET: str = cast(str, None)
class GCSHook(GoogleBaseHook):
"""Use the Google Cloud connection to interact with Google Cloud Storage."""
_conn: storage.Client | None = None
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
def get_conn(self) -> storage.Client:
"""Returns a Google Cloud Storage service object."""
if not self._conn:
self._conn = storage.Client(
credentials=self.get_credentials(), client_info=CLIENT_INFO, project=self.project_id
)
return self._conn
def copy(
self,
source_bucket: str,
source_object: str,
destination_bucket: str | None = None,
destination_object: str | None = None,
) -> None:
"""
Copies an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:param source_object: The object to copy.
        :param destination_bucket: The destination of the object to be copied to.
Can be omitted; then the same bucket is used.
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
f"Either source/destination bucket or source/destination object must be different, "
f"not both the same: bucket={source_bucket}, object={source_object}"
)
if not source_bucket or not source_object:
raise ValueError("source_bucket and source_object cannot be empty.")
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
destination_object = source_bucket.copy_blob( # type: ignore[attr-defined]
blob=source_object, destination_bucket=destination_bucket, new_name=destination_object
)
self.log.info(
"Object %s in bucket %s copied to object %s in bucket %s",
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object.name, # type: ignore[union-attr]
destination_bucket.name, # type: ignore[union-attr]
)
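    # Illustrative sketch (not part of the hook): copying an object into another bucket while
    # renaming it. The connection id, bucket names and object paths are placeholders.
    #
    #   hook = GCSHook(gcp_conn_id="google_cloud_default")
    #   hook.copy(
    #       source_bucket="example-source-bucket",
    #       source_object="data/report.csv",
    #       destination_bucket="example-destination-bucket",
    #       destination_object="archive/report-2023.csv",
    #   )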
def rewrite(
self,
source_bucket: str,
source_object: str,
destination_bucket: str,
destination_object: str | None = None,
) -> None:
"""
Similar to copy; supports files over 5 TB, and copying between locations and/or storage classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:param source_object: The object to copy.
        :param destination_bucket: The destination of the object to be copied to.
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
"""
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
f"Either source/destination bucket or source/destination object must be different, "
f"not both the same: bucket={source_bucket}, object={source_object}"
)
if not source_bucket or not source_object:
raise ValueError("source_bucket and source_object cannot be empty.")
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(blob_name=source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object)
self.log.info("Total Bytes: %s | Bytes Written: %s", total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object, token=token)
self.log.info("Total Bytes: %s | Bytes Written: %s", total_bytes, bytes_rewritten)
self.log.info(
"Object %s in bucket %s rewritten to object %s in bucket %s",
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object,
destination_bucket.name, # type: ignore[attr-defined]
)
@overload
def download(
self,
bucket_name: str,
object_name: str,
filename: None = None,
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int | None = 1,
) -> bytes:
...
@overload
def download(
self,
bucket_name: str,
object_name: str,
filename: str,
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int | None = 1,
) -> str:
...
def download(
self,
bucket_name: str,
object_name: str,
filename: str | None = None,
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int | None = 1,
) -> str | bytes:
"""
Downloads a file from Google Cloud Storage.
        When no filename is supplied, this method loads the file into memory and returns its
content. When a filename is supplied, it writes the file to the specified location and
returns the location. For file sizes that exceed the available memory it is recommended
to write to a file.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
:param filename: If set, a local file path where the file should be written to.
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to download the file.
"""
# TODO: future improvement check file size before downloading,
# to check for local space availability
num_file_attempts = 0
while True:
try:
num_file_attempts += 1
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename:
blob.download_to_filename(filename, timeout=timeout)
self.log.info("File downloaded to %s", filename)
return filename
else:
return blob.download_as_bytes()
except GoogleCloudError:
if num_file_attempts == num_max_attempts:
self.log.error(
"Download attempt of object: %s from %s has failed. Attempt: %s, max %s.",
object_name,
bucket_name,
num_file_attempts,
num_max_attempts,
)
raise
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
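    # Illustrative sketch (not part of the hook): downloading the same object either into memory
    # or to a local file, with a few retry attempts. Names and paths are placeholders.
    #
    #   content: bytes = hook.download(
    #       bucket_name="example-bucket", object_name="data/report.csv", num_max_attempts=3
    #   )
    #   local_path: str = hook.download(
    #       bucket_name="example-bucket",
    #       object_name="data/report.csv",
    #       filename="/tmp/report.csv",
    #   )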
def download_as_byte_array(
self,
bucket_name: str,
object_name: str,
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int | None = 1,
) -> bytes:
"""
        Downloads a file from Google Cloud Storage and returns its content as bytes.
        The whole object is loaded into memory, so for objects that exceed the available
        memory it is recommended to use :meth:`download` with a ``filename`` instead.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to download the file.
"""
# We do not pass filename, so will never receive string as response
return self.download(
bucket_name=bucket_name,
object_name=object_name,
chunk_size=chunk_size,
timeout=timeout,
num_max_attempts=num_max_attempts,
)
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file(
self,
bucket_name: str = PROVIDE_BUCKET,
object_name: str | None = None,
object_url: str | None = None,
dir: str | None = None,
) -> Generator[IO[bytes], None, None]:
"""
Downloads the file to a temporary directory and returns a file handle.
You can use this method by passing the bucket_name and object_name parameters
or just object_url parameter.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
        :param object_url: File reference URL. Must start with "gs://"
:param dir: The tmp sub directory to download the file to. (passed to NamedTemporaryFile)
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name, dir=dir) as tmp_file:
self.download(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
tmp_file.flush()
yield tmp_file
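    # Illustrative sketch (not part of the hook): using provide_file as a context manager so the
    # temporary local copy is cleaned up automatically. The object URL is a placeholder and
    # ``process`` stands for any hypothetical callable that reads the downloaded file.
    #
    #   with hook.provide_file(object_url="gs://example-bucket/data/report.csv") as handle:
    #       process(handle.name)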
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file_and_upload(
self,
bucket_name: str = PROVIDE_BUCKET,
object_name: str | None = None,
object_url: str | None = None,
) -> Generator[IO[bytes], None, None]:
"""
        Creates a temporary file, returns a file handle, and uploads the file's content on close.
You can use this method by passing the bucket_name and object_name parameters
or just object_url parameter.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
        :param object_url: File reference URL. Must start with "gs://"
:return: File handler
"""
if object_name is None:
raise ValueError("Object name can not be empty")
_, _, file_name = object_name.rpartition("/")
with NamedTemporaryFile(suffix=file_name) as tmp_file:
yield tmp_file
tmp_file.flush()
self.upload(bucket_name=bucket_name, object_name=object_name, filename=tmp_file.name)
def upload(
self,
bucket_name: str,
object_name: str,
filename: str | None = None,
data: str | bytes | None = None,
mime_type: str | None = None,
gzip: bool = False,
encoding: str = "utf-8",
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int = 1,
metadata: dict | None = None,
cache_control: str | None = None,
) -> None:
"""
Uploads a local file or file data as string or bytes to Google Cloud Storage.
:param bucket_name: The bucket to upload to.
:param object_name: The object name to set when uploading the file.
:param filename: The local file path to the file to be uploaded.
:param data: The file's data as a string or bytes to be uploaded.
:param mime_type: The file's mime type set when uploading the file.
:param gzip: Option to compress local file or file data for upload
:param encoding: bytes encoding for file data if provided as string
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to try to upload the file.
:param metadata: The metadata to be uploaded with the file.
:param cache_control: Cache-Control metadata field.
"""
def _call_with_retry(f: Callable[[], None]) -> None:
"""
            Helper function to upload a file or a string with a retry mechanism and exponential back-off.
:param f: Callable that should be retried.
"""
num_file_attempts = 0
while num_file_attempts < num_max_attempts:
try:
num_file_attempts += 1
f()
except GoogleCloudError as e:
if num_file_attempts == num_max_attempts:
self.log.error(
"Upload attempt of object: %s from %s has failed. Attempt: %s, max %s.",
object_name,
object_name,
num_file_attempts,
num_max_attempts,
)
raise e
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 1.0 * 2 ** (num_file_attempts - 1)
time.sleep(timeout_seconds)
continue
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if metadata:
blob.metadata = metadata
if cache_control:
blob.cache_control = cache_control
if filename and data:
raise ValueError(
"'filename' and 'data' parameter provided. Please "
"specify a single parameter, either 'filename' for "
"local file uploads or 'data' for file content uploads."
)
elif filename:
if not mime_type:
mime_type = "application/octet-stream"
if gzip:
filename_gz = filename + ".gz"
with open(filename, "rb") as f_in:
with gz.open(filename_gz, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
_call_with_retry(
partial(blob.upload_from_filename, filename=filename, content_type=mime_type, timeout=timeout)
)
if gzip:
os.remove(filename)
self.log.info("File %s uploaded to %s in %s bucket", filename, object_name, bucket_name)
elif data:
if not mime_type:
mime_type = "text/plain"
if gzip:
if isinstance(data, str):
data = bytes(data, encoding)
out = BytesIO()
with gz.GzipFile(fileobj=out, mode="w") as f:
f.write(data)
data = out.getvalue()
_call_with_retry(partial(blob.upload_from_string, data, content_type=mime_type, timeout=timeout))
self.log.info("Data stream uploaded to %s in %s bucket", object_name, bucket_name)
else:
raise ValueError("'filename' and 'data' parameter missing. One is required to upload to gcs.")
def exists(self, bucket_name: str, object_name: str, retry: Retry = DEFAULT_RETRY) -> bool:
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the blob_name to check in the Google cloud
storage bucket.
:param retry: (Optional) How to retry the RPC
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
return blob.exists(retry=retry)
def get_blob_update_time(self, bucket_name: str, object_name: str):
"""
Get the update time of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the blob to get updated time from the Google cloud
storage bucket.
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
if blob is None:
raise ValueError(f"Object ({object_name}) not found in Bucket ({bucket_name})")
return blob.updated
def is_updated_after(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
        Checks if a blob was updated after the given timestamp in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param ts: The timestamp to check against.
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s > %s", blob_update_time, ts)
if blob_update_time > ts:
return True
return False
def is_updated_between(
self, bucket_name: str, object_name: str, min_ts: datetime, max_ts: datetime
) -> bool:
"""
        Checks if a blob was updated between the given timestamps in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param min_ts: The minimum timestamp to check against.
:param max_ts: The maximum timestamp to check against.
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not min_ts.tzinfo:
min_ts = min_ts.replace(tzinfo=timezone.utc)
if not max_ts.tzinfo:
max_ts = max_ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s is between %s and %s", blob_update_time, min_ts, max_ts)
if min_ts <= blob_update_time < max_ts:
return True
return False
def is_updated_before(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
"""
        Checks if a blob was updated before the given timestamp in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param ts: The timestamp to check against.
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
if not ts.tzinfo:
ts = ts.replace(tzinfo=timezone.utc)
self.log.info("Verify object date: %s < %s", blob_update_time, ts)
if blob_update_time < ts:
return True
return False
def is_older_than(self, bucket_name: str, object_name: str, seconds: int) -> bool:
"""
Check if object is older than given time.
:param bucket_name: The Google Cloud Storage bucket where the object is.
:param object_name: The name of the object to check in the Google cloud
storage bucket.
:param seconds: The time in seconds to check against
"""
blob_update_time = self.get_blob_update_time(bucket_name, object_name)
if blob_update_time is not None:
from datetime import timedelta
current_time = timezone.utcnow()
given_time = current_time - timedelta(seconds=seconds)
self.log.info("Verify object date: %s is older than %s", blob_update_time, given_time)
if blob_update_time < given_time:
return True
return False
def delete(self, bucket_name: str, object_name: str) -> None:
"""
Deletes an object from the bucket.
:param bucket_name: name of the bucket, where the object resides
:param object_name: name of the object to delete
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.blob(blob_name=object_name)
blob.delete()
self.log.info("Blob %s deleted.", object_name)
def delete_bucket(self, bucket_name: str, force: bool = False) -> None:
"""
        Delete a bucket from Google Cloud Storage.
:param bucket_name: name of the bucket which will be deleted
        :param force: if False (default), deletion fails for a non-empty bucket;
            set force=True to allow deleting a non-empty bucket
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
self.log.info("Deleting %s bucket", bucket_name)
try:
bucket.delete(force=force)
self.log.info("Bucket %s has been deleted", bucket_name)
except NotFound:
self.log.info("Bucket %s not exists", bucket_name)
def list(
self,
bucket_name: str,
versions: bool | None = None,
max_results: int | None = None,
prefix: str | List[str] | None = None,
delimiter: str | None = None,
match_glob: str | None = None,
):
"""
        List all objects from the bucket, filtered by a single prefix or a list of prefixes.
:param bucket_name: bucket name
:param versions: if true, list all versions of the objects
:param max_results: max count of items to return in a single page of responses
:param prefix: string or list of strings which filter objects whose name begin with it/them
        :param delimiter: (Deprecated) filters objects based on the delimiter (e.g. '.csv')
        :param match_glob: (Optional) filters objects based on the glob pattern given by the string
            (e.g. ``'**/*/.json'``).
:return: a stream of object names matching the filtering criteria
"""
if delimiter and delimiter != "/":
warnings.warn(
"Usage of 'delimiter' param is deprecated, please use 'match_glob' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
if match_glob and delimiter and delimiter != "/":
raise AirflowException("'match_glob' param cannot be used with 'delimiter' that differs than '/'")
objects = []
if isinstance(prefix, list):
for prefix_item in prefix:
objects.extend(
self._list(
bucket_name=bucket_name,
versions=versions,
max_results=max_results,
prefix=prefix_item,
delimiter=delimiter,
match_glob=match_glob,
)
)
else:
objects.extend(
self._list(
bucket_name=bucket_name,
versions=versions,
max_results=max_results,
prefix=prefix,
delimiter=delimiter,
match_glob=match_glob,
)
)
return objects
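    # Illustrative sketch (not part of the hook): listing objects under several prefixes and, in a
    # second call, narrowing the result with a glob pattern. Bucket name, prefixes and the pattern
    # are placeholders.
    #
    #   names = hook.list("example-bucket", prefix=["logs/2023/", "logs/2024/"])
    #   json_names = hook.list("example-bucket", prefix="exports/", match_glob="**/*.json")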
def _list(
self,
bucket_name: str,
versions: bool | None = None,
max_results: int | None = None,
prefix: str | None = None,
delimiter: str | None = None,
match_glob: str | None = None,
) -> List:
"""
        List all objects from the bucket whose names begin with the given string prefix.
:param bucket_name: bucket name
:param versions: if true, list all versions of the objects
:param max_results: max count of items to return in a single page of responses
:param prefix: string which filters objects whose name begin with it
        :param delimiter: (Deprecated) filters objects based on the delimiter (e.g. '.csv')
        :param match_glob: (Optional) filters objects based on the glob pattern given by the string
            (e.g. ``'**/*/.json'``).
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
if match_glob:
blobs = self._list_blobs_with_match_glob(
bucket=bucket,
client=client,
match_glob=match_glob,
max_results=max_results,
page_token=page_token,
path=bucket.path + "/o",
prefix=prefix,
versions=versions,
)
else:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
@staticmethod
def _list_blobs_with_match_glob(
bucket,
client,
path: str,
max_results: int | None = None,
page_token: str | None = None,
match_glob: str | None = None,
prefix: str | None = None,
versions: bool | None = None,
) -> Any:
"""
List blobs when match_glob param is given.
This method is a patched version of google.cloud.storage Client.list_blobs().
It is used as a temporary workaround to support "match_glob" param,
as it isn't officially supported by GCS Python client.
(follow `issue #1035<https://github.com/googleapis/python-storage/issues/1035>`__).
"""
from google.api_core import page_iterator
from google.cloud.storage.bucket import _blobs_page_start, _item_to_blob
extra_params: Any = {}
if prefix is not None:
extra_params["prefix"] = prefix
if match_glob is not None:
extra_params["matchGlob"] = match_glob
if versions is not None:
extra_params["versions"] = versions
api_request = functools.partial(
client._connection.api_request, timeout=DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
)
blobs: Any = page_iterator.HTTPIterator(
client=client,
api_request=api_request,
path=path,
item_to_value=_item_to_blob,
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
page_start=_blobs_page_start,
)
blobs.prefixes = set()
blobs.bucket = bucket
return blobs
def list_by_timespan(
self,
bucket_name: str,
timespan_start: datetime,
timespan_end: datetime,
versions: bool | None = None,
max_results: int | None = None,
prefix: str | None = None,
delimiter: str | None = None,
match_glob: str | None = None,
) -> List[str]:
"""
List all objects from the bucket with the given string prefix that were updated in the time range.
:param bucket_name: bucket name
:param timespan_start: will return objects that were updated at or after this datetime (UTC)
:param timespan_end: will return objects that were updated before this datetime (UTC)
:param versions: if true, list all versions of the objects
:param max_results: max count of items to return in a single page of responses
:param prefix: prefix string which filters objects whose name begin with
this prefix
        :param delimiter: (Deprecated) filters objects based on the delimiter (e.g. '.csv')
        :param match_glob: (Optional) filters objects based on the glob pattern given by the string
            (e.g. ``'**/*/.json'``).
:return: a stream of object names matching the filtering criteria
"""
client = self.get_conn()
bucket = client.bucket(bucket_name)
ids = []
page_token = None
while True:
if match_glob:
blobs = self._list_blobs_with_match_glob(
bucket=bucket,
client=client,
match_glob=match_glob,
max_results=max_results,
page_token=page_token,
path=bucket.path + "/o",
prefix=prefix,
versions=versions,
)
else:
blobs = bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
versions=versions,
)
blob_names = []
for blob in blobs:
if timespan_start <= blob.updated.replace(tzinfo=timezone.utc) < timespan_end:
blob_names.append(blob.name)
prefixes = blobs.prefixes
if prefixes:
ids += list(prefixes)
else:
ids += blob_names
page_token = blobs.next_page_token
if page_token is None:
# empty next page token
break
return ids
def get_size(self, bucket_name: str, object_name: str) -> int:
"""
Gets the size of a file in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:param object_name: The name of the object to check in the Google
cloud storage bucket_name.
"""
self.log.info("Checking the file size of object: %s in bucket_name: %s", object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_size = blob.size
self.log.info("The file size of %s is %s bytes.", object_name, blob_size)
return blob_size
def get_crc32c(self, bucket_name: str, object_name: str):
"""
Gets the CRC32c checksum of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
"""
self.log.info(
"Retrieving the crc32c checksum of object_name: %s in bucket_name: %s",
object_name,
bucket_name,
)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_crc32c = blob.crc32c
self.log.info("The crc32c checksum of %s is %s", object_name, blob_crc32c)
return blob_crc32c
def get_md5hash(self, bucket_name: str, object_name: str) -> str:
"""
Gets the MD5 hash of an object in Google Cloud Storage.
:param bucket_name: The Google Cloud Storage bucket where the blob_name is.
:param object_name: The name of the object to check in the Google cloud
storage bucket_name.
"""
self.log.info("Retrieving the MD5 hash of object: %s in bucket: %s", object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
blob = bucket.get_blob(blob_name=object_name)
blob_md5hash = blob.md5_hash
self.log.info("The md5Hash of %s is %s", object_name, blob_md5hash)
return blob_md5hash
@GoogleBaseHook.fallback_to_default_project_id
def create_bucket(
self,
bucket_name: str,
resource: dict | None = None,
storage_class: str = "MULTI_REGIONAL",
location: str = "US",
project_id: str | None = None,
labels: dict | None = None,
) -> str:
"""
Creates a new bucket.
Google Cloud Storage uses a flat namespace, so you can't
create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket.
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage. Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:param location: The location of the bucket.
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:param project_id: The ID of the Google Cloud Project.
:param labels: User-provided labels, in key/value pairs.
:return: If successful, it returns the ``id`` of the bucket.
"""
self.log.info(
"Creating Bucket: %s; Location: %s; Storage Class: %s", bucket_name, location, storage_class
)
# Add airflow-version label to the bucket
labels = labels or {}
labels["airflow-version"] = "v" + version.replace(".", "-").replace("+", "-")
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket_resource = resource or {}
for item in bucket_resource:
if item != "name":
bucket._patch_property(name=item, value=resource[item]) # type: ignore[index]
bucket.storage_class = storage_class
bucket.labels = labels
bucket.create(project=project_id, location=location)
return bucket.id
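    # Illustrative sketch (not part of the hook): creating a regional bucket with extra resource
    # fields and labels. The bucket name, location, resource fields and labels are placeholders.
    #
    #   bucket_id = hook.create_bucket(
    #       bucket_name="example-new-bucket",
    #       storage_class="STANDARD",
    #       location="EU",
    #       resource={"versioning": {"enabled": True}},
    #       labels={"team": "data-platform"},
    #   )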
def insert_bucket_acl(
self, bucket_name: str, entity: str, role: str, user_project: str | None = None
) -> None:
"""
Creates a new ACL entry on the specified bucket_name.
See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert
:param bucket_name: Name of a bucket_name.
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers.
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER", "WRITER".
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
"""
self.log.info("Creating a new ACL entry in bucket: %s", bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
bucket.acl.reload()
bucket.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
bucket.acl.user_project = user_project
bucket.acl.save()
self.log.info("A new ACL entry created in bucket: %s", bucket_name)
def insert_object_acl(
self,
bucket_name: str,
object_name: str,
entity: str,
role: str,
generation: int | None = None,
user_project: str | None = None,
) -> None:
"""
Creates a new ACL entry on the specified object.
See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert
:param bucket_name: Name of a bucket_name.
:param object_name: Name of the object. For information about how to URL encode
object names to be path safe, see:
https://cloud.google.com/storage/docs/json_api/#encoding
:param entity: The entity holding the permission, in one of the following forms:
user-userId, user-email, group-groupId, group-email, domain-domain,
project-team-projectId, allUsers, allAuthenticatedUsers
See: https://cloud.google.com/storage/docs/access-control/lists#scopes
:param role: The access permission for the entity.
Acceptable values are: "OWNER", "READER".
:param generation: Optional. If present, selects a specific revision of this object.
:param user_project: (Optional) The project to be billed for this request.
Required for Requester Pays buckets.
"""
self.log.info("Creating a new ACL entry for object: %s in bucket: %s", object_name, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name=bucket_name)
blob = bucket.blob(blob_name=object_name, generation=generation)
# Reload fetches the current ACL from Cloud Storage.
blob.acl.reload()
blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
if user_project:
blob.acl.user_project = user_project
blob.acl.save()
self.log.info("A new ACL entry created for object: %s in bucket: %s", object_name, bucket_name)
def compose(self, bucket_name: str, source_objects: List[str], destination_object: str) -> None:
"""
        Composes a list of existing objects into a new object in the same storage bucket.
Currently it only supports up to 32 objects that can be concatenated
in a single operation
https://cloud.google.com/storage/docs/json_api/v1/objects/compose
:param bucket_name: The name of the bucket containing the source objects.
This is also the same bucket to store the composed destination object.
:param source_objects: The list of source objects that will be composed
into a single object.
:param destination_object: The path of the object if given.
"""
if not source_objects:
raise ValueError("source_objects cannot be empty.")
if not bucket_name or not destination_object:
raise ValueError("bucket_name and destination_object cannot be empty.")
self.log.info("Composing %s to %s in the bucket %s", source_objects, destination_object, bucket_name)
client = self.get_conn()
bucket = client.bucket(bucket_name)
destination_blob = bucket.blob(destination_object)
destination_blob.compose(
sources=[bucket.blob(blob_name=source_object) for source_object in source_objects]
)
self.log.info("Completed successfully.")
def sync(
self,
source_bucket: str,
destination_bucket: str,
source_object: str | None = None,
destination_object: str | None = None,
recursive: bool = True,
allow_overwrite: bool = False,
delete_extra_files: bool = False,
) -> None:
"""
Synchronizes the contents of the buckets.
Parameters ``source_object`` and ``destination_object`` describe the root sync directories. If they
are not passed, the entire bucket will be synchronized. If they are passed, they should point
to directories.
.. note::
The synchronization of individual files is not supported. Only entire directories can be
synchronized.
:param source_bucket: The name of the bucket containing the source objects.
:param destination_bucket: The name of the bucket containing the destination objects.
:param source_object: The root sync directory in the source bucket.
:param destination_object: The root sync directory in the destination bucket.
:param recursive: If True, subdirectories will be considered
:param allow_overwrite: if True, the files will be overwritten if a mismatched file is found.
By default, overwriting files is not allowed
        :param delete_extra_files: if True, deletes additional files from the destination that are
            not found in the source. By default, extra files are not deleted.
.. note::
This option can delete data quickly if you specify the wrong source/destination combination.
:return: none
"""
client = self.get_conn()
# Create bucket object
source_bucket_obj = client.bucket(source_bucket)
destination_bucket_obj = client.bucket(destination_bucket)
# Normalize parameters when they are passed
source_object = normalize_directory_path(source_object)
destination_object = normalize_directory_path(destination_object)
# Calculate the number of characters that remove from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
# Prepare synchronization plan
to_copy_blobs, to_delete_blobs, to_rewrite_blobs = self._prepare_sync_plan(
source_bucket=source_bucket_obj,
destination_bucket=destination_bucket_obj,
source_object=source_object,
destination_object=destination_object,
recursive=recursive,
)
self.log.info(
"Planned synchronization. To delete blobs count: %s, to upload blobs count: %s, "
"to rewrite blobs count: %s",
len(to_delete_blobs),
len(to_copy_blobs),
len(to_rewrite_blobs),
)
# Copy missing object to new bucket
if not to_copy_blobs:
self.log.info("Skipped blobs copying.")
else:
for blob in to_copy_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.copy(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs copied.")
# Delete redundant files
if not to_delete_blobs:
self.log.info("Skipped blobs deleting.")
elif delete_extra_files:
# TODO: Add batch. I tried to do it, but the Google library is not stable at the moment.
for blob in to_delete_blobs:
self.delete(blob.bucket.name, blob.name)
self.log.info("Blobs deleted.")
# Overwrite files that are different
if not to_rewrite_blobs:
self.log.info("Skipped blobs overwriting.")
elif allow_overwrite:
for blob in to_rewrite_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.rewrite(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs rewritten.")
self.log.info("Synchronization finished.")
def _calculate_sync_destination_path(
self, blob: storage.Blob, destination_object: str | None, source_object_prefix_len: int
) -> str:
return (
path.join(destination_object, blob.name[source_object_prefix_len:])
if destination_object
else blob.name[source_object_prefix_len:]
)
@staticmethod
def _prepare_sync_plan(
source_bucket: storage.Bucket,
destination_bucket: storage.Bucket,
source_object: str | None,
destination_object: str | None,
recursive: bool,
) -> tuple[set[storage.Blob], set[storage.Blob], set[storage.Blob]]:
# Calculate the number of characters that remove from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
destination_object_prefix_len = len(destination_object) if destination_object else 0
delimiter = "/" if not recursive else None
# Fetch blobs list
source_blobs = list(source_bucket.list_blobs(prefix=source_object, delimiter=delimiter))
destination_blobs = list(
destination_bucket.list_blobs(prefix=destination_object, delimiter=delimiter)
)
# Create indexes that allow you to identify blobs based on their name
source_names_index = {a.name[source_object_prefix_len:]: a for a in source_blobs}
destination_names_index = {a.name[destination_object_prefix_len:]: a for a in destination_blobs}
# Create sets with names without parent object name
source_names = set(source_names_index.keys())
destination_names = set(destination_names_index.keys())
# Determine objects to copy and delete
to_copy = source_names - destination_names
to_delete = destination_names - source_names
to_copy_blobs: set[storage.Blob] = {source_names_index[a] for a in to_copy}
to_delete_blobs: set[storage.Blob] = {destination_names_index[a] for a in to_delete}
# Find names that are in both buckets
names_to_check = source_names.intersection(destination_names)
to_rewrite_blobs: set[storage.Blob] = set()
# Compare objects based on crc32
for current_name in names_to_check:
source_blob = source_names_index[current_name]
destination_blob = destination_names_index[current_name]
# if the objects are different, save it
if source_blob.crc32c != destination_blob.crc32c:
to_rewrite_blobs.add(source_blob)
return to_copy_blobs, to_delete_blobs, to_rewrite_blobs
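# Illustrative usage sketch (hypothetical bucket names, prefixes and connection id):
# shows how the synchronization plan above is typically exercised through ``GCSHook.sync``.
def _example_sync_usage() -> None:
    hook = GCSHook(gcp_conn_id="google_cloud_default")
    # Mirror gs://example-src/data/ into gs://example-dst/backup/: copy missing blobs,
    # rewrite blobs whose crc32c checksum differs, and delete destination blobs absent from the source.
    hook.sync(
        source_bucket="example-src",
        destination_bucket="example-dst",
        source_object="data/",
        destination_object="backup/",
        recursive=True,
        allow_overwrite=True,
        delete_extra_files=True,
    )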
def gcs_object_is_directory(bucket: str) -> bool:
"""Return True if given Google Cloud Storage URL (gs://<bucket>/<blob>) is a directory or empty bucket."""
_, blob = _parse_gcs_url(bucket)
return len(blob) == 0 or blob.endswith("/")
def parse_json_from_gcs(gcp_conn_id: str, file_uri: str) -> Any:
"""
    Downloads and parses a JSON file from Google Cloud Storage.
:param gcp_conn_id: Airflow Google Cloud connection ID.
:param file_uri: full path to json file
example: ``gs://test-bucket/dir1/dir2/file``
"""
gcs_hook = GCSHook(gcp_conn_id=gcp_conn_id)
bucket, blob = _parse_gcs_url(file_uri)
with NamedTemporaryFile(mode="w+b") as file:
try:
gcs_hook.download(bucket_name=bucket, object_name=blob, filename=file.name)
except GoogleAPICallError as ex:
raise AirflowException(f"Failed to download file with query result: {ex}")
file.seek(0)
try:
json_data = file.read()
except (ValueError, OSError, RuntimeError) as ex:
raise AirflowException(f"Failed to read file: {ex}")
try:
result = json.loads(json_data)
except json.JSONDecodeError as ex:
raise AirflowException(f"Failed to decode query result from bytes to json: {ex}")
return result
def _parse_gcs_url(gsurl: str) -> tuple[str, str]:
"""
Given a Google Cloud Storage URL, return a tuple containing the corresponding bucket and blob.
Expected url format: gs://<bucket>/<blob>
"""
parsed_url = urlsplit(gsurl)
if not parsed_url.netloc:
raise AirflowException("Please provide a bucket name")
if parsed_url.scheme.lower() != "gs":
raise AirflowException(f"Schema must be to 'gs://': Current schema: '{parsed_url.scheme}://'")
bucket = parsed_url.netloc
# Remove leading '/' but NOT trailing one
blob = parsed_url.path.lstrip("/")
return bucket, blob
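# Illustrative usage sketch (hypothetical bucket and object names): the expected
# behaviour of ``_parse_gcs_url`` for a typical object URL.
def _example_parse_gcs_url() -> None:
    bucket, blob = _parse_gcs_url("gs://example-bucket/dir1/dir2/file.json")
    assert bucket == "example-bucket"
    assert blob == "dir1/dir2/file.json"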
class GCSAsyncHook(GoogleBaseAsyncHook):
"""GCSAsyncHook run on the trigger worker, inherits from GoogleBaseHookAsync."""
sync_hook_class = GCSHook
async def get_storage_client(self, session: ClientSession) -> Storage:
"""Returns a Google Cloud Storage service object."""
with await self.service_file_as_context() as file:
return Storage(service_file=file, session=cast(Session, session))
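# Illustrative usage sketch (hypothetical bucket, object and connection id): one minimal
# way a deferrable trigger could use ``GCSAsyncHook`` to check whether a blob exists,
# assuming aiohttp's ClientSession imported at the top of this module.
async def _example_async_blob_exists() -> bool:
    hook = GCSAsyncHook(gcp_conn_id="google_cloud_default")
    async with ClientSession() as session:
        client = await hook.get_storage_client(session)
        bucket = client.get_bucket("example-bucket")
        # blob_exists returns a truthy response when the object is present
        return bool(await bucket.blob_exists(blob_name="data/file.json"))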
| 56,577 | 40.058055 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/video_intelligence.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Video Intelligence Hook."""
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.videointelligence_v1 import (
Feature,
VideoContext,
VideoIntelligenceServiceClient,
)
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudVideoIntelligenceHook(GoogleBaseHook):
"""
Hook for Google Cloud Video Intelligence APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._conn: VideoIntelligenceServiceClient | None = None
def get_conn(self) -> VideoIntelligenceServiceClient:
"""Returns Gcp Video Intelligence Service client."""
if not self._conn:
self._conn = VideoIntelligenceServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._conn
@GoogleBaseHook.quota_retry()
def annotate_video(
self,
input_uri: str | None = None,
input_content: bytes | None = None,
features: Sequence[Feature] | None = None,
video_context: dict | VideoContext | None = None,
output_uri: str | None = None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Performs video annotation.
:param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported,
which must be specified in the following format: ``gs://bucket-id/object-id``.
:param input_content: The video data bytes.
If unset, the input video(s) should be specified via ``input_uri``.
If set, ``input_uri`` should be unset.
:param features: Requested video annotation features.
:param output_uri: Optional, location where the output (in JSON format) should be stored. Currently,
only Google Cloud Storage URIs are supported, which must be specified in the following format:
``gs://bucket-id/object-id``.
:param video_context: Optional, Additional video context and/or feature-specific parameters.
:param location: Optional, cloud region where annotation should take place. Supported cloud regions:
us-east1, us-west1, europe-west1, asia-east1.
If no region is specified, a region will be determined based on video file location.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: Optional, The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
:param metadata: Optional, Additional metadata that is provided to the method.
"""
client = self.get_conn()
return client.annotate_video(
request={
"input_uri": input_uri,
"features": features,
"input_content": input_content,
"video_context": video_context,
"output_uri": output_uri,
"location_id": location,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
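# Illustrative usage sketch (hypothetical URIs, timeout and connection id): a minimal
# ``annotate_video`` call requesting label detection and waiting on the long-running operation.
def _example_annotate_video():
    hook = CloudVideoIntelligenceHook(gcp_conn_id="google_cloud_default")
    operation = hook.annotate_video(
        input_uri="gs://example-bucket/videos/sample.mp4",
        features=[Feature.LABEL_DETECTION],
        output_uri="gs://example-bucket/annotations/sample.json",
    )
    # Block until annotation finishes and return the AnnotateVideoResponse.
    return operation.result(timeout=600)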
| 5,768 | 43.72093 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/cloud_composer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.operation_async import AsyncOperation
from google.api_core.retry import Retry
from google.cloud.orchestration.airflow.service_v1 import (
EnvironmentsAsyncClient,
EnvironmentsClient,
ImageVersionsClient,
)
from google.cloud.orchestration.airflow.service_v1.services.environments.pagers import ListEnvironmentsPager
from google.cloud.orchestration.airflow.service_v1.services.image_versions.pagers import (
ListImageVersionsPager,
)
from google.cloud.orchestration.airflow.service_v1.types import Environment
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudComposerHook(GoogleBaseHook):
"""Hook for Google Cloud Composer APIs."""
client_options = ClientOptions(api_endpoint="composer.googleapis.com:443")
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
def get_environment_client(self) -> EnvironmentsClient:
"""Retrieves client library object that allow access Environments service."""
return EnvironmentsClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=self.client_options,
)
def get_image_versions_client(self) -> ImageVersionsClient:
"""Retrieves client library object that allow access Image Versions service."""
return ImageVersionsClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=self.client_options,
)
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def get_operation(self, operation_name):
return self.get_environment_client().transport.operations_client.get_operation(name=operation_name)
def get_environment_name(self, project_id, region, environment_id):
return f"projects/{project_id}/locations/{region}/environments/{environment_id}"
def get_parent(self, project_id, region):
return f"projects/{project_id}/locations/{region}"
@GoogleBaseHook.fallback_to_default_project_id
def create_environment(
self,
project_id: str,
region: str,
environment: Environment | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a new environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment: The environment to create. This corresponds to the ``environment`` field on the
``request`` instance; if ``request`` is provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.create_environment(
request={"parent": self.get_parent(project_id, region), "environment": environment},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_environment(
self,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
result = client.delete_environment(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_environment(
self,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Environment:
"""
Get an existing environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.get_environment(
request={"name": self.get_environment_name(project_id, region, environment_id)},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_environments(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListEnvironmentsPager:
"""
List environments.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param page_size: The maximum number of environments to return.
:param page_token: The next_page_token value returned from a previous List
request, if any.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.list_environments(
request={
"parent": self.get_parent(project_id, region),
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_environment(
self,
project_id: str,
region: str,
environment_id: str,
environment: Environment | dict,
update_mask: dict | FieldMask,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
r"""
Update an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param environment: A patch environment. Fields specified by the ``updateMask`` will be copied from
the patch environment into the environment under update.
This corresponds to the ``environment`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param update_mask: Required. A comma-separated list of paths, relative to ``Environment``, of fields
to update. If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
result = client.update_environment(
request={"name": name, "environment": environment, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_image_versions(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
include_past_releases: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListImageVersionsPager:
"""
List ImageVersions for provided location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param page_size: The maximum number of environments to return.
:param page_token: The next_page_token value returned from a previous List
request, if any.
:param include_past_releases: Flag to include past releases
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_image_versions_client()
result = client.list_image_versions(
request={
"parent": self.get_parent(project_id, region),
"page_size": page_size,
"page_token": page_token,
"include_past_releases": include_past_releases,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
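# Illustrative usage sketch (hypothetical project, region, environment name and image
# version): creating a Composer environment and blocking until the operation finishes.
def _example_create_composer_environment():
    hook = CloudComposerHook(gcp_conn_id="google_cloud_default")
    operation = hook.create_environment(
        project_id="example-project",
        region="us-central1",
        environment={
            "name": "projects/example-project/locations/us-central1/environments/example-env",
            "config": {"software_config": {"image_version": "composer-2-airflow-2"}},
        },
    )
    # wait_for_operation re-raises the operation error as AirflowException on failure.
    return hook.wait_for_operation(operation, timeout=3600)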
class CloudComposerAsyncHook(GoogleBaseHook):
"""Hook for Google Cloud Composer async APIs."""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
client_options = ClientOptions(api_endpoint="composer.googleapis.com:443")
def get_environment_client(self) -> EnvironmentsAsyncClient:
"""Retrieves client library object that allow access Environments service."""
return EnvironmentsAsyncClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=self.client_options,
)
def get_environment_name(self, project_id, region, environment_id):
return f"projects/{project_id}/locations/{region}/environments/{environment_id}"
def get_parent(self, project_id, region):
return f"projects/{project_id}/locations/{region}"
async def get_operation(self, operation_name):
return await self.get_environment_client().transport.operations_client.get_operation(
name=operation_name
)
@GoogleBaseHook.fallback_to_default_project_id
async def create_environment(
self,
project_id: str,
region: str,
environment: Environment | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Create a new environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment: The environment to create. This corresponds to the ``environment`` field on the
``request`` instance; if ``request`` is provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
return await client.create_environment(
request={"parent": self.get_parent(project_id, region), "environment": environment},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
async def delete_environment(
self,
project_id: str,
region: str,
environment_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""
Delete an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
return await client.delete_environment(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
async def update_environment(
self,
project_id: str,
region: str,
environment_id: str,
environment: Environment | dict,
update_mask: dict | FieldMask,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
r"""
Update an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param environment: A patch environment. Fields specified by the ``updateMask`` will be copied from
the patch environment into the environment under update.
This corresponds to the ``environment`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param update_mask: Required. A comma-separated list of paths, relative to ``Environment``, of fields
to update. If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
return await client.update_environment(
request={"name": name, "environment": environment, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
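# Illustrative usage sketch (hypothetical identifiers): deleting an environment with the
# async hook and awaiting the returned long-running operation.
async def _example_async_delete_environment() -> None:
    hook = CloudComposerAsyncHook(gcp_conn_id="google_cloud_default")
    operation = await hook.delete_environment(
        project_id="example-project", region="us-central1", environment_id="example-env"
    )
    await operation.result()  # resolves once the deletion has completed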
| 18,285 | 42.642005 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/functions.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Functions Hook."""
from __future__ import annotations
import time
from typing import Sequence
import requests
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 1
class CloudFunctionsHook(GoogleBaseHook):
"""Google Cloud Functions APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
_conn: build | None = None
def __init__(
self,
api_version: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
@staticmethod
def _full_location(project_id: str, location: str) -> str:
"""Retrieve full location of the function.
:param project_id: Google Cloud Project ID where the function belongs.
:param location: The location where the function is created.
:return: The full location, in the form of
``projects/<GCP_PROJECT_ID>/locations/<GCP_LOCATION>``.
"""
return f"projects/{project_id}/locations/{location}"
def get_conn(self) -> build:
"""Retrieve the connection to Cloud Functions.
:return: Google Cloud Function services object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build(
"cloudfunctions", self.api_version, http=http_authorized, cache_discovery=False
)
return self._conn
def get_function(self, name: str) -> dict:
"""Get the Cloud Function with given name.
:param name: Name of the function.
:return: A Cloud Functions object representing the function.
"""
operation = self.get_conn().projects().locations().functions().get(name=name)
return operation.execute(num_retries=self.num_retries)
@GoogleBaseHook.fallback_to_default_project_id
def create_new_function(self, location: str, body: dict, project_id: str) -> None:
"""Create a new function at the location specified in the body.
:param location: The location of the function.
:param body: The body required by the Cloud Functions insert API.
:param project_id: Google Cloud Project ID where the function belongs.
If set to None or missing, the default project ID from the Google
Cloud connection is used.
"""
operation = (
self.get_conn()
.projects()
.locations()
.functions()
.create(location=self._full_location(project_id, location), body=body)
)
response = operation.execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
def update_function(self, name: str, body: dict, update_mask: list[str]) -> None:
"""Update Cloud Functions according to the specified update mask.
:param name: The name of the function.
:param body: The body required by the cloud function patch API.
:param update_mask: The update mask - array of fields that should be patched.
"""
operation = (
self.get_conn()
.projects()
.locations()
.functions()
.patch(updateMask=",".join(update_mask), name=name, body=body)
)
response = operation.execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
def upload_function_zip(self, location: str, zip_path: str, project_id: str) -> str:
"""Upload ZIP file with sources.
:param location: The location where the function is created.
:param zip_path: The path of the valid .zip file to upload.
:param project_id: Google Cloud Project ID where the function belongs.
If set to None or missing, the default project ID from the Google
Cloud connection is used.
:return: The upload URL that was returned by generateUploadUrl method.
"""
operation = (
self.get_conn()
.projects()
.locations()
.functions()
.generateUploadUrl(parent=self._full_location(project_id, location))
)
response = operation.execute(num_retries=self.num_retries)
upload_url = response.get("uploadUrl")
with open(zip_path, "rb") as file:
requests.put(
url=upload_url,
data=file,
                # These two headers need to be specified according to:
# https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions/generateUploadUrl
headers={
"Content-type": "application/zip",
"x-goog-content-length-range": "0,104857600",
},
)
return upload_url
def delete_function(self, name: str) -> None:
"""Delete the specified Cloud Function.
:param name: The name of the function.
"""
operation = self.get_conn().projects().locations().functions().delete(name=name)
response = operation.execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
def call_function(
self,
function_id: str,
input_data: dict,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
) -> dict:
"""Invoke a deployed Cloud Function.
This is done synchronously and should only be used for testing purposes,
as very limited traffic is allowed.
:param function_id: ID of the function to be called
:param input_data: Input to be passed to the function
:param location: The location where the function is located.
:param project_id: Google Cloud Project ID where the function belongs.
If set to None or missing, the default project ID from the Google
Cloud connection is used.
"""
name = f"projects/{project_id}/locations/{location}/functions/{function_id}"
operation = self.get_conn().projects().locations().functions().call(name=name, body=input_data)
response = operation.execute(num_retries=self.num_retries)
if "error" in response:
raise AirflowException(response["error"])
return response
def _wait_for_operation_to_complete(self, operation_name: str) -> dict:
"""Wait for the named operation to complete.
This is used to check the status of an asynchronous call.
:param operation_name: The name of the operation.
:return: The response returned by the operation.
:exception: AirflowException in case error is returned.
"""
service = self.get_conn()
while True:
operation = service.operations().get(name=operation_name)
operation_response = operation.execute(num_retries=self.num_retries)
if operation_response.get("done"):
response = operation_response.get("response")
error = operation_response.get("error")
# Note, according to documentation always either response or error is
# set when "done" == True
if error:
raise AirflowException(str(error))
return response
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
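# Illustrative usage sketch (hypothetical project, location, zip path and v1 payload
# fields): deploying a function by combining ``upload_function_zip`` and ``create_new_function``.
def _example_deploy_function() -> None:
    hook = CloudFunctionsHook(api_version="v1", gcp_conn_id="google_cloud_default")
    upload_url = hook.upload_function_zip(
        location="us-central1", zip_path="/tmp/handler.zip", project_id="example-project"
    )
    hook.create_new_function(
        location="us-central1",
        project_id="example-project",
        body={
            "name": "projects/example-project/locations/us-central1/functions/example-fn",
            "entryPoint": "handler",
            "runtime": "python311",
            "httpsTrigger": {},
            "sourceUploadUrl": upload_url,
        },
    )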
| 9,203 | 39.906667 | 122 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/cloud_build.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Google Cloud Build service."""
from __future__ import annotations
import warnings
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.exceptions import AlreadyExists
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.devtools.cloudbuild_v1 import CloudBuildAsyncClient, CloudBuildClient, GetBuildRequest
from google.cloud.devtools.cloudbuild_v1.types import Build, BuildTrigger, RepoSource
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 5
class CloudBuildHook(GoogleBaseHook):
"""
Hook for the Google Cloud Build Service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain)
self._client: dict[str, CloudBuildClient] = {}
def _get_build_id_from_operation(self, operation: Operation) -> str:
"""
Retrieve Cloud Build ID from Operation Object.
        :param operation: The Operation object from which the Cloud Build ID should be extracted.
:return: Cloud Build ID
"""
try:
return operation.metadata.build.id
except Exception:
raise AirflowException("Could not retrieve Build ID from Operation.")
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def get_conn(self, location: str = "global") -> CloudBuildClient:
"""
Retrieves the connection to Google Cloud Build.
:param location: The location of the project.
:return: Google Cloud Build client object.
"""
if location not in self._client:
client_options = None
if location != "global":
client_options = ClientOptions(api_endpoint=f"{location}-cloudbuild.googleapis.com:443")
self._client[location] = CloudBuildClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=client_options,
)
return self._client[location]
@GoogleBaseHook.fallback_to_default_project_id
def cancel_build(
self,
id_: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> Build:
"""
Cancels a build in progress.
:param id_: The ID of the build.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start cancelling build: %s.", id_)
build = client.cancel_build(
request={"project_id": project_id, "id": id_},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Build has been cancelled: %s.", id_)
return build
@GoogleBaseHook.fallback_to_default_project_id
def create_build_without_waiting_for_result(
self,
build: dict | Build,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> tuple[Operation, str]:
"""
Starts a build with the specified configuration without waiting for it to finish.
:param build: The build resource to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.Build`
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Start creating build...")
operation = client.create_build(
request={"parent": parent, "project_id": project_id, "build": build},
retry=retry,
timeout=timeout,
metadata=metadata,
)
id_ = self._get_build_id_from_operation(operation)
self.log.info("Build has been created: %s.", id_)
return operation, id_
@GoogleBaseHook.fallback_to_default_project_id
def create_build(
self,
build: dict | Build,
project_id: str = PROVIDE_PROJECT_ID,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Build:
"""
Starts a build with the specified configuration.
:param build: The build resource to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.Build`
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
"""
warnings.warn(
"This method is deprecated. Please use `create_build_without_waiting_for_result`.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
client = self.get_conn()
self.log.info("Start creating build...")
operation = client.create_build(
request={"project_id": project_id, "build": build},
retry=retry,
timeout=timeout,
metadata=metadata,
)
id_ = self._get_build_id_from_operation(operation)
if not wait:
return self.get_build(id_=id_, project_id=project_id)
operation.result()
self.log.info("Build has been created: %s.", id_)
return self.get_build(id_=id_, project_id=project_id)
@GoogleBaseHook.fallback_to_default_project_id
def create_build_trigger(
self,
trigger: dict | BuildTrigger,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> BuildTrigger:
"""
Creates a new BuildTrigger.
:param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start creating build trigger...")
try:
trigger = client.create_build_trigger(
request={"project_id": project_id, "trigger": trigger},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except AlreadyExists:
raise AirflowException("Cloud Build Trigger with such parameters already exists.")
self.log.info("Build trigger has been created.")
return trigger
@GoogleBaseHook.fallback_to_default_project_id
def delete_build_trigger(
self,
trigger_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> None:
"""
Deletes a BuildTrigger by its project ID and trigger ID.
:param trigger_id: The ID of the BuildTrigger to delete.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start deleting build trigger: %s.", trigger_id)
client.delete_build_trigger(
request={"project_id": project_id, "trigger_id": trigger_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Build trigger has been deleted: %s.", trigger_id)
@GoogleBaseHook.fallback_to_default_project_id
def get_build(
self,
id_: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> Build:
"""
Returns information about a previously requested build.
:param id_: The ID of the build.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start retrieving build: %s.", id_)
build = client.get_build(
request={"project_id": project_id, "id": id_},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Build has been retrieved: %s.", id_)
return build
@GoogleBaseHook.fallback_to_default_project_id
def get_build_trigger(
self,
trigger_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> BuildTrigger:
"""
Returns information about a BuildTrigger.
:param trigger_id: The ID of the BuildTrigger to get.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start retrieving build trigger: %s.", trigger_id)
trigger = client.get_build_trigger(
request={"project_id": project_id, "trigger_id": trigger_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Build trigger has been retrieved: %s.", trigger_id)
return trigger
@GoogleBaseHook.fallback_to_default_project_id
def list_build_triggers(
self,
location: str = "global",
project_id: str = PROVIDE_PROJECT_ID,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[BuildTrigger]:
"""
Lists existing BuildTriggers.
:param project_id: Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param location: The location of the project.
:param page_size: Optional, number of results to return in the list.
:param page_token: Optional, token to provide to skip to a particular spot in the list.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
"""
client = self.get_conn(location=location)
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Start retrieving build triggers.")
response = client.list_build_triggers(
request={
"parent": parent,
"project_id": project_id,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Build triggers have been retrieved.")
return list(response.triggers)
@GoogleBaseHook.fallback_to_default_project_id
def list_builds(
self,
location: str = "global",
project_id: str = PROVIDE_PROJECT_ID,
page_size: int | None = None,
        page_token: str | None = None,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[Build]:
"""
Lists previously requested builds.
:param project_id: Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: The location of the project.
:param page_size: Optional, number of results to return in the list.
:param page_token: Optional, token to provide to skip to a particular spot in the list.
:param filter_: Optional, the raw filter text to constrain the results.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
"""
client = self.get_conn(location=location)
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Start retrieving builds.")
response = client.list_builds(
request={
"parent": parent,
"project_id": project_id,
"page_size": page_size,
"page_token": page_token,
"filter": filter_,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Builds have been retrieved.")
return list(response.builds)
@GoogleBaseHook.fallback_to_default_project_id
def retry_build(
self,
id_: str,
project_id: str = PROVIDE_PROJECT_ID,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> Build:
"""
Create a new build using the original build request; may or may not result in an identical build.
:param id_: Build ID of the original build.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start retrying build: %s.", id_)
operation = client.retry_build(
request={"project_id": project_id, "id": id_},
retry=retry,
timeout=timeout,
metadata=metadata,
)
id_ = self._get_build_id_from_operation(operation)
self.log.info("Build has been retried: %s.", id_)
if not wait:
return self.get_build(id_=id_, project_id=project_id, location=location)
self.wait_for_operation(operation, timeout)
return self.get_build(id_=id_, project_id=project_id, location=location)
@GoogleBaseHook.fallback_to_default_project_id
def run_build_trigger(
self,
trigger_id: str,
source: dict | RepoSource,
project_id: str = PROVIDE_PROJECT_ID,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> Build:
"""
Runs a BuildTrigger at a particular source revision.
:param trigger_id: The ID of the trigger.
:param source: Source to build against this trigger. If a dict is provided, it must be of the
same form as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.RepoSource`
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start running build trigger: %s.", trigger_id)
operation = client.run_build_trigger(
request={"project_id": project_id, "trigger_id": trigger_id, "source": source},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Build trigger has been run: %s.", trigger_id)
id_ = self._get_build_id_from_operation(operation)
self.log.info("Build has been created: %s.", id_)
if not wait:
return self.get_build(id_=id_, project_id=project_id, location=location)
self.wait_for_operation(operation, timeout)
return self.get_build(id_=id_, project_id=project_id, location=location)
@GoogleBaseHook.fallback_to_default_project_id
def update_build_trigger(
self,
trigger_id: str,
trigger: dict | BuildTrigger,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> BuildTrigger:
"""
Updates a BuildTrigger by its project ID and trigger ID.
:param trigger_id: The ID of the trigger.
:param trigger: The BuildTrigger to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.BuildTrigger`
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param location: The location of the project.
"""
client = self.get_conn(location=location)
self.log.info("Start updating build trigger: %s.", trigger_id)
trigger = client.update_build_trigger(
request={"project_id": project_id, "trigger_id": trigger_id, "trigger": trigger},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Build trigger has been updated: %s.", trigger_id)
return trigger
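# Illustrative usage sketch (hypothetical bucket, object, image and project id): starting
# a build without blocking, waiting on the returned operation, then fetching the final Build.
def _example_run_cloud_build():
    hook = CloudBuildHook(gcp_conn_id="google_cloud_default")
    build_body = {
        # proto-plus exposes the reserved proto field ``object`` as ``object_``
        "source": {"storage_source": {"bucket": "example-bucket", "object_": "source.tar.gz"}},
        "steps": [
            {"name": "gcr.io/cloud-builders/docker", "args": ["build", "-t", "gcr.io/example-project/app", "."]}
        ],
        "images": ["gcr.io/example-project/app"],
    }
    operation, build_id = hook.create_build_without_waiting_for_result(
        build=build_body, project_id="example-project"
    )
    hook.wait_for_operation(operation, timeout=1800)
    return hook.get_build(id_=build_id, project_id="example-project")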
class CloudBuildAsyncHook(GoogleBaseHook):
"""Asynchronous Hook for the Google Cloud Build Service."""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
@GoogleBaseHook.fallback_to_default_project_id
async def get_cloud_build(
self,
id_: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str = "global",
) -> Build:
"""Retrieves a Cloud Build with a specified id."""
if not id_:
raise AirflowException("Google Cloud Build id is required.")
client_options = None
if location != "global":
client_options = ClientOptions(api_endpoint=f"{location}-cloudbuild.googleapis.com:443")
client = CloudBuildAsyncClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
request = GetBuildRequest(
project_id=project_id,
id=id_,
)
build_instance = await client.get_build(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return build_instance
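# Illustrative usage sketch (hypothetical build id and project): how a deferrable trigger
# might poll a build's status through the async hook.
async def _example_poll_build_status() -> bool:
    hook = CloudBuildAsyncHook(gcp_conn_id="google_cloud_default")
    build = await hook.get_cloud_build(id_="example-build-id", project_id="example-project")
    return build.status == Build.Status.SUCCESS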
| 28,269 | 41.005944 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/pubsub.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Pub/Sub Hook.
.. spelling:word-list::
MessageStoragePolicy
ReceivedMessage
"""
from __future__ import annotations
import warnings
from base64 import b64decode
from functools import cached_property
from typing import Any, Sequence
from uuid import uuid4
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.exceptions import NotFound
from google.cloud.pubsub_v1 import PublisherClient, SubscriberClient
from google.cloud.pubsub_v1.types import (
DeadLetterPolicy,
Duration,
ExpirationPolicy,
MessageStoragePolicy,
PushConfig,
ReceivedMessage,
RetryPolicy,
)
from google.pubsub_v1.services.subscriber.async_client import SubscriberAsyncClient
from googleapiclient.errors import HttpError
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import (
PROVIDE_PROJECT_ID,
GoogleBaseAsyncHook,
GoogleBaseHook,
)
from airflow.version import version
class PubSubException(Exception):
"""Alias for Exception."""
class PubSubHook(GoogleBaseHook):
"""
Hook for accessing Google Pub/Sub.
The Google Cloud project against which actions are applied is determined by
the project embedded in the Connection referenced by gcp_conn_id.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client = None
def get_conn(self) -> PublisherClient:
"""
Retrieves connection to Google Cloud Pub/Sub.
:return: Google Cloud Pub/Sub client object.
"""
if not self._client:
self._client = PublisherClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@cached_property
def subscriber_client(self) -> SubscriberClient:
"""
Creates SubscriberClient.
:return: Google Cloud Pub/Sub client object.
"""
return SubscriberClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
@GoogleBaseHook.fallback_to_default_project_id
def publish(
self,
topic: str,
messages: list[dict],
project_id: str = PROVIDE_PROJECT_ID,
) -> None:
"""
Publishes messages to a Pub/Sub topic.
:param topic: the Pub/Sub topic to which to publish; do not
include the ``projects/{project}/topics/`` prefix.
:param messages: messages to publish; if the data field in a
message is set, it should be a bytestring (utf-8 encoded)
https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage
:param project_id: Optional, the Google Cloud project ID in which to publish.
If set to None or missing, the default project_id from the Google Cloud connection is used.
"""
self._validate_messages(messages)
publisher = self.get_conn()
topic_path = f"projects/{project_id}/topics/{topic}"
self.log.info("Publish %d messages to topic (path) %s", len(messages), topic_path)
try:
for message in messages:
future = publisher.publish(
topic=topic_path, data=message.get("data", b""), **message.get("attributes", {})
)
future.result()
except GoogleAPICallError as e:
raise PubSubException(f"Error publishing to topic {topic_path}", e)
self.log.info("Published %d messages to topic (path) %s", len(messages), topic_path)
@staticmethod
def _validate_messages(messages) -> None:
for message in messages:
# To warn about broken backward compatibility
# TODO: remove one day
if "data" in message and isinstance(message["data"], str):
try:
b64decode(message["data"])
warnings.warn(
"The base 64 encoded string as 'data' field has been deprecated. "
"You should pass bytestring (utf-8 encoded).",
AirflowProviderDeprecationWarning,
stacklevel=4,
)
except ValueError:
pass
if not isinstance(message, dict):
raise PubSubException("Wrong message type. Must be a dictionary.")
if "data" not in message and "attributes" not in message:
raise PubSubException("Wrong message. Dictionary must contain 'data' or 'attributes'.")
if "data" in message and not isinstance(message["data"], bytes):
raise PubSubException("Wrong message. 'data' must be send as a bytestring")
if ("data" not in message and "attributes" in message and not message["attributes"]) or (
"attributes" in message and not isinstance(message["attributes"], dict)
):
raise PubSubException(
"Wrong message. If 'data' is not provided 'attributes' must be a non empty dictionary."
)
@GoogleBaseHook.fallback_to_default_project_id
def create_topic(
self,
topic: str,
project_id: str = PROVIDE_PROJECT_ID,
fail_if_exists: bool = False,
labels: dict[str, str] | None = None,
        message_storage_policy: dict | MessageStoragePolicy | None = None,
kms_key_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Creates a Pub/Sub topic, if it does not already exist.
:param topic: the Pub/Sub topic name to create; do not
include the ``projects/{project}/topics/`` prefix.
:param project_id: Optional, the Google Cloud project ID in which to create the topic
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param fail_if_exists: if set, raise an exception if the topic
already exists
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:param message_storage_policy: Policy constraining the set
of Google Cloud regions where messages published to
the topic may be stored. If not present, then no constraints
are in effect.
Union[dict, google.cloud.pubsub_v1.types.MessageStoragePolicy]
:param kms_key_name: The resource name of the Cloud KMS CryptoKey
to be used to protect access to messages published on this topic.
The expected format is
``projects/*/locations/*/keyRings/*/cryptoKeys/*``.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
publisher = self.get_conn()
topic_path = f"projects/{project_id}/topics/{topic}"
# Add airflow-version label to the topic
labels = labels or {}
labels["airflow-version"] = "v" + version.replace(".", "-").replace("+", "-")
self.log.info("Creating topic (path) %s", topic_path)
try:
publisher.create_topic(
request={
"name": topic_path,
"labels": labels,
"message_storage_policy": message_storage_policy,
"kms_key_name": kms_key_name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except AlreadyExists:
self.log.warning("Topic already exists: %s", topic)
if fail_if_exists:
raise PubSubException(f"Topic already exists: {topic}")
except GoogleAPICallError as e:
raise PubSubException(f"Error creating topic {topic}", e)
self.log.info("Created topic (path) %s", topic_path)
@GoogleBaseHook.fallback_to_default_project_id
def delete_topic(
self,
topic: str,
project_id: str = PROVIDE_PROJECT_ID,
fail_if_not_exists: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a Pub/Sub topic if it exists.
:param topic: the Pub/Sub topic name to delete; do not
include the ``projects/{project}/topics/`` prefix.
:param project_id: Optional, the Google Cloud project ID in which to delete the topic.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param fail_if_not_exists: if set, raise an exception if the topic
does not exist
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
publisher = self.get_conn()
topic_path = f"projects/{project_id}/topics/{topic}"
self.log.info("Deleting topic (path) %s", topic_path)
try:
publisher.delete_topic(
request={"topic": topic_path}, retry=retry, timeout=timeout, metadata=metadata or ()
)
except NotFound:
self.log.warning("Topic does not exist: %s", topic_path)
if fail_if_not_exists:
raise PubSubException(f"Topic does not exist: {topic_path}")
except GoogleAPICallError as e:
raise PubSubException(f"Error deleting topic {topic}", e)
self.log.info("Deleted topic (path) %s", topic_path)
@GoogleBaseHook.fallback_to_default_project_id
def create_subscription(
self,
topic: str,
project_id: str = PROVIDE_PROJECT_ID,
subscription: str | None = None,
subscription_project_id: str | None = None,
ack_deadline_secs: int = 10,
fail_if_exists: bool = False,
push_config: dict | PushConfig | None = None,
retain_acked_messages: bool | None = None,
message_retention_duration: dict | Duration | None = None,
labels: dict[str, str] | None = None,
enable_message_ordering: bool = False,
expiration_policy: dict | ExpirationPolicy | None = None,
filter_: str | None = None,
dead_letter_policy: dict | DeadLetterPolicy | None = None,
retry_policy: dict | RetryPolicy | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Creates a Pub/Sub subscription, if it does not already exist.
:param topic: the Pub/Sub topic name that the subscription will be bound
to create; do not include the ``projects/{project}/subscriptions/`` prefix.
:param project_id: Optional, the Google Cloud project ID of the topic that the subscription will be
bound to. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param subscription: the Pub/Sub subscription name. If empty, a random
name will be generated using the uuid module
:param subscription_project_id: the Google Cloud project ID where the subscription
will be created. If unspecified, ``project_id`` will be used.
:param ack_deadline_secs: Number of seconds that a subscriber has to
acknowledge each message pulled from the subscription
:param fail_if_exists: if set, raise an exception if the topic
already exists
:param push_config: If push delivery is used with this subscription,
this field is used to configure it. An empty ``pushConfig`` signifies
that the subscriber will pull and ack messages using API methods.
:param retain_acked_messages: Indicates whether to retain acknowledged
messages. If true, then messages are not expunged from the subscription's
backlog, even if they are acknowledged, until they fall out of the
``message_retention_duration`` window. This must be true if you would
like to Seek to a timestamp.
:param message_retention_duration: How long to retain unacknowledged messages
in the subscription's backlog, from the moment a message is published. If
``retain_acked_messages`` is true, then this also configures the
retention of acknowledged messages, and thus configures how far back in
time a ``Seek`` can be done. Defaults to 7 days. Cannot be more than 7
days or less than 10 minutes.
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:param enable_message_ordering: If true, messages published with the same
ordering_key in PubsubMessage will be delivered to the subscribers in the order
in which they are received by the Pub/Sub system. Otherwise, they may be
delivered in any order.
:param expiration_policy: A policy that specifies the conditions for this
subscription's expiration. A subscription is considered active as long as any
connected subscriber is successfully consuming messages from the subscription or
is issuing operations on the subscription. If expiration_policy is not set,
a default policy with ttl of 31 days will be used. The minimum allowed value for
expiration_policy.ttl is 1 day.
:param filter_: An expression written in the Cloud Pub/Sub filter language. If
non-empty, then only PubsubMessages whose attributes field matches the filter are
delivered on this subscription. If empty, then no messages are filtered out.
:param dead_letter_policy: A policy that specifies the conditions for dead lettering
messages in this subscription. If dead_letter_policy is not set, dead lettering is
disabled.
:param retry_policy: A policy that specifies how Pub/Sub retries message delivery
for this subscription. If not set, the default retry policy is applied. This
generally implies that messages will be retried as soon as possible for healthy
subscribers. RetryPolicy will be triggered on NACKs or acknowledgement deadline
exceeded events for a given message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:return: subscription name which will be the system-generated value if
the ``subscription`` parameter is not supplied
"""
subscriber = self.subscriber_client
if not subscription:
subscription = f"sub-{uuid4()}"
if not subscription_project_id:
subscription_project_id = project_id
# Add airflow-version label to the subscription
labels = labels or {}
labels["airflow-version"] = "v" + version.replace(".", "-").replace("+", "-")
subscription_path = f"projects/{subscription_project_id}/subscriptions/{subscription}"
topic_path = f"projects/{project_id}/topics/{topic}"
self.log.info("Creating subscription (path) %s for topic (path) %a", subscription_path, topic_path)
try:
subscriber.create_subscription(
request={
"name": subscription_path,
"topic": topic_path,
"push_config": push_config,
"ack_deadline_seconds": ack_deadline_secs,
"retain_acked_messages": retain_acked_messages,
"message_retention_duration": message_retention_duration,
"labels": labels,
"enable_message_ordering": enable_message_ordering,
"expiration_policy": expiration_policy,
"filter": filter_,
"dead_letter_policy": dead_letter_policy,
"retry_policy": retry_policy,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except AlreadyExists:
self.log.warning("Subscription already exists: %s", subscription_path)
if fail_if_exists:
raise PubSubException(f"Subscription already exists: {subscription_path}")
except GoogleAPICallError as e:
raise PubSubException(f"Error creating subscription {subscription_path}", e)
self.log.info("Created subscription (path) %s for topic (path) %s", subscription_path, topic_path)
return subscription
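    # Illustrative example (not part of the hook): creating a pull subscription with a
    # dead-letter topic. All resource names below are placeholders.
    #
    #     PubSubHook().create_subscription(
    #         topic="example-topic",
    #         project_id="example-project",
    #         subscription="example-subscription",
    #         ack_deadline_secs=30,
    #         dead_letter_policy={
    #             "dead_letter_topic": "projects/example-project/topics/example-dead-letter",
    #             "max_delivery_attempts": 5,
    #         },
    #     )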
@GoogleBaseHook.fallback_to_default_project_id
def delete_subscription(
self,
subscription: str,
project_id: str = PROVIDE_PROJECT_ID,
fail_if_not_exists: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a Pub/Sub subscription, if it exists.
:param subscription: the Pub/Sub subscription name to delete; do not
include the ``projects/{project}/subscriptions/`` prefix.
:param project_id: Optional, the Google Cloud project ID where the subscription exists
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param fail_if_not_exists: if set, raise an exception if the topic does not exist
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
subscriber = self.subscriber_client
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Deleting subscription (path) %s", subscription_path)
try:
subscriber.delete_subscription(
request={"subscription": subscription_path},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except NotFound:
self.log.warning("Subscription does not exist: %s", subscription_path)
if fail_if_not_exists:
raise PubSubException(f"Subscription does not exist: {subscription_path}")
except GoogleAPICallError as e:
raise PubSubException(f"Error deleting subscription {subscription_path}", e)
self.log.info("Deleted subscription (path) %s", subscription_path)
@GoogleBaseHook.fallback_to_default_project_id
def pull(
self,
subscription: str,
max_messages: int,
project_id: str = PROVIDE_PROJECT_ID,
return_immediately: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[ReceivedMessage]:
"""
Pulls up to ``max_messages`` messages from Pub/Sub subscription.
        :param subscription: the Pub/Sub subscription name to pull from; do not
            include the 'projects/{project}/subscriptions/' prefix.
:param max_messages: The maximum number of messages to return from
the Pub/Sub API.
:param project_id: Optional, the Google Cloud project ID where the subscription exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param return_immediately: If set, the Pub/Sub API will immediately
return if no messages are available. Otherwise, the request will
block for an undisclosed, but bounded period of time
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:return: A list of Pub/Sub ReceivedMessage objects each containing
an ``ackId`` property and a ``message`` property, which includes
the base64-encoded message content. See
https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.ReceivedMessage
"""
subscriber = self.subscriber_client
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Pulling max %d messages from subscription (path) %s", max_messages, subscription_path)
try:
response = subscriber.pull(
request={
"subscription": subscription_path,
"max_messages": max_messages,
"return_immediately": return_immediately,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result = getattr(response, "received_messages", [])
self.log.info("Pulled %d messages from subscription (path) %s", len(result), subscription_path)
return result
except (HttpError, GoogleAPICallError) as e:
raise PubSubException(f"Error pulling messages from subscription {subscription_path}", e)
@GoogleBaseHook.fallback_to_default_project_id
def acknowledge(
self,
subscription: str,
project_id: str,
ack_ids: list[str] | None = None,
messages: list[ReceivedMessage] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Acknowledges the messages associated with the ``ack_ids`` from Pub/Sub subscription.
        :param subscription: the Pub/Sub subscription name to acknowledge messages from; do not
            include the 'projects/{project}/subscriptions/' prefix.
:param ack_ids: List of ReceivedMessage ackIds from a previous pull response.
Mutually exclusive with ``messages`` argument.
:param messages: List of ReceivedMessage objects to acknowledge.
Mutually exclusive with ``ack_ids`` argument.
        :param project_id: Optional, the Google Cloud project ID where the subscription exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
if ack_ids is not None and messages is None:
pass # use ack_ids as is
elif ack_ids is None and messages is not None:
ack_ids = [message.ack_id for message in messages] # extract ack_ids from messages
else:
raise ValueError("One and only one of 'ack_ids' and 'messages' arguments have to be provided")
subscriber = self.subscriber_client
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Acknowledging %d ack_ids from subscription (path) %s", len(ack_ids), subscription_path)
try:
subscriber.acknowledge(
request={"subscription": subscription_path, "ack_ids": ack_ids},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except (HttpError, GoogleAPICallError) as e:
raise PubSubException(
f"Error acknowledging {len(ack_ids)} messages pulled from subscription {subscription_path}",
e,
)
self.log.info("Acknowledged ack_ids from subscription (path) %s", subscription_path)
class PubSubAsyncHook(GoogleBaseAsyncHook):
"""Class to get asynchronous hook for Google Cloud PubSub."""
sync_hook_class = PubSubHook
def __init__(self, project_id: str | None = None, **kwargs: Any):
super().__init__(**kwargs)
self.project_id = project_id
self._client: SubscriberAsyncClient | None = None
async def _get_subscriber_client(self) -> SubscriberAsyncClient:
"""
Returns async connection to the Google PubSub.
:return: Google Pub/Sub asynchronous client.
"""
if not self._client:
credentials = (await self.get_sync_hook()).get_credentials()
self._client = SubscriberAsyncClient(credentials=credentials, client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
async def acknowledge(
self,
subscription: str,
project_id: str,
ack_ids: list[str] | None = None,
messages: list[ReceivedMessage] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Acknowledges the messages associated with the ``ack_ids`` from Pub/Sub subscription.
        :param subscription: the Pub/Sub subscription name to acknowledge messages from; do not
            include the 'projects/{project}/subscriptions/' prefix.
:param ack_ids: List of ReceivedMessage ackIds from a previous pull response.
Mutually exclusive with ``messages`` argument.
:param messages: List of ReceivedMessage objects to acknowledge.
Mutually exclusive with ``ack_ids`` argument.
        :param project_id: Optional, the Google Cloud project ID where the subscription exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
subscriber = await self._get_subscriber_client()
if ack_ids is not None and messages is None:
pass # use ack_ids as is
elif ack_ids is None and messages is not None:
ack_ids = [message.ack_id for message in messages] # extract ack_ids from messages
else:
raise ValueError("One and only one of 'ack_ids' and 'messages' arguments have to be provided")
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Acknowledging %d ack_ids from subscription (path) %s", len(ack_ids), subscription_path)
try:
await subscriber.acknowledge(
request={"subscription": subscription_path, "ack_ids": ack_ids},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except (HttpError, GoogleAPICallError) as e:
raise PubSubException(
f"Error acknowledging {len(ack_ids)} messages pulled from subscription {subscription_path}",
e,
)
self.log.info("Acknowledged ack_ids from subscription (path) %s", subscription_path)
@GoogleBaseHook.fallback_to_default_project_id
async def pull(
self,
subscription: str,
max_messages: int,
project_id: str = PROVIDE_PROJECT_ID,
return_immediately: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[ReceivedMessage]:
"""
Pulls up to ``max_messages`` messages from Pub/Sub subscription.
        :param subscription: the Pub/Sub subscription name to pull from; do not
            include the 'projects/{project}/subscriptions/' prefix.
:param max_messages: The maximum number of messages to return from
the Pub/Sub API.
:param project_id: Optional, the Google Cloud project ID where the subscription exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param return_immediately: If set, the Pub/Sub API will immediately
return if no messages are available. Otherwise, the request will
block for an undisclosed, but bounded period of time
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:return: A list of Pub/Sub ReceivedMessage objects each containing
an ``ackId`` property and a ``message`` property, which includes
the base64-encoded message content. See
https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.ReceivedMessage
"""
subscriber = await self._get_subscriber_client()
subscription_path = f"projects/{project_id}/subscriptions/{subscription}"
self.log.info("Pulling max %d messages from subscription (path) %s", max_messages, subscription_path)
try:
response = await subscriber.pull(
request={
"subscription": subscription_path,
"max_messages": max_messages,
"return_immediately": return_immediately,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result = getattr(response, "received_messages", [])
self.log.info("Pulled %d messages from subscription (path) %s", len(result), subscription_path)
return result
except (HttpError, GoogleAPICallError) as e:
raise PubSubException(f"Error pulling messages from subscription {subscription_path}", e)
airflow-main/airflow/providers/google/cloud/hooks/translate.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Translate Hook."""
from __future__ import annotations
from typing import Sequence
from google.cloud.translate_v2 import Client
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudTranslateHook(GoogleBaseHook):
"""
Hook for Google Cloud translate APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: Client | None = None
def get_conn(self) -> Client:
"""
Retrieves connection to Cloud Translate.
:return: Google Cloud Translate client object.
"""
if not self._client:
self._client = Client(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.quota_retry()
def translate(
self,
values: str | list[str],
target_language: str,
format_: str | None = None,
source_language: str | None = None,
model: str | list[str] | None = None,
) -> dict:
"""Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:param values: String or list of strings to translate.
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:param source_language: (Optional) The language of the text to
be translated.
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
        :returns: A list of dictionaries for each queried value. Each
            dictionary typically contains the following keys (though not
            all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
"""
client = self.get_conn()
return client.translate(
values=values,
target_language=target_language,
format_=format_,
source_language=source_language,
model=model,
)
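# Illustrative usage sketch only (not part of the provider module): translating a couple of
# strings with the hook above. The connection id is a placeholder and credentials are assumed.
if __name__ == "__main__":
    hook = CloudTranslateHook(gcp_conn_id="google_cloud_default")
    result = hook.translate(
        values=["Hello world", "Guten Morgen"],
        target_language="es",
        format_="text",
    )
    for item in result:
        print(item["translatedText"])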
airflow-main/airflow/providers/google/cloud/hooks/stackdriver.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Stackdriver operators."""
from __future__ import annotations
import json
from typing import Any, Sequence
from google.api_core.exceptions import InvalidArgument
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import AlertPolicy, NotificationChannel
from google.protobuf.field_mask_pb2 import FieldMask
from googleapiclient.errors import HttpError
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class StackdriverHook(GoogleBaseHook):
"""Stackdriver Hook for connecting with Google Cloud Stackdriver."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._policy_client = None
self._channel_client = None
def _get_policy_client(self):
if not self._policy_client:
self._policy_client = monitoring_v3.AlertPolicyServiceClient()
return self._policy_client
def _get_channel_client(self):
if not self._channel_client:
self._channel_client = monitoring_v3.NotificationChannelServiceClient()
return self._channel_client
@GoogleBaseHook.fallback_to_default_project_id
def list_alert_policies(
self,
project_id: str = PROVIDE_PROJECT_ID,
format_: str | None = None,
filter_: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
        Fetches all the Alert Policies identified by the filter passed as the filter parameter.
        The desired return type can be specified by the format parameter; the supported formats
        are "dict", "json" and None, which return a python dictionary, stringified JSON and protobuf
        respectively.
:param format_: (Optional) Desired output format of the result. The
supported formats are "dict", "json" and None which returns
python dictionary, stringified JSON and protobuf respectively.
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be included in the response.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param order_by: A comma-separated list of fields by which to sort the result.
Supports the same set of field references as the ``filter`` field. Entries
can be prefixed with a minus sign to sort by the field in descending order.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param project_id: The project to fetch alerts from.
"""
client = self._get_policy_client()
policies_ = client.list_alert_policies(
request={
"name": f"projects/{project_id}",
"filter": filter_,
"order_by": order_by,
"page_size": page_size,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
if format_ == "dict":
return [AlertPolicy.to_dict(policy) for policy in policies_]
elif format_ == "json":
            return [AlertPolicy.to_json(policy) for policy in policies_]
else:
return policies_
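    # Illustrative example (not part of the hook): listing only enabled policies and getting
    # plain dictionaries back. The project id and filter expression below are placeholders.
    #
    #     policies = StackdriverHook().list_alert_policies(
    #         project_id="example-project",
    #         filter_='enabled="true"',
    #         format_="dict",
    #     )
    #     names = [policy["name"] for policy in policies]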
@GoogleBaseHook.fallback_to_default_project_id
def _toggle_policy_status(
self,
new_state: bool,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
client = self._get_policy_client()
policies_ = self.list_alert_policies(project_id=project_id, filter_=filter_)
for policy in policies_:
if policy.enabled != bool(new_state):
policy.enabled = bool(new_state)
mask = FieldMask(paths=["enabled"])
client.update_alert_policy(
request={"alert_policy": policy, "update_mask": mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def enable_alert_policies(
self,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Enables one or more disabled alerting policies identified by filter parameter.
Inoperative in case the policy is already enabled.
:param project_id: The project in which alert needs to be enabled.
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be enabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_policy_status(
new_state=True,
project_id=project_id,
filter_=filter_,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def disable_alert_policies(
self,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Disables one or more enabled alerting policies identified by filter parameter.
Inoperative in case the policy is already disabled.
:param project_id: The project in which alert needs to be disabled.
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be disabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_policy_status(
filter_=filter_,
project_id=project_id,
new_state=False,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def upsert_alert(
self,
alerts: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
        Creates a new alert policy or updates an existing one, identified by the name field in the alerts parameter.
:param project_id: The project in which alert needs to be created/updated.
        :param alerts: A JSON string or file that specifies all the alerts that need
to be either created or updated. For more details, see
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies#AlertPolicy.
(templated)
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
policy_client = self._get_policy_client()
channel_client = self._get_channel_client()
record = json.loads(alerts)
existing_policies = [
policy["name"] for policy in self.list_alert_policies(project_id=project_id, format_="dict")
]
existing_channels = [
channel["name"]
for channel in self.list_notification_channels(project_id=project_id, format_="dict")
]
policies_ = []
channels = []
for channel in record.get("channels", []):
channels.append(NotificationChannel(**channel))
for policy in record.get("policies", []):
policies_.append(AlertPolicy(**policy))
channel_name_map = {}
for channel in channels:
channel.verification_status = (
monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED
)
if channel.name in existing_channels:
channel_client.update_notification_channel(
request={"notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
else:
old_name = channel.name
channel.name = None
new_channel = channel_client.create_notification_channel(
request={"name": f"projects/{project_id}", "notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
channel_name_map[old_name] = new_channel.name
for policy in policies_:
policy.creation_record = None
policy.mutation_record = None
for i, channel in enumerate(policy.notification_channels):
new_channel = channel_name_map.get(channel)
if new_channel:
policy.notification_channels[i] = new_channel
if policy.name in existing_policies:
try:
policy_client.update_alert_policy(
request={"alert_policy": policy},
retry=retry,
timeout=timeout,
metadata=metadata,
)
except InvalidArgument:
pass
else:
policy.name = None
for condition in policy.conditions:
condition.name = None
policy_client.create_alert_policy(
request={"name": f"projects/{project_id}", "alert_policy": policy},
retry=retry,
timeout=timeout,
metadata=metadata,
)
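    # Illustrative example (not part of the hook): the ``alerts`` JSON consumed above is a
    # document with optional "channels" and "policies" lists, e.g. (placeholders only):
    #
    #     {
    #       "channels": [
    #         {"name": "...", "type": "email", "labels": {"email_address": "ops@example.com"}}
    #       ],
    #       "policies": [
    #         {
    #           "name": "...",
    #           "display_name": "Example policy",
    #           "combiner": "OR",
    #           "conditions": [...],
    #           "notification_channels": ["..."]
    #         }
    #       ]
    #     }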
def delete_alert_policy(
self,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes an alerting policy.
:param name: The alerting policy to delete. The format is:
``projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
policy_client = self._get_policy_client()
try:
policy_client.delete_alert_policy(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
except HttpError as err:
raise AirflowException(f"Delete alerting policy failed. Error was {err.content}")
@GoogleBaseHook.fallback_to_default_project_id
def list_notification_channels(
self,
project_id: str = PROVIDE_PROJECT_ID,
format_: str | None = None,
filter_: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
        Fetches all the Notification Channels identified by the filter passed as the filter parameter.
        The desired return type can be specified by the format parameter; the supported formats are
        "dict", "json" and None, which return a python dictionary, stringified JSON and protobuf
        respectively.
:param format_: (Optional) Desired output format of the result. The
supported formats are "dict", "json" and None which returns
python dictionary, stringified JSON and protobuf respectively.
:param filter_: If provided, this field specifies the criteria that
must be met by notification channels to be included in the response.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param order_by: A comma-separated list of fields by which to sort the result.
Supports the same set of field references as the ``filter`` field. Entries
can be prefixed with a minus sign to sort by the field in descending order.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param project_id: The project to fetch notification channels from.
"""
client = self._get_channel_client()
channels = client.list_notification_channels(
request={
"name": f"projects/{project_id}",
"filter": filter_,
"order_by": order_by,
"page_size": page_size,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
if format_ == "dict":
return [NotificationChannel.to_dict(channel) for channel in channels]
elif format_ == "json":
return [NotificationChannel.to_json(channel) for channel in channels]
else:
return channels
@GoogleBaseHook.fallback_to_default_project_id
def _toggle_channel_status(
self,
new_state: bool,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
client = self._get_channel_client()
channels = client.list_notification_channels(
request={"name": f"projects/{project_id}", "filter": filter_}
)
for channel in channels:
if channel.enabled != bool(new_state):
channel.enabled = bool(new_state)
mask = FieldMask(paths=["enabled"])
client.update_notification_channel(
request={"notification_channel": channel, "update_mask": mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def enable_notification_channels(
self,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
        Enables one or more disabled notification channels identified by the filter parameter.
        Inoperative in case the channel is already enabled.
        :param project_id: The project in which notification channels need to be enabled.
:param filter_: If provided, this field specifies the criteria that
must be met by notification channels to be enabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_channel_status(
project_id=project_id,
filter_=filter_,
new_state=True,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def disable_notification_channels(
self,
project_id: str,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
        Disables one or more enabled notification channels identified by the filter parameter.
        Inoperative in case the channel is already disabled.
        :param project_id: The project in which notification channels need to be disabled.
        :param filter_: If provided, this field specifies the criteria that
            must be met by notification channels to be disabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_channel_status(
filter_=filter_,
project_id=project_id,
new_state=False,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def upsert_channel(
self,
channels: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
        Create a new notification channel or update an existing one.
        The channel is identified by the name field in the ``channels`` parameter.
        :param channels: A JSON string or file that specifies all the notification channels that need
to be either created or updated. For more details, see
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannels.
(templated)
:param project_id: The project in which notification channels needs to be created/updated.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
channel_client = self._get_channel_client()
record = json.loads(channels)
existing_channels = [
channel["name"]
for channel in self.list_notification_channels(project_id=project_id, format_="dict")
]
channels_list = []
channel_name_map = {}
for channel in record["channels"]:
channels_list.append(NotificationChannel(**channel))
for channel in channels_list:
channel.verification_status = (
monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED
)
if channel.name in existing_channels:
channel_client.update_notification_channel(
request={"notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
else:
old_name = channel.name
channel.name = None
new_channel = channel_client.create_notification_channel(
request={"name": f"projects/{project_id}", "notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
channel_name_map[old_name] = new_channel.name
return channel_name_map
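    # Illustrative example (not part of the hook): the ``channels`` JSON consumed above must
    # contain a "channels" list; the returned dict maps old channel names to newly created ones.
    # Placeholders only:
    #
    #     {
    #       "channels": [
    #         {
    #           "name": "projects/example-project/notificationChannels/12345",
    #           "type": "email",
    #           "display_name": "Ops mailing list",
    #           "labels": {"email_address": "ops@example.com"}
    #         }
    #       ]
    #     }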
def delete_notification_channel(
self,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a notification channel.
:param name: The alerting policy to delete. The format is:
``projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
channel_client = self._get_channel_client()
try:
channel_client.delete_notification_channel(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
except HttpError as err:
raise AirflowException(f"Delete notification channel failed. Error was {err.content}")
airflow-main/airflow/providers/google/cloud/hooks/dataprep.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataprep hook."""
from __future__ import annotations
import json
from enum import Enum
from typing import Any
from urllib.parse import urljoin
import requests
from requests import HTTPError
from tenacity import retry, stop_after_attempt, wait_exponential
from airflow.hooks.base import BaseHook
def _get_field(extras: dict, field_name: str):
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
backcompat_prefix = "extra__dataprep__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
"when using this method."
)
if field_name in extras:
return extras[field_name] or None
prefixed_name = f"{backcompat_prefix}{field_name}"
return extras.get(prefixed_name) or None
class JobGroupStatuses(str, Enum):
"""Types of job group run statuses."""
CREATED = "Created"
UNDEFINED = "undefined"
IN_PROGRESS = "InProgress"
COMPLETE = "Complete"
FAILED = "Failed"
CANCELED = "Canceled"
class GoogleDataprepHook(BaseHook):
"""
Hook for connection with Dataprep API.
To get connection Dataprep with Airflow you need Dataprep token.
https://clouddataprep.com/documentation/api#section/Authentication
It should be added to the Connection in Airflow in JSON format.
"""
conn_name_attr = "dataprep_conn_id"
default_conn_name = "google_cloud_dataprep_default"
conn_type = "dataprep"
hook_name = "Google Dataprep"
def __init__(self, dataprep_conn_id: str = default_conn_name) -> None:
super().__init__()
self.dataprep_conn_id = dataprep_conn_id
conn = self.get_connection(self.dataprep_conn_id)
extras = conn.extra_dejson
self._token = _get_field(extras, "token")
self._base_url = _get_field(extras, "base_url") or "https://api.clouddataprep.com"
@property
def _headers(self) -> dict[str, str]:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self._token}",
}
return headers
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def get_jobs_for_job_group(self, job_id: int) -> dict[str, Any]:
"""
Get information about the batch jobs within a Cloud Dataprep job.
:param job_id: The ID of the job that will be fetched
"""
endpoint_path = f"v4/jobGroups/{job_id}/jobs"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.get(url, headers=self._headers)
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def get_job_group(self, job_group_id: int, embed: str, include_deleted: bool) -> dict[str, Any]:
"""
Get the specified job group.
A job group is a job that is executed from a specific node in a flow.
:param job_group_id: The ID of the job that will be fetched
:param embed: Comma-separated list of objects to pull in as part of the response
:param include_deleted: if set to "true", will include deleted objects
"""
params: dict[str, Any] = {"embed": embed, "includeDeleted": include_deleted}
endpoint_path = f"v4/jobGroups/{job_group_id}"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.get(url, headers=self._headers, params=params)
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def run_job_group(self, body_request: dict) -> dict[str, Any]:
"""
Creates a ``jobGroup``, which launches the specified job as the authenticated user.
This performs the same action as clicking on the Run Job button in the application.
To get recipe_id please follow the Dataprep API documentation
https://clouddataprep.com/documentation/api#operation/runJobGroup.
        :param body_request: Body of the POST request; it contains the identifier of the recipe you would like to run.
"""
endpoint_path = "v4/jobGroups"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
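    # Illustrative example (not part of the hook): a minimal ``body_request`` for
    # ``run_job_group`` references the wrangled dataset (recipe) to run. The id is a placeholder.
    #
    #     GoogleDataprepHook().run_job_group(body_request={"wrangledDataset": {"id": 12345}})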
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def copy_flow(
self, *, flow_id: int, name: str = "", description: str = "", copy_datasources: bool = False
) -> dict:
"""
Create a copy of the provided flow id, as well as all contained recipes.
:param flow_id: ID of the flow to be copied
:param name: Name for the copy of the flow
:param description: Description of the copy of the flow
        :param copy_datasources: Whether copies of the flow's data inputs should also be made.
"""
endpoint_path = f"v4/flows/{flow_id}/copy"
url: str = urljoin(self._base_url, endpoint_path)
body_request = {
"name": name,
"description": description,
"copyDatasources": copy_datasources,
}
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def delete_flow(self, *, flow_id: int) -> None:
"""
Delete the flow with the provided id.
        :param flow_id: ID of the flow to be deleted
"""
endpoint_path = f"v4/flows/{flow_id}"
url: str = urljoin(self._base_url, endpoint_path)
response = requests.delete(url, headers=self._headers)
self._raise_for_status(response)
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def run_flow(self, *, flow_id: int, body_request: dict) -> dict:
"""
        Run the flow with the provided id.
        :param flow_id: ID of the flow to be run
:param body_request: Body of the POST request to be sent.
"""
endpoint = f"v4/flows/{flow_id}/run"
url: str = urljoin(self._base_url, endpoint)
response = requests.post(url, headers=self._headers, data=json.dumps(body_request))
self._raise_for_status(response)
return response.json()
@retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, max=10))
def get_job_group_status(self, *, job_group_id: int) -> JobGroupStatuses:
"""
Check the status of the Dataprep job group.
:param job_group_id: ID of the job group to check
"""
endpoint = f"/v4/jobGroups/{job_group_id}/status"
url: str = urljoin(self._base_url, endpoint)
response = requests.get(url, headers=self._headers)
self._raise_for_status(response)
return response.json()
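# Illustrative polling sketch (kept as comments, not executed). The terminal members of
# ``JobGroupStatuses`` referenced below are assumptions.
#
#   import time
#
#   while hook.get_job_group_status(job_group_id=job_group_id) not in (
#       JobGroupStatuses.COMPLETE,
#       JobGroupStatuses.FAILED,
#       JobGroupStatuses.CANCELED,
#   ):
#       time.sleep(10)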
def _raise_for_status(self, response: requests.models.Response) -> None:
try:
response.raise_for_status()
except HTTPError:
self.log.error(response.json().get("exception"))
raise
| 8,220 | 38.524038 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/bigquery.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BigQuery Hook and a very basic PEP 249 implementation for BigQuery."""
from __future__ import annotations
import json
import logging
import re
import time
import uuid
import warnings
from copy import deepcopy
from datetime import datetime, timedelta
from typing import Any, Iterable, Mapping, NoReturn, Sequence, Union, cast
from aiohttp import ClientSession as ClientSession
from gcloud.aio.bigquery import Job, Table as Table_async
from google.api_core.page_iterator import HTTPIterator
from google.api_core.retry import Retry
from google.cloud.bigquery import (
DEFAULT_RETRY,
Client,
CopyJob,
ExternalConfig,
ExtractJob,
LoadJob,
QueryJob,
SchemaField,
UnknownJob,
)
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem, DatasetReference
from google.cloud.bigquery.table import EncryptionConfiguration, Row, RowIterator, Table, TableReference
from google.cloud.exceptions import NotFound
from googleapiclient.discovery import Resource, build
from pandas import DataFrame
from pandas_gbq import read_gbq
from pandas_gbq.gbq import GbqConnector # noqa
from requests import Session
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.providers.google.cloud.utils.bigquery import bq_cast
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseAsyncHook, GoogleBaseHook, get_field
try:
from airflow.utils.hashlib_wrapper import md5
except ModuleNotFoundError:
# Remove when Airflow providers min Airflow version is "2.7.0"
from hashlib import md5
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
log = logging.getLogger(__name__)
BigQueryJob = Union[CopyJob, QueryJob, LoadJob, ExtractJob]
class BigQueryHook(GoogleBaseHook, DbApiHook):
"""Interact with BigQuery.
This hook uses the Google Cloud connection.
:param gcp_conn_id: The Airflow connection used for GCP credentials.
:param use_legacy_sql: This specifies whether to use legacy SQL dialect.
:param location: The location of the BigQuery resource.
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:param api_resource_configs: This contains params configuration applied for
Google BigQuery jobs.
:param impersonation_chain: This is the optional service account to
impersonate using short term credentials.
:param labels: The BigQuery resource label.
"""
conn_name_attr = "gcp_conn_id"
default_conn_name = "google_cloud_bigquery_default"
conn_type = "gcpbigquery"
hook_name = "Google Bigquery"
def __init__(
self,
gcp_conn_id: str = GoogleBaseHook.default_conn_name,
use_legacy_sql: bool = True,
location: str | None = None,
priority: str = "INTERACTIVE",
api_resource_configs: dict | None = None,
impersonation_chain: str | Sequence[str] | None = None,
labels: dict | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.use_legacy_sql = use_legacy_sql
self.location = location
self.priority = priority
self.running_job_id: str | None = None
self.api_resource_configs: dict = api_resource_configs if api_resource_configs else {}
self.labels = labels
self.credentials_path = "bigquery_hook_credentials.json"
def get_conn(self) -> BigQueryConnection:
"""Get a BigQuery PEP 249 connection object."""
service = self.get_service()
return BigQueryConnection(
service=service,
project_id=self.project_id,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries,
hook=self,
)
def get_service(self) -> Resource:
"""Get a BigQuery service object. Deprecated."""
warnings.warn(
"This method will be deprecated. Please use `BigQueryHook.get_client` method",
AirflowProviderDeprecationWarning,
)
http_authorized = self._authorize()
return build("bigquery", "v2", http=http_authorized, cache_discovery=False)
def get_client(self, project_id: str | None = None, location: str | None = None) -> Client:
"""Get an authenticated BigQuery Client.
:param project_id: Project ID for the project which the client acts on behalf of.
:param location: Default location for jobs / datasets / tables.
"""
return Client(
client_info=CLIENT_INFO,
project=project_id,
location=location,
credentials=self.get_credentials(),
)
def get_uri(self) -> str:
"""Override from ``DbApiHook`` for ``get_sqlalchemy_engine()``."""
return f"bigquery://{self.project_id}"
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""Create an SQLAlchemy engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
"""
if engine_kwargs is None:
engine_kwargs = {}
extras = self.get_connection(self.gcp_conn_id).extra_dejson
credentials_path = get_field(extras, "key_path")
if credentials_path:
return create_engine(self.get_uri(), credentials_path=credentials_path, **engine_kwargs)
keyfile_dict = get_field(extras, "keyfile_dict")
if keyfile_dict:
keyfile_content = keyfile_dict if isinstance(keyfile_dict, dict) else json.loads(keyfile_dict)
return create_engine(self.get_uri(), credentials_info=keyfile_content, **engine_kwargs)
try:
# 1. If the environment variable GOOGLE_APPLICATION_CREDENTIALS is set
# ADC uses the service account key or configuration file that the variable points to.
# 2. If the environment variable GOOGLE_APPLICATION_CREDENTIALS isn't set
# ADC uses the service account that is attached to the resource that is running your code.
return create_engine(self.get_uri(), **engine_kwargs)
except Exception as e:
self.log.error(e)
raise AirflowException(
"For now, we only support instantiating SQLAlchemy engine by"
" using ADC or extra fields `key_path` and `keyfile_dict`."
)
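# Illustrative usage sketch (kept as comments, not executed), assuming ADC or connection
# extras are configured for ``google_cloud_default``.
#
#   hook = BigQueryHook(gcp_conn_id="google_cloud_default", use_legacy_sql=False)
#   engine = hook.get_sqlalchemy_engine()
#   with engine.connect() as conn:
#       rows = conn.execute("SELECT 1").fetchall()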
def get_records(self, sql, parameters=None):
if self.location is None:
raise AirflowException("Need to specify 'location' to use BigQueryHook.get_records()")
return super().get_records(sql, parameters=parameters)
@staticmethod
def _resolve_table_reference(
table_resource: dict[str, Any],
project_id: str | None = None,
dataset_id: str | None = None,
table_id: str | None = None,
) -> dict[str, Any]:
try:
# Check if tableReference is present and is valid
TableReference.from_api_repr(table_resource["tableReference"])
except KeyError:
# Something is wrong so we try to build the reference
table_resource["tableReference"] = table_resource.get("tableReference", {})
values = [("projectId", project_id), ("tableId", table_id), ("datasetId", dataset_id)]
for key, value in values:
# Check if the value is already present; if not, use the provided one
resolved_value = table_resource["tableReference"].get(key, value)
if not resolved_value:
# If there's no value in tableReference and provided one is None raise error
raise AirflowException(
f"Table resource is missing proper `tableReference` and `{key}` is None"
)
table_resource["tableReference"][key] = resolved_value
return table_resource
def insert_rows(
self,
table: Any,
rows: Any,
target_fields: Any = None,
commit_every: Any = 1000,
replace: Any = False,
**kwargs,
) -> None:
"""Insert rows.
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(
self,
sql: str,
parameters: Iterable | Mapping | None = None,
dialect: str | None = None,
**kwargs,
) -> DataFrame:
"""Get a Pandas DataFrame for the BigQuery results.
The DbApiHook method must be overridden because Pandas doesn't support
PEP 249 connections, except for SQLite.
.. seealso::
https://github.com/pandas-dev/pandas/blob/055d008615272a1ceca9720dc365a2abd316f353/pandas/io/sql.py#L415
https://github.com/pandas-dev/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:param dialect: Dialect of BigQuery SQL (legacy SQL or standard SQL);
defaults to the value of ``self.use_legacy_sql`` if not specified
:param kwargs: (optional) passed into pandas_gbq.read_gbq method
"""
if dialect is None:
dialect = "legacy" if self.use_legacy_sql else "standard"
credentials, project_id = self.get_credentials_and_project_id()
return read_gbq(
sql, project_id=project_id, dialect=dialect, verbose=False, credentials=credentials, **kwargs
)
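# Illustrative usage sketch (kept as comments, not executed); ``hook`` is assumed to be a
# ``BigQueryHook`` instance and the table name is a placeholder.
#
#   df = hook.get_pandas_df(
#       sql="SELECT name, salary FROM `my-project.my_dataset.employees`",
#       dialect="standard",
#   )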
@GoogleBaseHook.fallback_to_default_project_id
def table_exists(self, dataset_id: str, table_id: str, project_id: str) -> bool:
"""Check if a table exists in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:param dataset_id: The name of the dataset in which to look for the
table.
:param table_id: The name of the table to check the existence of.
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
self.get_client(project_id=project_id).get_table(table_reference)
return True
except NotFound:
return False
@GoogleBaseHook.fallback_to_default_project_id
def table_partition_exists(
self, dataset_id: str, table_id: str, partition_id: str, project_id: str
) -> bool:
"""Check if a partition exists in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:param dataset_id: The name of the dataset in which to look for the
table.
:param table_id: The name of the table to check the existence of.
:param partition_id: The name of the partition to check the existence of.
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
return partition_id in self.get_client(project_id=project_id).list_partitions(table_reference)
except NotFound:
return False
@GoogleBaseHook.fallback_to_default_project_id
def create_empty_table(
self,
project_id: str | None = None,
dataset_id: str | None = None,
table_id: str | None = None,
table_resource: dict[str, Any] | None = None,
schema_fields: list | None = None,
time_partitioning: dict | None = None,
cluster_fields: list[str] | None = None,
labels: dict | None = None,
view: dict | None = None,
materialized_view: dict | None = None,
encryption_configuration: dict | None = None,
retry: Retry = DEFAULT_RETRY,
location: str | None = None,
exists_ok: bool = True,
) -> Table:
"""Create a new, empty table in the dataset.
To create a view, which is defined by a SQL query, parse a dictionary to
the *view* argument.
:param project_id: The project to create the table into.
:param dataset_id: The dataset to create the table into.
:param table_id: The Name of the table to be created.
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
If provided, all other parameters are ignored.
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
.. code-block:: python
schema_fields = [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"},
]
:param labels: a dictionary containing labels for the table, passed to BigQuery
:param retry: Optional. How to retry the RPC.
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:param cluster_fields: [Optional] The fields used for clustering.
BigQuery supports clustering for both partitioned and
non-partitioned tables.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
.. code-block:: python
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False,
}
:param materialized_view: [Optional] The materialized view definition.
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
:param num_retries: Maximum number of retries in case of connection problems.
:param location: (Optional) The geographic location where the table should reside.
:param exists_ok: If ``True``, ignore "already exists" errors when creating the table.
:return: Created table
"""
_table_resource: dict[str, Any] = {}
if self.location:
_table_resource["location"] = self.location
if schema_fields:
_table_resource["schema"] = {"fields": schema_fields}
if time_partitioning:
_table_resource["timePartitioning"] = time_partitioning
if cluster_fields:
_table_resource["clustering"] = {"fields": cluster_fields}
if labels:
_table_resource["labels"] = labels
if view:
_table_resource["view"] = view
if materialized_view:
_table_resource["materializedView"] = materialized_view
if encryption_configuration:
_table_resource["encryptionConfiguration"] = encryption_configuration
table_resource = table_resource or _table_resource
table_resource = self._resolve_table_reference(
table_resource=table_resource,
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
table = Table.from_api_repr(table_resource)
return self.get_client(project_id=project_id, location=location).create_table(
table=table, exists_ok=exists_ok, retry=retry
)
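# Illustrative usage sketch (kept as comments, not executed): creating a partitioned,
# clustered table from explicit schema fields. All identifiers are placeholders.
#
#   hook.create_empty_table(
#       project_id="my-project",
#       dataset_id="my_dataset",
#       table_id="employees",
#       schema_fields=[
#           {"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
#           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"},
#       ],
#       time_partitioning={"type": "DAY"},
#       cluster_fields=["emp_name"],
#   )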
@GoogleBaseHook.fallback_to_default_project_id
def create_empty_dataset(
self,
dataset_id: str | None = None,
project_id: str | None = None,
location: str | None = None,
dataset_reference: dict[str, Any] | None = None,
exists_ok: bool = True,
) -> dict[str, Any]:
"""Create a new empty dataset.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
an empty dataset. Does not need to be provided if projectId is set in dataset_reference.
:param dataset_id: The ID of the dataset. Does not need to be provided if datasetId is set in dataset_reference.
:param location: (Optional) The geographic location where the dataset should reside.
There is no default value but the dataset will be created in US if nothing is provided.
:param dataset_reference: Dataset reference that could be provided with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param exists_ok: If ``True``, ignore "already exists" errors when creating the dataset.
"""
dataset_reference = dataset_reference or {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
for param, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
specified_param = dataset_reference["datasetReference"].get(param)
if specified_param:
if value:
self.log.info(
"`%s` was provided in both `dataset_reference` and as `%s`. "
"Using value from `dataset_reference`",
param,
convert_camel_to_snake(param),
)
continue # use specified value
if not value:
raise ValueError(
f"Please specify `{param}` either in `dataset_reference` "
f"or by providing `{convert_camel_to_snake(param)}`",
)
# dataset_reference has no param but we can fallback to default value
self.log.info(
"%s was not specified in `dataset_reference`. Will use default value %s.", param, value
)
dataset_reference["datasetReference"][param] = value
location = location or self.location
project_id = project_id or self.project_id
if location:
dataset_reference["location"] = dataset_reference.get("location", location)
dataset: Dataset = Dataset.from_api_repr(dataset_reference)
self.log.info("Creating dataset: %s in project: %s ", dataset.dataset_id, dataset.project)
dataset_object = self.get_client(project_id=project_id, location=location).create_dataset(
dataset=dataset, exists_ok=exists_ok
)
self.log.info("Dataset created successfully.")
return dataset_object.to_api_repr()
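# Illustrative usage sketch (kept as comments, not executed): the same dataset can be
# described either through the individual arguments or a ``dataset_reference`` body.
# Identifiers are placeholders.
#
#   hook.create_empty_dataset(dataset_id="my_dataset", project_id="my-project", location="EU")
#   # or, equivalently:
#   hook.create_empty_dataset(
#       dataset_reference={"datasetReference": {"datasetId": "my_dataset", "projectId": "my-project"}},
#       location="EU",
#   )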
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset_tables(
self,
dataset_id: str,
project_id: str | None = None,
max_results: int | None = None,
retry: Retry = DEFAULT_RETRY,
) -> list[dict[str, Any]]:
"""Get the list of tables for a given dataset.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
:param dataset_id: the dataset ID of the requested dataset.
:param project_id: (Optional) the project of the requested dataset. If None,
self.project_id will be used.
:param max_results: (Optional) the maximum number of tables to return.
:param retry: How to retry the RPC.
:return: List of tables associated with the dataset.
"""
self.log.info("Start getting tables list from dataset: %s.%s", project_id, dataset_id)
tables = self.get_client().list_tables(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
max_results=max_results,
retry=retry,
)
# Convert to a list (consumes all values)
return [t.reference.to_api_repr() for t in tables]
@GoogleBaseHook.fallback_to_default_project_id
def delete_dataset(
self,
dataset_id: str,
project_id: str | None = None,
delete_contents: bool = False,
retry: Retry = DEFAULT_RETRY,
) -> None:
"""Delete a dataset of Big query in your project.
:param project_id: The name of the project where we have the dataset.
:param dataset_id: The dataset to be deleted.
:param delete_contents: If True, delete all the tables in the dataset.
If False and the dataset contains tables, the request will fail.
:param retry: How to retry the RPC.
"""
self.log.info("Deleting from project: %s Dataset:%s", project_id, dataset_id)
self.get_client(project_id=project_id).delete_dataset(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
delete_contents=delete_contents,
retry=retry,
not_found_ok=True,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_external_table(
self,
external_project_dataset_table: str,
schema_fields: list,
source_uris: list,
source_format: str = "CSV",
autodetect: bool = False,
compression: str = "NONE",
ignore_unknown_values: bool = False,
max_bad_records: int = 0,
skip_leading_rows: int = 0,
field_delimiter: str = ",",
quote_character: str | None = None,
allow_quoted_newlines: bool = False,
allow_jagged_rows: bool = False,
encoding: str = "UTF-8",
src_fmt_configs: dict | None = None,
labels: dict | None = None,
description: str | None = None,
encryption_configuration: dict | None = None,
location: str | None = None,
project_id: str | None = None,
) -> Table:
"""Create an external table in the dataset with data from Google Cloud Storage.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
This method is deprecated. Please use :func:`.create_empty_table` with
the ``table_resource`` object. See function documentation for more
details about these parameters.
:param external_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table name to create external table.
If ``<project>`` is not included, project will be the
project defined in the connection json.
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:param source_format: File format to export.
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:param field_delimiter: The delimiter to use when loading from a CSV.
:param quote_character: The value that is used to quote data sections in a CSV
file.
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:param encoding: The character encoding of the data. See:
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:param src_fmt_configs: configure optional fields specific to the source format
:param labels: A dictionary containing labels for the BigQuery table.
:param description: A string containing the description for the BigQuery table.
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.create_empty_table` method with "
"passing the `table_resource` object. This gives more flexibility than this method.",
AirflowProviderDeprecationWarning,
)
location = location or self.location
src_fmt_configs = src_fmt_configs or {}
source_format = source_format.upper()
compression = compression.upper()
external_config_api_repr = {
"autodetect": autodetect,
"sourceFormat": source_format,
"sourceUris": source_uris,
"compression": compression,
"ignoreUnknownValues": ignore_unknown_values,
}
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
"skipLeadingRows": skip_leading_rows,
"fieldDelimiter": field_delimiter,
"quote": quote_character,
"allowQuotedNewlines": allow_quoted_newlines,
"allowJaggedRows": allow_jagged_rows,
"encoding": encoding,
}
src_fmt_to_param_mapping = {"CSV": "csvOptions", "GOOGLE_SHEETS": "googleSheetsOptions"}
src_fmt_to_configs_mapping = {
"csvOptions": [
"allowJaggedRows",
"allowQuotedNewlines",
"fieldDelimiter",
"skipLeadingRows",
"quote",
"encoding",
],
"googleSheetsOptions": ["skipLeadingRows"],
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[src_fmt_to_param_mapping[source_format]]
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
external_config_api_repr[src_fmt_to_param_mapping[source_format]] = src_fmt_configs
# build external config
external_config = ExternalConfig.from_api_repr(external_config_api_repr)
if schema_fields:
external_config.schema = [SchemaField.from_api_repr(f) for f in schema_fields]
if max_bad_records:
external_config.max_bad_records = max_bad_records
# build table definition
table = Table(table_ref=TableReference.from_string(external_project_dataset_table, project_id))
table.external_data_configuration = external_config
if labels:
table.labels = labels
if description:
table.description = description
if encryption_configuration:
table.encryption_configuration = EncryptionConfiguration.from_api_repr(encryption_configuration)
self.log.info("Creating external table: %s", external_project_dataset_table)
table_object = self.create_empty_table(
table_resource=table.to_api_repr(), project_id=project_id, location=location, exists_ok=True
)
self.log.info("External table created successfully: %s", external_project_dataset_table)
return table_object
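# Illustrative sketch of the non-deprecated path (kept as comments, not executed): the
# same external table expressed as a ``table_resource`` for ``create_empty_table``.
# Bucket, project and dataset names are placeholders.
#
#   hook.create_empty_table(
#       project_id="my-project",
#       table_resource={
#           "tableReference": {
#               "projectId": "my-project",
#               "datasetId": "my_dataset",
#               "tableId": "external_events",
#           },
#           "externalDataConfiguration": {
#               "sourceFormat": "CSV",
#               "sourceUris": ["gs://my-bucket/events/*.csv"],
#               "csvOptions": {"skipLeadingRows": 1},
#           },
#       },
#   )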
@GoogleBaseHook.fallback_to_default_project_id
def update_table(
self,
table_resource: dict[str, Any],
fields: list[str] | None = None,
dataset_id: str | None = None,
table_id: str | None = None,
project_id: str | None = None,
) -> dict[str, Any]:
"""Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, the field value will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
:param project_id: The project to create the table into.
:param dataset_id: The dataset to create the table into.
:param table_id: The Name of the table to be created.
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
The table has to contain ``tableReference`` or ``project_id``, ``dataset_id`` and ``table_id``
have to be provided.
:param fields: The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
"""
fields = fields or list(table_resource.keys())
table_resource = self._resolve_table_reference(
table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
table = Table.from_api_repr(table_resource)
self.log.info("Updating table: %s", table_resource["tableReference"])
table_object = self.get_client(project_id=project_id).update_table(table=table, fields=fields)
self.log.info("Table %s.%s.%s updated successfully", project_id, dataset_id, table_id)
return table_object.to_api_repr()
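# Illustrative usage sketch (kept as comments, not executed): updating only the table
# description by restricting ``fields``. Identifiers are placeholders.
#
#   hook.update_table(
#       table_resource={"description": "Nightly employee snapshot"},
#       fields=["description"],
#       project_id="my-project",
#       dataset_id="my_dataset",
#       table_id="employees",
#   )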
@GoogleBaseHook.fallback_to_default_project_id
def patch_table(
self,
dataset_id: str,
table_id: str,
project_id: str | None = None,
description: str | None = None,
expiration_time: int | None = None,
external_data_configuration: dict | None = None,
friendly_name: str | None = None,
labels: dict | None = None,
schema: list | None = None,
time_partitioning: dict | None = None,
view: dict | None = None,
require_partition_filter: bool | None = None,
encryption_configuration: dict | None = None,
) -> None:
"""Patch information in an existing table.
It only updates fields that are provided in the request object. This
method is deprecated. Please use :func:`.update_table` instead.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:param table_id: The Name of the table to be patched.
:param project_id: The project containing the table to be patched.
:param description: [Optional] A user-friendly description of this table.
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:param friendly_name: [Optional] A descriptive name for this table.
:param labels: [Optional] A dictionary containing labels associated with this table.
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
The supported schema modifications and unsupported schema modification are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
.. code-block:: python
schema = [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"},
]
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
.. code-block:: python
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False,
}
:param require_partition_filter: [Optional] If true, queries over this table require a
partition filter to be specified. If false, queries can be run without a partition filter.
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
"""
warnings.warn(
"This method is deprecated, please use ``BigQueryHook.update_table`` method.",
AirflowProviderDeprecationWarning,
)
table_resource: dict[str, Any] = {}
if description is not None:
table_resource["description"] = description
if expiration_time is not None:
table_resource["expirationTime"] = expiration_time
if external_data_configuration:
table_resource["externalDataConfiguration"] = external_data_configuration
if friendly_name is not None:
table_resource["friendlyName"] = friendly_name
if labels:
table_resource["labels"] = labels
if schema:
table_resource["schema"] = {"fields": schema}
if time_partitioning:
table_resource["timePartitioning"] = time_partitioning
if view:
table_resource["view"] = view
if require_partition_filter is not None:
table_resource["requirePartitionFilter"] = require_partition_filter
if encryption_configuration:
table_resource["encryptionConfiguration"] = encryption_configuration
self.update_table(
table_resource=table_resource,
fields=list(table_resource.keys()),
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
@GoogleBaseHook.fallback_to_default_project_id
def insert_all(
self,
project_id: str,
dataset_id: str,
table_id: str,
rows: list,
ignore_unknown_values: bool = False,
skip_invalid_rows: bool = False,
fail_on_error: bool = False,
) -> None:
"""Stream data into BigQuery one record at a time without a load job.
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
:param project_id: The name of the project where we have the table
:param dataset_id: The name of the dataset where we have the table
:param table_id: The name of the table
:param rows: the rows to insert
.. code-block:: python
rows = [{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}]
:param ignore_unknown_values: [Optional] Accept rows that contain values
that do not match the schema. The unknown values are ignored.
The default value is false, which treats unknown values as errors.
:param skip_invalid_rows: [Optional] Insert all valid rows of a request,
even if invalid rows exist. The default value is false, which causes
the entire request to fail if any invalid rows exist.
:param fail_on_error: [Optional] Force the task to fail if any errors occur.
The default value is false, which indicates the task should not fail
even if any insertion errors occur.
"""
self.log.info("Inserting %s row(s) into table %s:%s.%s", len(rows), project_id, dataset_id, table_id)
table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)
bq_client = self.get_client(project_id=project_id)
table = bq_client.get_table(table_ref)
errors = bq_client.insert_rows(
table=table,
rows=rows,
ignore_unknown_values=ignore_unknown_values,
skip_invalid_rows=skip_invalid_rows,
)
if errors:
error_msg = f"{len(errors)} insert error(s) occurred. Details: {errors}"
self.log.error(error_msg)
if fail_on_error:
raise AirflowException(f"BigQuery job failed. Error was: {error_msg}")
else:
self.log.info("All row(s) inserted successfully: %s:%s.%s", project_id, dataset_id, table_id)
@GoogleBaseHook.fallback_to_default_project_id
def update_dataset(
self,
fields: Sequence[str],
dataset_resource: dict[str, Any],
dataset_id: str | None = None,
project_id: str | None = None,
retry: Retry = DEFAULT_RETRY,
) -> Dataset:
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
:param dataset_resource: Dataset resource that will be provided
in request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param dataset_id: The id of the dataset.
:param fields: The properties of ``dataset`` to change (e.g. "friendly_name").
:param project_id: The Google Cloud Project ID
:param retry: How to retry the RPC.
"""
dataset_resource["datasetReference"] = dataset_resource.get("datasetReference", {})
for key, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
spec_value = dataset_resource["datasetReference"].get(key)
if value and not spec_value:
dataset_resource["datasetReference"][key] = value
self.log.info("Start updating dataset")
dataset = self.get_client(project_id=project_id).update_dataset(
dataset=Dataset.from_api_repr(dataset_resource),
fields=fields,
retry=retry,
)
self.log.info("Dataset successfully updated: %s", dataset)
return dataset
def patch_dataset(self, dataset_id: str, dataset_resource: dict, project_id: str | None = None) -> dict:
"""Patches information in an existing dataset.
It only replaces fields that are provided in the submitted dataset resource.
This method is deprecated. Please use :func:`.update_dataset` instead.
More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/patch
:param dataset_id: The BigQuery Dataset ID
:param dataset_resource: Dataset resource that will be provided
in request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:param project_id: The Google Cloud Project ID
"""
warnings.warn(
"This method is deprecated. Please use ``update_dataset``.", AirflowProviderDeprecationWarning
)
project_id = project_id or self.project_id
if not dataset_id or not isinstance(dataset_id, str):
raise ValueError(
f"dataset_id argument must be provided and has a type 'str'. You provided: {dataset_id}"
)
service = self.get_service()
dataset_project_id = project_id or self.project_id
self.log.info("Start patching dataset: %s:%s", dataset_project_id, dataset_id)
dataset = (
service.datasets()
.patch(
datasetId=dataset_id,
projectId=dataset_project_id,
body=dataset_resource,
)
.execute(num_retries=self.num_retries)
)
self.log.info("Dataset successfully patched: %s", dataset)
return dataset
def get_dataset_tables_list(
self,
dataset_id: str,
project_id: str | None = None,
table_prefix: str | None = None,
max_results: int | None = None,
) -> list[dict[str, Any]]:
"""List tables of a BigQuery dataset.
If a table prefix is specified, only tables beginning by it are
returned. This method is deprecated. Please use
:func:`.get_dataset_tables` instead.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
:param dataset_id: The BigQuery Dataset ID
:param project_id: The Google Cloud Project ID
:param table_prefix: Tables must begin by this prefix to be returned (case sensitive)
:param max_results: The maximum number of results to return in a single response page.
Leverage the page tokens to iterate through the entire collection.
:return: List of tables associated with the dataset
"""
warnings.warn(
"This method is deprecated. Please use ``get_dataset_tables``.", AirflowProviderDeprecationWarning
)
project_id = project_id or self.project_id
tables = self.get_client().list_tables(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
max_results=max_results,
)
if table_prefix:
result = [t.reference.to_api_repr() for t in tables if t.table_id.startswith(table_prefix)]
else:
result = [t.reference.to_api_repr() for t in tables]
self.log.info("%s tables found", len(result))
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_datasets_list(
self,
project_id: str | None = None,
include_all: bool = False,
filter_: str | None = None,
max_results: int | None = None,
page_token: str | None = None,
retry: Retry = DEFAULT_RETRY,
return_iterator: bool = False,
) -> list[DatasetListItem] | HTTPIterator:
"""Get all BigQuery datasets in the current project.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you try to get all datasets
:param include_all: True if results include hidden datasets. Defaults to False.
:param filter_: An expression for filtering the results by label. For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
:type filter_: str
:param max_results: Maximum number of datasets to return.
:type max_results: int
:param page_token: Token representing a cursor into the datasets. If not passed,
the API will return the first page of datasets. The token marks the beginning of the
iterator to be returned and the value of the ``page_token`` can be accessed at
``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`.
:type page_token: str
:param retry: How to retry the RPC.
:param return_iterator: Instead of returning a list[Row], returns a HTTPIterator
which can be used to obtain the next_page_token property.
"""
iterator = self.get_client(project_id=project_id).list_datasets(
project=project_id,
include_all=include_all,
filter=filter_,
max_results=max_results,
page_token=page_token,
retry=retry,
)
# If iterator is requested, we cannot perform a list() on it to log the number
# of datasets because we will have started iteration
if return_iterator:
# The iterator returned by list_datasets() is a HTTPIterator but annotated
# as Iterator
return iterator # type: ignore
datasets_list = list(iterator)
self.log.info("Datasets List: %s", len(datasets_list))
return datasets_list
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset(self, dataset_id: str, project_id: str | None = None) -> Dataset:
"""Fetch the dataset referenced by *dataset_id*.
:param dataset_id: The BigQuery Dataset ID
:param project_id: The Google Cloud Project ID
:return: dataset_resource
.. seealso::
For more information, see Dataset Resource content:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
dataset = self.get_client(project_id=project_id).get_dataset(
dataset_ref=DatasetReference(project_id, dataset_id)
)
self.log.info("Dataset Resource: %s", dataset)
return dataset
@GoogleBaseHook.fallback_to_default_project_id
def run_grant_dataset_view_access(
self,
source_dataset: str,
view_dataset: str,
view_table: str,
view_project: str | None = None,
project_id: str | None = None,
) -> dict[str, Any]:
"""Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:param view_dataset: the dataset that the view is in
:param view_table: the table of the view
:param project_id: the project of the source dataset. If None,
self.project_id will be used.
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:return: the datasets resource of the source dataset.
"""
view_project = view_project or project_id
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={"projectId": view_project, "datasetId": view_dataset, "tableId": view_table},
)
dataset = self.get_dataset(project_id=project_id, dataset_id=source_dataset)
# Check to see if the view we want to add already exists.
if view_access not in dataset.access_entries:
self.log.info(
"Granting table %s:%s.%s authorized view access to %s:%s dataset.",
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
dataset.access_entries += [view_access]
dataset = self.update_dataset(
fields=["access"], dataset_resource=dataset.to_api_repr(), project_id=project_id
)
else:
self.log.info(
"Table %s:%s.%s already has authorized view access to %s:%s dataset.",
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
return dataset.to_api_repr()
@GoogleBaseHook.fallback_to_default_project_id
def run_table_upsert(
self, dataset_id: str, table_resource: dict[str, Any], project_id: str | None = None
) -> dict[str, Any]:
"""Update a table if it exists, otherwise create a new one.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
"""
table_id = table_resource["tableReference"]["tableId"]
table_resource = self._resolve_table_reference(
table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
tables_list_resp = self.get_dataset_tables(dataset_id=dataset_id, project_id=project_id)
if any(table["tableId"] == table_id for table in tables_list_resp):
self.log.info("Table %s:%s.%s exists, updating.", project_id, dataset_id, table_id)
table = self.update_table(table_resource=table_resource)
else:
self.log.info("Table %s:%s.%s does not exist. creating.", project_id, dataset_id, table_id)
table = self.create_empty_table(
table_resource=table_resource, project_id=project_id
).to_api_repr()
return table
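# Illustrative usage sketch (kept as comments, not executed). The ``tableReference`` key
# must be present because the method reads the table ID from it; values are placeholders.
#
#   hook.run_table_upsert(
#       dataset_id="my_dataset",
#       project_id="my-project",
#       table_resource={
#           "tableReference": {
#               "projectId": "my-project",
#               "datasetId": "my_dataset",
#               "tableId": "employees",
#           },
#           "expirationTime": "1735689600000",
#       },
#   )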
def run_table_delete(self, deletion_dataset_table: str, ignore_if_missing: bool = False) -> None:
"""Delete an existing table from the dataset.
If the table does not exist, return an error unless *ignore_if_missing*
is set to True.
This method is deprecated. Please use :func:`.delete_table` instead.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:return:
"""
warnings.warn(
"This method is deprecated. Please use `delete_table`.", AirflowProviderDeprecationWarning
)
return self.delete_table(table_id=deletion_dataset_table, not_found_ok=ignore_if_missing)
@GoogleBaseHook.fallback_to_default_project_id
def delete_table(
self,
table_id: str,
not_found_ok: bool = True,
project_id: str | None = None,
) -> None:
"""Delete an existing table from the dataset.
If the table does not exist, return an error unless *not_found_ok* is
set to True.
:param table_id: A dotted ``(<project>.|<project>:)<dataset>.<table>``
that indicates which table will be deleted.
:param not_found_ok: if True, then return success even if the
requested table does not exist.
:param project_id: the project used to perform the request
"""
self.get_client(project_id=project_id).delete_table(
table=table_id,
not_found_ok=not_found_ok,
)
self.log.info("Deleted table %s", table_id)
def get_tabledata(
self,
dataset_id: str,
table_id: str,
max_results: int | None = None,
selected_fields: str | None = None,
page_token: str | None = None,
start_index: int | None = None,
) -> list[dict]:
"""Get data from given table.
This method is deprecated. Please use :func:`.list_rows` instead.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: list of rows
"""
warnings.warn("This method is deprecated. Please use `list_rows`.", AirflowProviderDeprecationWarning)
rows = self.list_rows(
dataset_id=dataset_id,
table_id=table_id,
max_results=max_results,
selected_fields=selected_fields,
page_token=page_token,
start_index=start_index,
)
return [dict(r) for r in rows]
@GoogleBaseHook.fallback_to_default_project_id
def list_rows(
self,
dataset_id: str,
table_id: str,
max_results: int | None = None,
selected_fields: list[str] | str | None = None,
page_token: str | None = None,
start_index: int | None = None,
project_id: str | None = None,
location: str | None = None,
retry: Retry = DEFAULT_RETRY,
return_iterator: bool = False,
) -> list[Row] | RowIterator:
"""List rows in a table.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:param project_id: Project ID for the project which the client acts on behalf of.
:param location: Default location for job.
:param retry: How to retry the RPC.
:param return_iterator: Instead of returning a list[Row], returns a RowIterator
which can be used to obtain the next_page_token property.
:return: list of rows
"""
location = location or self.location
if isinstance(selected_fields, str):
selected_fields = selected_fields.split(",")
if selected_fields:
selected_fields_sequence = [SchemaField(n, "") for n in selected_fields]
else:
selected_fields_sequence = None
table = self._resolve_table_reference(
table_resource={},
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
iterator = self.get_client(project_id=project_id, location=location).list_rows(
table=Table.from_api_repr(table),
selected_fields=selected_fields_sequence,
max_results=max_results,
page_token=page_token,
start_index=start_index,
retry=retry,
)
if return_iterator:
return iterator
return list(iterator)
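# Illustrative usage sketch (kept as comments, not executed): requesting the iterator to
# obtain the page token for manual paging. Identifiers are placeholders.
#
#   row_iterator = hook.list_rows(
#       dataset_id="my_dataset",
#       table_id="employees",
#       max_results=1000,
#       return_iterator=True,
#   )
#   rows = list(row_iterator)
#   next_token = row_iterator.next_page_token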
@GoogleBaseHook.fallback_to_default_project_id
def get_schema(self, dataset_id: str, table_id: str, project_id: str | None = None) -> dict:
"""Get the schema for a given dataset and table.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:param project_id: the optional project ID of the requested table.
If not provided, the connector's configured project will be used.
:return: a table schema
"""
table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)
table = self.get_client(project_id=project_id).get_table(table_ref)
return {"fields": [s.to_api_repr() for s in table.schema]}
@GoogleBaseHook.fallback_to_default_project_id
def update_table_schema(
self,
schema_fields_updates: list[dict[str, Any]],
include_policy_tags: bool,
dataset_id: str,
table_id: str,
project_id: str | None = None,
) -> dict[str, Any]:
"""Update fields within a schema for a given dataset and table.
Note that some fields in schemas are immutable; trying to change them
will cause an exception.
If a new field is included, it will be inserted, which requires all
required fields to be set.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema
:param include_policy_tags: If set to True policy tags will be included in
the update request which requires special permissions even if unchanged
see https://cloud.google.com/bigquery/docs/column-level-security#roles
:param dataset_id: the dataset ID of the requested table to be updated
:param table_id: the table ID of the table to be updated
:param schema_fields_updates: a partial schema resource. See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema
.. code-block:: python
schema_fields_updates = [
{"name": "emp_name", "description": "Some New Description"},
{"name": "salary", "description": "Some New Description"},
{
"name": "departments",
"fields": [
{"name": "name", "description": "Some New Description"},
{"name": "type", "description": "Some New Description"},
],
},
]
:param project_id: The name of the project where we want to update the table.
"""
def _build_new_schema(
current_schema: list[dict[str, Any]], schema_fields_updates: list[dict[str, Any]]
) -> list[dict[str, Any]]:
# Turn schema_field_updates into a dict keyed on field names
schema_fields_updates_dict = {field["name"]: field for field in deepcopy(schema_fields_updates)}
# Create a new dict for storing the new schema, initialized from the current_schema
# (as of Python 3.6, dicts retain insertion order).
new_schema = {field["name"]: field for field in deepcopy(current_schema)}
# Each item in schema_fields_updates contains a potential patch
# to a schema field, iterate over them
for field_name, patched_value in schema_fields_updates_dict.items():
# If this field already exists, update it
if field_name in new_schema:
# If this field is of type RECORD and has a fields key we need to patch it recursively
if "fields" in patched_value:
patched_value["fields"] = _build_new_schema(
new_schema[field_name]["fields"], patched_value["fields"]
)
# Update the new_schema with the patched value
new_schema[field_name].update(patched_value)
# This is a new field, just include the whole configuration for it
else:
new_schema[field_name] = patched_value
return list(new_schema.values())
def _remove_policy_tags(schema: list[dict[str, Any]]):
for field in schema:
if "policyTags" in field:
del field["policyTags"]
if "fields" in field:
_remove_policy_tags(field["fields"])
current_table_schema = self.get_schema(
dataset_id=dataset_id, table_id=table_id, project_id=project_id
)["fields"]
new_schema = _build_new_schema(current_table_schema, schema_fields_updates)
if not include_policy_tags:
_remove_policy_tags(new_schema)
table = self.update_table(
table_resource={"schema": {"fields": new_schema}},
fields=["schema"],
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
return table
@GoogleBaseHook.fallback_to_default_project_id
def poll_job_complete(
self,
job_id: str,
project_id: str | None = None,
location: str | None = None,
retry: Retry = DEFAULT_RETRY,
) -> bool:
"""Check if jobs have completed.
:param job_id: id of the job.
:param project_id: Google Cloud Project where the job is running
:param location: location the job is running
:param retry: How to retry the RPC.
"""
location = location or self.location
job = self.get_client(project_id=project_id, location=location).get_job(job_id=job_id)
return job.done(retry=retry)
def cancel_query(self) -> None:
"""Cancel all started queries that have not yet completed."""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.cancel_job`.",
AirflowProviderDeprecationWarning,
)
if self.running_job_id:
self.cancel_job(job_id=self.running_job_id)
else:
self.log.info("No running BigQuery jobs to cancel.")
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
job_id: str,
project_id: str | None = None,
location: str | None = None,
) -> None:
"""Cancel a job and wait for cancellation to complete.
:param job_id: id of the job.
:param project_id: Google Cloud Project where the job is running
:param location: location the job is running
"""
project_id = project_id or self.project_id
location = location or self.location
if self.poll_job_complete(job_id=job_id, project_id=project_id, location=location):
self.log.info("No running BigQuery jobs to cancel.")
return
self.log.info("Attempting to cancel job : %s, %s", project_id, job_id)
self.get_client(location=location, project_id=project_id).cancel_job(job_id=job_id)
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts += 1
job_complete = self.poll_job_complete(job_id=job_id, project_id=project_id, location=location)
if job_complete:
self.log.info("Job successfully canceled: %s, %s", project_id, job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job %s, %s "
"has not completed cancel and may or may not finish.",
project_id,
job_id,
)
else:
self.log.info("Waiting for canceled job %s, %s to finish.", project_id, job_id)
time.sleep(5)
@GoogleBaseHook.fallback_to_default_project_id
def get_job(
self,
job_id: str,
project_id: str | None = None,
location: str | None = None,
) -> CopyJob | QueryJob | LoadJob | ExtractJob | UnknownJob:
"""Retrieve a BigQuery job.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
characters.
:param project_id: Google Cloud Project where the job is running
:param location: location the job is running
"""
client = self.get_client(project_id=project_id, location=location)
job = client.get_job(job_id=job_id, project=project_id, location=location)
return job
@staticmethod
def _custom_job_id(configuration: dict[str, Any]) -> str:
hash_base = json.dumps(configuration, sort_keys=True)
uniqueness_suffix = md5(hash_base.encode()).hexdigest()
microseconds_from_epoch = int(
(datetime.now() - datetime.fromtimestamp(0)) / timedelta(microseconds=1)
)
return f"airflow_{microseconds_from_epoch}_{uniqueness_suffix}"
@GoogleBaseHook.fallback_to_default_project_id
def insert_job(
self,
configuration: dict,
job_id: str | None = None,
project_id: str | None = None,
location: str | None = None,
nowait: bool = False,
retry: Retry = DEFAULT_RETRY,
timeout: float | None = None,
) -> BigQueryJob:
"""Execute a BigQuery job and wait for it to complete.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
:param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
characters. If not provided then uuid will be generated.
:param project_id: Google Cloud Project where the job is running.
:param location: Location the job is running.
:param nowait: Whether to insert job without waiting for the result.
:param retry: How to retry the RPC.
:param timeout: The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
:return: The BigQuery job object.
"""
location = location or self.location
job_id = job_id or self._custom_job_id(configuration)
client = self.get_client(project_id=project_id, location=location)
job_data = {
"configuration": configuration,
"jobReference": {"jobId": job_id, "projectId": project_id, "location": location},
}
supported_jobs: dict[str, type[CopyJob] | type[QueryJob] | type[LoadJob] | type[ExtractJob]] = {
LoadJob._JOB_TYPE: LoadJob,
CopyJob._JOB_TYPE: CopyJob,
ExtractJob._JOB_TYPE: ExtractJob,
QueryJob._JOB_TYPE: QueryJob,
}
job: type[CopyJob] | type[QueryJob] | type[LoadJob] | type[ExtractJob] | None = None
for job_type, job_object in supported_jobs.items():
if job_type in configuration:
job = job_object
break
if not job:
raise AirflowException(f"Unknown job type. Supported types: {supported_jobs.keys()}")
job_api_repr = job.from_api_repr(job_data, client)
self.log.info("Inserting job %s", job_api_repr.job_id)
if nowait:
# Initiate the job and don't wait for it to complete.
job_api_repr._begin()
else:
# Start the job and wait for it to complete and get the result.
job_api_repr.result(timeout=timeout, retry=retry)
return job_api_repr
def run_with_configuration(self, configuration: dict) -> str:
"""Execute a BigQuery SQL query.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs
This method is deprecated. Please use :func:`.insert_job` instead.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job`",
AirflowProviderDeprecationWarning,
)
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_load(
self,
destination_project_dataset_table: str,
source_uris: list,
schema_fields: list | None = None,
source_format: str = "CSV",
create_disposition: str = "CREATE_IF_NEEDED",
skip_leading_rows: int = 0,
write_disposition: str = "WRITE_EMPTY",
field_delimiter: str = ",",
max_bad_records: int = 0,
quote_character: str | None = None,
ignore_unknown_values: bool = False,
allow_quoted_newlines: bool = False,
allow_jagged_rows: bool = False,
encoding: str = "UTF-8",
schema_update_options: Iterable | None = None,
src_fmt_configs: dict | None = None,
time_partitioning: dict | None = None,
cluster_fields: list | None = None,
autodetect: bool = False,
encryption_configuration: dict | None = None,
labels: dict | None = None,
description: str | None = None,
) -> str:
"""Load data from Google Cloud Storage to BigQuery.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs
This method is deprecated. Please use :func:`.insert_job` instead.
:param destination_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table to load data into. If ``<project>`` is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:param source_format: File format to export.
:param create_disposition: The create disposition if the table doesn't exist.
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:param write_disposition: The write disposition if the table already exists.
:param field_delimiter: The delimiter to use when loading from a CSV.
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:param quote_character: The value that is used to quote data sections in a CSV
file.
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:param encoding: The character encoding of the data.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:param src_fmt_configs: configure optional fields specific to the source format
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
        :param labels: A dictionary containing labels for the BigQuery table.
:param description: A string containing the description for the BigQuery table.
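        Although this method is deprecated in favour of :func:`.insert_job`, an
        illustrative call looks like the following (bucket, table and schema names
        are hypothetical):
        .. code-block:: python
            hook.run_load(
                destination_project_dataset_table="my_dataset.my_table",
                source_uris=["gs://my-bucket/data.csv"],
                schema_fields=[{"name": "name", "type": "STRING", "mode": "NULLABLE"}],
                source_format="CSV",
                skip_leading_rows=1,
                write_disposition="WRITE_TRUNCATE",
            )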
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.",
AirflowProviderDeprecationWarning,
)
if not self.project_id:
raise ValueError("The project_id should be set")
# To provide backward compatibility
schema_update_options = list(schema_update_options or [])
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat # noqa
if schema_fields is None and not autodetect:
raise ValueError("You must either pass a schema or autodetect=True.")
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV",
"NEWLINE_DELIMITED_JSON",
"AVRO",
"GOOGLE_SHEETS",
"DATASTORE_BACKUP",
"PARQUET",
]
if source_format not in allowed_formats:
raise ValueError(
f"{source_format} is not a valid source format. "
f"Please use one of the following types: {allowed_formats}."
)
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = ["ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
f"{schema_update_options} contains invalid schema update options. "
f"Please only use one or more of the following options: {allowed_schema_update_options}"
)
destination_project, destination_dataset, destination_table = self.split_tablename(
table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name="destination_project_dataset_table",
)
configuration: dict[str, Any] = {
"load": {
"autodetect": autodetect,
"createDisposition": create_disposition,
"destinationTable": {
"projectId": destination_project,
"datasetId": destination_dataset,
"tableId": destination_table,
},
"sourceFormat": source_format,
"sourceUris": source_uris,
"writeDisposition": write_disposition,
"ignoreUnknownValues": ignore_unknown_values,
}
}
time_partitioning = _cleanse_time_partitioning(destination_project_dataset_table, time_partitioning)
if time_partitioning:
configuration["load"].update({"timePartitioning": time_partitioning})
if cluster_fields:
configuration["load"].update({"clustering": {"fields": cluster_fields}})
if schema_fields:
configuration["load"]["schema"] = {"fields": schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
else:
self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options)
configuration["load"]["schemaUpdateOptions"] = schema_update_options
if max_bad_records:
configuration["load"]["maxBadRecords"] = max_bad_records
if encryption_configuration:
configuration["load"]["destinationEncryptionConfiguration"] = encryption_configuration
if labels or description:
configuration["load"].update({"destinationTableProperties": {}})
if labels:
configuration["load"]["destinationTableProperties"]["labels"] = labels
if description:
configuration["load"]["destinationTableProperties"]["description"] = description
src_fmt_to_configs_mapping = {
"CSV": [
"allowJaggedRows",
"allowQuotedNewlines",
"autodetect",
"fieldDelimiter",
"skipLeadingRows",
"ignoreUnknownValues",
"nullMarker",
"quote",
"encoding",
"preserveAsciiControlCharacters",
],
"DATASTORE_BACKUP": ["projectionFields"],
"NEWLINE_DELIMITED_JSON": ["autodetect", "ignoreUnknownValues"],
"PARQUET": ["autodetect", "ignoreUnknownValues"],
"AVRO": ["useAvroLogicalTypes"],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
"skipLeadingRows": skip_leading_rows,
"fieldDelimiter": field_delimiter,
"ignoreUnknownValues": ignore_unknown_values,
"quote": quote_character,
"allowQuotedNewlines": allow_quoted_newlines,
"encoding": encoding,
}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
configuration["load"].update(src_fmt_configs)
if allow_jagged_rows:
configuration["load"]["allowJaggedRows"] = allow_jagged_rows
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_copy(
self,
source_project_dataset_tables: list | str,
destination_project_dataset_table: str,
write_disposition: str = "WRITE_EMPTY",
create_disposition: str = "CREATE_IF_NEEDED",
labels: dict | None = None,
encryption_configuration: dict | None = None,
) -> str:
"""Copy data from one BigQuery table to another.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
This method is deprecated. Please use :func:`.insert_job` instead.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:param write_disposition: The write disposition if the table already exists.
:param create_disposition: The create disposition if the table doesn't exist.
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
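        Although this method is deprecated in favour of :func:`.insert_job`, an
        illustrative call looks like the following (table names are hypothetical):
        .. code-block:: python
            hook.run_copy(
                source_project_dataset_tables="my_dataset.source_table",
                destination_project_dataset_table="my_dataset.dest_table",
                write_disposition="WRITE_TRUNCATE",
            )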
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.",
AirflowProviderDeprecationWarning,
)
if not self.project_id:
raise ValueError("The project_id should be set")
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables
)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = self.split_tablename(
table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name="source_project_dataset_table",
)
source_project_dataset_tables_fixup.append(
{"projectId": source_project, "datasetId": source_dataset, "tableId": source_table}
)
destination_project, destination_dataset, destination_table = self.split_tablename(
table_input=destination_project_dataset_table, default_project_id=self.project_id
)
configuration = {
"copy": {
"createDisposition": create_disposition,
"writeDisposition": write_disposition,
"sourceTables": source_project_dataset_tables_fixup,
"destinationTable": {
"projectId": destination_project,
"datasetId": destination_dataset,
"tableId": destination_table,
},
}
}
if labels:
configuration["labels"] = labels
if encryption_configuration:
configuration["copy"]["destinationEncryptionConfiguration"] = encryption_configuration
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_extract(
self,
source_project_dataset_table: str,
destination_cloud_storage_uris: list[str],
compression: str = "NONE",
export_format: str = "CSV",
field_delimiter: str = ",",
print_header: bool = True,
labels: dict | None = None,
return_full_job: bool = False,
) -> str | BigQueryJob:
"""Copy data from BigQuery to Google Cloud Storage.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs
This method is deprecated. Please use :func:`.insert_job` instead.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:param compression: Type of compression to use.
:param export_format: File format to export.
:param field_delimiter: The delimiter to use when extracting to a CSV.
:param print_header: Whether to print a header for a CSV file extract.
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:param return_full_job: return full job instead of job id only
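        Although this method is deprecated in favour of :func:`.insert_job`, an
        illustrative call looks like the following (table and bucket names are
        hypothetical):
        .. code-block:: python
            job_id = hook.run_extract(
                source_project_dataset_table="my_dataset.my_table",
                destination_cloud_storage_uris=["gs://my-bucket/export-*.csv"],
                export_format="CSV",
            )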
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.",
AirflowProviderDeprecationWarning,
)
if not self.project_id:
raise ValueError("The project_id should be set")
source_project, source_dataset, source_table = self.split_tablename(
table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name="source_project_dataset_table",
)
configuration: dict[str, Any] = {
"extract": {
"sourceTable": {
"projectId": source_project,
"datasetId": source_dataset,
"tableId": source_table,
},
"compression": compression,
"destinationUris": destination_cloud_storage_uris,
"destinationFormat": export_format,
}
}
if labels:
configuration["labels"] = labels
if export_format == "CSV":
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration["extract"]["fieldDelimiter"] = field_delimiter
configuration["extract"]["printHeader"] = print_header
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
if return_full_job:
return job
return job.job_id
def run_query(
self,
sql: str,
destination_dataset_table: str | None = None,
write_disposition: str = "WRITE_EMPTY",
allow_large_results: bool = False,
flatten_results: bool | None = None,
udf_config: list | None = None,
use_legacy_sql: bool | None = None,
maximum_billing_tier: int | None = None,
maximum_bytes_billed: float | None = None,
create_disposition: str = "CREATE_IF_NEEDED",
query_params: list | None = None,
labels: dict | None = None,
schema_update_options: Iterable | None = None,
priority: str | None = None,
time_partitioning: dict | None = None,
api_resource_configs: dict | None = None,
cluster_fields: list[str] | None = None,
location: str | None = None,
encryption_configuration: dict | None = None,
) -> str:
"""Execute a BigQuery SQL query.
Optionally persists results in a BigQuery table.
.. seealso:: https://cloud.google.com/bigquery/docs/reference/v2/jobs
        This method is deprecated. Please use :func:`.insert_job` instead;
        see the link above for more details about these parameters.
:param sql: The BigQuery SQL to execute.
:param destination_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:param allow_large_results: Whether to allow large results.
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:param api_resource_configs: a dictionary that contain params
'configuration' applied for Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. You could use it
if you need to provide some params that are not supported by the
BigQueryHook like args.
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:param query_params: a list of dictionary containing query parameter types and
values, passed to BigQuery
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
If `None`, defaults to `self.priority`.
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
}
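        Although this method is deprecated in favour of :func:`.insert_job`, an
        illustrative call looks like the following (the SQL and table names are
        hypothetical):
        .. code-block:: python
            job_id = hook.run_query(
                sql="SELECT COUNT(*) AS cnt FROM `my-project.my_dataset.my_table`",
                use_legacy_sql=False,
                destination_dataset_table="my_dataset.result_table",
                write_disposition="WRITE_TRUNCATE",
            )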
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.",
AirflowProviderDeprecationWarning,
)
if not self.project_id:
raise ValueError("The project_id should be set")
labels = labels or self.labels
schema_update_options = list(schema_update_options or [])
priority = priority or self.priority
if time_partitioning is None:
time_partitioning = {}
if not api_resource_configs:
api_resource_configs = self.api_resource_configs
else:
_validate_value("api_resource_configs", api_resource_configs, dict)
configuration = deepcopy(api_resource_configs)
if "query" not in configuration:
configuration["query"] = {}
else:
_validate_value("api_resource_configs['query']", configuration["query"], dict)
if sql is None and not configuration["query"].get("query", None):
raise TypeError("`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`")
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions # noqa
allowed_schema_update_options = ["ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
f"{schema_update_options} contains invalid schema update options."
f" Please only use one or more of the following options: {allowed_schema_update_options}"
)
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
if destination_dataset_table:
destination_project, destination_dataset, destination_table = self.split_tablename(
table_input=destination_dataset_table, default_project_id=self.project_id
)
destination_dataset_table = { # type: ignore
"projectId": destination_project,
"datasetId": destination_dataset,
"tableId": destination_table,
}
if cluster_fields:
cluster_fields = {"fields": cluster_fields} # type: ignore
query_param_list: list[tuple[Any, str, str | bool | None | dict, type | tuple[type]]] = [
(sql, "query", None, (str,)),
(priority, "priority", priority, (str,)),
(use_legacy_sql, "useLegacySql", self.use_legacy_sql, bool),
(query_params, "queryParameters", None, list),
(udf_config, "userDefinedFunctionResources", None, list),
(maximum_billing_tier, "maximumBillingTier", None, int),
(maximum_bytes_billed, "maximumBytesBilled", None, float),
(time_partitioning, "timePartitioning", {}, dict),
(schema_update_options, "schemaUpdateOptions", None, list),
(destination_dataset_table, "destinationTable", None, dict),
(cluster_fields, "clustering", None, dict),
]
for param, param_name, param_default, param_type in query_param_list:
if param_name not in configuration["query"] and param in [None, {}, ()]:
if param_name == "timePartitioning":
param_default = _cleanse_time_partitioning(destination_dataset_table, time_partitioning)
param = param_default
if param in [None, {}, ()]:
continue
_api_resource_configs_duplication_check(param_name, param, configuration["query"])
configuration["query"][param_name] = param
            # Check the type of the provided param last, because the param can
            # come from two sources (arguments or api_resource_configs) and has
            # to be resolved first.
_validate_value(param_name, configuration["query"][param_name], param_type)
if param_name == "schemaUpdateOptions" and param:
self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options)
if param_name != "destinationTable":
continue
for key in ["projectId", "datasetId", "tableId"]:
if key not in configuration["query"]["destinationTable"]:
raise ValueError(
"Not correct 'destinationTable' in "
"api_resource_configs. 'destinationTable' "
"must be a dict with {'projectId':'', "
"'datasetId':'', 'tableId':''}"
)
configuration["query"].update(
{
"allowLargeResults": allow_large_results,
"flattenResults": flatten_results,
"writeDisposition": write_disposition,
"createDisposition": create_disposition,
}
)
if (
"useLegacySql" in configuration["query"]
and configuration["query"]["useLegacySql"]
and "queryParameters" in configuration["query"]
):
raise ValueError("Query parameters are not allowed when using legacy SQL")
if labels:
_api_resource_configs_duplication_check("labels", labels, configuration)
configuration["labels"] = labels
if encryption_configuration:
configuration["query"]["destinationEncryptionConfiguration"] = encryption_configuration
job = self.insert_job(configuration=configuration, project_id=self.project_id, location=location)
self.running_job_id = job.job_id
return job.job_id
def generate_job_id(self, job_id, dag_id, task_id, logical_date, configuration, force_rerun=False):
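        """Generate a BigQuery-safe job id for a task run.
        A sketch of the resulting shape (argument values are illustrative, and
        ``pendulum`` is only used here to build a logical date):
        .. code-block:: python
            import pendulum
            hook.generate_job_id(
                job_id=None,
                dag_id="example_dag",
                task_id="run_query",
                logical_date=pendulum.datetime(2024, 1, 1),
                configuration={"query": {"query": "SELECT 1"}},
            )
            # -> "airflow_example_dag_run_query_2024_01_01T00_00_00_00_00_<md5 suffix>"
        """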
if force_rerun:
hash_base = str(uuid.uuid4())
else:
hash_base = json.dumps(configuration, sort_keys=True)
uniqueness_suffix = md5(hash_base.encode()).hexdigest()
if job_id:
return f"{job_id}_{uniqueness_suffix}"
exec_date = logical_date.isoformat()
job_id = f"airflow_{dag_id}_{task_id}_{exec_date}_{uniqueness_suffix}"
return re.sub(r"[:\-+.]", "_", job_id)
def split_tablename(
self, table_input: str, default_project_id: str, var_name: str | None = None
) -> tuple[str, str, str]:
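        """Split a BigQuery table reference into ``(project_id, dataset_id, table_id)``.
        A sketch of the accepted inputs (all names are hypothetical):
        .. code-block:: python
            hook.split_tablename("my-project.my_dataset.my_table", "default-project")
            # -> ("my-project", "my_dataset", "my_table")
            hook.split_tablename("my_dataset.my_table", "default-project")
            # -> ("default-project", "my_dataset", "my_table")
        """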
if "." not in table_input:
raise ValueError(f"Expected table name in the format of <dataset>.<table>. Got: {table_input}")
if not default_project_id:
raise ValueError("INTERNAL: No default project is specified")
def var_print(var_name):
if var_name is None:
return ""
else:
return f"Format exception for {var_name}: "
if table_input.count(".") + table_input.count(":") > 3:
raise Exception(f"{var_print(var_name)}Use either : or . to specify project got {table_input}")
cmpt = table_input.rsplit(":", 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(":") <= 1:
if cmpt[-1].count(".") != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(
f"{var_print(var_name)}Expect format of (<project:)<dataset>.<table>, got {table_input}"
)
cmpt = rest.split(".")
if len(cmpt) == 3:
if project_id:
raise ValueError(f"{var_print(var_name)}Use either : or . to specify project")
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
f"{var_print(var_name)} Expect format of (<project.|<project:)<dataset>.<table>, "
f"got {table_input}"
)
if project_id is None:
if var_name is not None:
self.log.info(
'Project is not included in %s: %s; using project "%s"',
var_name,
table_input,
default_project_id,
)
project_id = default_project_id
return project_id, dataset_id, table_id
class BigQueryConnection:
"""BigQuery connection.
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
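    A minimal sketch of how the factory is used (the ``service``, ``project_id``
    and ``hook`` arguments are assumed to be prepared by the surrounding hook):
    .. code-block:: python
        conn = BigQueryConnection(service=service, project_id="my-project", hook=hook)
        cursor = conn.cursor()
        cursor.execute("SELECT 1")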
"""
def __init__(self, *args, **kwargs) -> None:
self._args = args
self._kwargs = kwargs
def close(self) -> None:
"""The BigQueryConnection does not have anything to close."""
def commit(self) -> None:
"""The BigQueryConnection does not support transactions."""
def cursor(self) -> BigQueryCursor:
"""Return a new :py:class:`Cursor` object using the connection."""
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self) -> NoReturn:
"""The BigQueryConnection does not have transactions."""
raise NotImplementedError("BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""BigQuery cursor.
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(
self,
service: Any,
project_id: str,
hook: BigQueryHook,
use_legacy_sql: bool = True,
api_resource_configs: dict | None = None,
location: str | None = None,
num_retries: int = 5,
labels: dict | None = None,
) -> None:
super().__init__()
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
if api_resource_configs:
_validate_value("api_resource_configs", api_resource_configs, dict)
self.api_resource_configs: dict = api_resource_configs if api_resource_configs else {}
self.running_job_id: str | None = None
self.location = location
self.num_retries = num_retries
self.labels = labels
self.hook = hook
def create_empty_table(self, *args, **kwargs):
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.create_empty_table(*args, **kwargs)
def create_empty_dataset(self, *args, **kwargs) -> dict[str, Any]:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.create_empty_dataset(*args, **kwargs)
def get_dataset_tables(self, *args, **kwargs) -> list[dict[str, Any]]:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset_tables(*args, **kwargs)
def delete_dataset(self, *args, **kwargs) -> None:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.delete_dataset(*args, **kwargs)
def create_external_table(self, *args, **kwargs):
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.create_external_table(*args, **kwargs)
def patch_table(self, *args, **kwargs) -> None:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.patch_table(*args, **kwargs)
def insert_all(self, *args, **kwargs) -> None:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.insert_all(*args, **kwargs)
def update_dataset(self, *args, **kwargs) -> dict:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return Dataset.to_api_repr(self.hook.update_dataset(*args, **kwargs))
def patch_dataset(self, *args, **kwargs) -> dict:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.patch_dataset(*args, **kwargs)
def get_dataset_tables_list(self, *args, **kwargs) -> list[dict[str, Any]]:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset_tables_list(*args, **kwargs)
def get_datasets_list(self, *args, **kwargs) -> list | HTTPIterator:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.get_datasets_list(*args, **kwargs)
def get_dataset(self, *args, **kwargs) -> Dataset:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset(*args, **kwargs)
def run_grant_dataset_view_access(self, *args, **kwargs) -> dict:
"""This method is deprecated.
Please use
:func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_grant_dataset_view_access`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks"
".bigquery.BigQueryHook.run_grant_dataset_view_access`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_grant_dataset_view_access(*args, **kwargs)
def run_table_upsert(self, *args, **kwargs) -> dict:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_table_upsert(*args, **kwargs)
def run_table_delete(self, *args, **kwargs) -> None:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_table_delete(*args, **kwargs)
def get_tabledata(self, *args, **kwargs) -> list[dict]:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.get_tabledata(*args, **kwargs)
def get_schema(self, *args, **kwargs) -> dict:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.get_schema(*args, **kwargs)
def poll_job_complete(self, *args, **kwargs) -> bool:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.poll_job_complete(*args, **kwargs)
def cancel_query(self, *args, **kwargs) -> None:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.cancel_query(*args, **kwargs) # type: ignore
def run_with_configuration(self, *args, **kwargs) -> str:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_with_configuration(*args, **kwargs)
def run_load(self, *args, **kwargs) -> str:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_load(*args, **kwargs)
def run_copy(self, *args, **kwargs) -> str:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_copy(*args, **kwargs)
def run_extract(self, *args, **kwargs) -> str | BigQueryJob:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_extract(*args, **kwargs)
def run_query(self, *args, **kwargs) -> str:
"""This method is deprecated.
Please use :func:`~airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query`
instead.
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query`",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
return self.hook.run_query(*args, **kwargs)
class BigQueryCursor(BigQueryBaseCursor):
"""A very basic BigQuery PEP 249 cursor implementation.
The PyHive PEP 249 implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
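    A minimal PEP 249-style usage sketch (the ``service`` and ``hook`` objects are
    assumed to come from :class:`BigQueryConnection` or the surrounding hook):
    .. code-block:: python
        cursor = BigQueryCursor(service=service, project_id="my-project", hook=hook)
        cursor.execute("SELECT name FROM my_dataset.my_table LIMIT 5")
        rows = cursor.fetchall()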
"""
def __init__(
self,
service: Any,
project_id: str,
hook: BigQueryHook,
use_legacy_sql: bool = True,
location: str | None = None,
num_retries: int = 5,
) -> None:
super().__init__(
service=service,
project_id=project_id,
hook=hook,
use_legacy_sql=use_legacy_sql,
location=location,
num_retries=num_retries,
)
self.buffersize: int | None = None
self.page_token: str | None = None
self.job_id: str | None = None
self.buffer: list = []
self.all_pages_loaded: bool = False
self._description: list = []
@property
def description(self) -> list:
"""Return the cursor description."""
return self._description
@description.setter
def description(self, value):
self._description = value
def close(self) -> None:
"""By default, do nothing."""
@property
def rowcount(self) -> int:
"""By default, return -1 to indicate that this is not supported."""
return -1
def execute(self, operation: str, parameters: dict | None = None) -> None:
"""Execute a BigQuery query, and return the job ID.
:param operation: The query to execute.
:param parameters: Parameters to substitute into the query.
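        A short illustrative call using pyformat-style parameters (table and column
        names are hypothetical):
        .. code-block:: python
            cursor.execute("SELECT * FROM my_dataset.my_table WHERE id = %(id)s", {"id": 42})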
"""
sql = _bind_parameters(operation, parameters) if parameters else operation
self.flush_results()
self.job_id = self.hook.run_query(sql)
query_results = self._get_query_result()
if "schema" in query_results:
self.description = _format_schema_for_description(query_results["schema"])
else:
self.description = []
def executemany(self, operation: str, seq_of_parameters: list) -> None:
"""Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def flush_results(self) -> None:
"""Flush results related cursor attributes."""
self.page_token = None
self.job_id = None
self.all_pages_loaded = False
self.buffer = []
def fetchone(self) -> list | None:
"""Fetch the next row of a query result set."""
return self.next()
def next(self) -> list | None:
"""Return the next row from a buffer.
Helper method for ``fetchone``.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if not self.buffer:
if self.all_pages_loaded:
return None
query_results = self._get_query_result()
if "rows" in query_results and query_results["rows"]:
self.page_token = query_results.get("pageToken")
fields = query_results["schema"]["fields"]
col_types = [field["type"] for field in fields]
rows = query_results["rows"]
for dict_row in rows:
typed_row = [bq_cast(vs["v"], col_types[idx]) for idx, vs in enumerate(dict_row["f"])]
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.flush_results()
return None
return self.buffer.pop(0)
def fetchmany(self, size: int | None = None) -> list:
"""Fetch the next set of rows of a query result.
This returns a sequence of sequences (e.g. a list of tuples). An empty
sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If
it is not given, the cursor's arraysize determines the number of rows to
be fetched.
This method tries to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows
not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if
the previous call to :py:meth:`execute` did not produce any result set,
or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
result.append(one)
return result
def fetchall(self) -> list[list]:
"""Fetch all (remaining) rows of a query result.
A sequence of sequences (e.g. a list of tuples) is returned.
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
result.append(one)
return result
def get_arraysize(self) -> int:
"""Number of rows to fetch at a time.
.. seealso:: :func:`.fetchmany()`
"""
return self.buffersize or 1
def set_arraysize(self, arraysize: int) -> None:
"""Set the number of rows to fetch at a time.
.. seealso:: :func:`.fetchmany()`
"""
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes: Any) -> None:
"""Does nothing by default."""
def setoutputsize(self, size: Any, column: Any = None) -> None:
"""Does nothing by default."""
def _get_query_result(self) -> dict:
"""Get job query results; data, schema, job type, etc."""
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
location=self.location,
pageToken=self.page_token,
)
.execute(num_retries=self.num_retries)
)
return query_results
def _bind_parameters(operation: str, parameters: dict) -> str:
"""Helper method that binds parameters to a SQL query."""
# inspired by MySQL Python Connector (conversion.py)
    string_parameters = {}  # type: dict[str, str]
for (name, value) in parameters.items():
if value is None:
string_parameters[name] = "NULL"
elif isinstance(value, str):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s: str) -> str:
"""Helper method that escapes parameters to a SQL query."""
e = s
e = e.replace("\\", "\\\\")
e = e.replace("\n", "\\n")
e = e.replace("\r", "\\r")
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def split_tablename(
table_input: str, default_project_id: str, var_name: str | None = None
) -> tuple[str, str, str]:
if "." not in table_input:
raise ValueError(f"Expected table name in the format of <dataset>.<table>. Got: {table_input}")
if not default_project_id:
raise ValueError("INTERNAL: No default project is specified")
def var_print(var_name):
if var_name is None:
return ""
else:
return f"Format exception for {var_name}: "
if table_input.count(".") + table_input.count(":") > 3:
raise Exception(f"{var_print(var_name)}Use either : or . to specify project got {table_input}")
cmpt = table_input.rsplit(":", 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(":") <= 1:
if cmpt[-1].count(".") != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(
f"{var_print(var_name)}Expect format of (<project:)<dataset>.<table>, got {table_input}"
)
cmpt = rest.split(".")
if len(cmpt) == 3:
if project_id:
raise ValueError(f"{var_print(var_name)}Use either : or . to specify project")
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
f"{var_print(var_name)}Expect format of (<project.|<project:)<dataset>.<table>, got {table_input}"
)
if project_id is None:
if var_name is not None:
log.info(
'Project is not included in %s: %s; using project "%s"',
var_name,
table_input,
default_project_id,
)
project_id = default_project_id
return project_id, dataset_id, table_id
def _cleanse_time_partitioning(
destination_dataset_table: str | None, time_partitioning_in: dict | None
) -> dict: # if it is a partitioned table ($ is in the table name) add partition load option
if time_partitioning_in is None:
time_partitioning_in = {}
time_partitioning_out = {}
if destination_dataset_table and "$" in destination_dataset_table:
time_partitioning_out["type"] = "DAY"
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
def _validate_value(key: Any, value: Any, expected_type: type | tuple[type]) -> None:
"""Check expected type and raise error if type is not correct."""
if not isinstance(value, expected_type):
raise TypeError(f"{key} argument must have a type {expected_type} not {type(value)}")
def _api_resource_configs_duplication_check(
key: Any, value: Any, config_dict: dict, config_dict_name="api_resource_configs"
) -> None:
if key in config_dict and value != config_dict[key]:
raise ValueError(
"Values of {param_name} param are duplicated. "
"{dict_name} contained {param_name} param "
"in `query` config and {param_name} was also provided "
"with arg to run_query() method. Please remove duplicates.".format(
param_name=key, dict_name=config_dict_name
)
)
def _validate_src_fmt_configs(
source_format: str,
src_fmt_configs: dict,
valid_configs: list[str],
backward_compatibility_configs: dict | None = None,
) -> dict:
"""Validate ``src_fmt_configs`` against a valid config for the source format.
Adds the backward compatibility config to ``src_fmt_configs``.
:param source_format: File format to export.
:param src_fmt_configs: Configure optional fields specific to the source format.
:param valid_configs: Valid configuration specific to the source format
:param backward_compatibility_configs: The top-level params for backward-compatibility
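    A short illustrative call (values are hypothetical):
    .. code-block:: python
        _validate_src_fmt_configs(
            source_format="CSV",
            src_fmt_configs={"quote": '"'},
            valid_configs=["quote", "encoding"],
            backward_compatibility_configs={"encoding": "UTF-8"},
        )
        # -> {"quote": '"', "encoding": "UTF-8"}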
"""
if backward_compatibility_configs is None:
backward_compatibility_configs = {}
for k, v in backward_compatibility_configs.items():
if k not in src_fmt_configs and k in valid_configs:
src_fmt_configs[k] = v
for k, v in src_fmt_configs.items():
if k not in valid_configs:
raise ValueError(f"{k} is not a valid src_fmt_configs for type {source_format}.")
return src_fmt_configs
def _format_schema_for_description(schema: dict) -> list:
"""Reformat the schema to match cursor description standard.
    The description should be a tuple of 7 elements: name, type, display_size,
internal_size, precision, scale, null_ok.
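    An illustrative transformation (field values are hypothetical):
    .. code-block:: python
        _format_schema_for_description(
            {"fields": [{"name": "id", "type": "INTEGER", "mode": "REQUIRED"}]}
        )
        # -> [("id", "INTEGER", None, None, None, None, False)]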
"""
description = []
for field in schema["fields"]:
mode = field.get("mode", "NULLABLE")
field_description = (
field["name"],
field["type"],
None,
None,
None,
None,
mode == "NULLABLE",
)
description.append(field_description)
return description
class BigQueryAsyncHook(GoogleBaseAsyncHook):
"""Uses gcloud-aio library to retrieve Job details."""
sync_hook_class = BigQueryHook
async def get_job_instance(
self, project_id: str | None, job_id: str | None, session: ClientSession
) -> Job:
"""Get the specified job resource by job ID and project ID."""
with await self.service_file_as_context() as f:
return Job(job_id=job_id, project=project_id, service_file=f, session=cast(Session, session))
async def get_job_status(
self,
job_id: str | None,
project_id: str | None = None,
) -> str | None:
"""Poll for job status asynchronously using gcloud-aio.
Note that an OSError is raised when Job results are still pending.
        Any other exception means that the job finished with errors.
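        A minimal async usage sketch (job and project ids are hypothetical):
        .. code-block:: python
            status = await hook.get_job_status(job_id="airflow_1234_abcd", project_id="my-project")
            if status == "success":
                ...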
"""
async with ClientSession() as s:
try:
self.log.info("Executing get_job_status...")
job_client = await self.get_job_instance(project_id, job_id, s)
job_status_response = await job_client.result(cast(Session, s))
                if job_status_response:
                    job_status = "success"
                else:
                    job_status = "pending"
except OSError:
job_status = "pending"
except Exception as e:
self.log.info("Query execution finished with errors...")
job_status = str(e)
return job_status
async def get_job_output(
self,
job_id: str | None,
project_id: str | None = None,
) -> dict[str, Any]:
"""Get the BigQuery job output for a given job ID asynchronously."""
async with ClientSession() as session:
self.log.info("Executing get_job_output..")
job_client = await self.get_job_instance(project_id, job_id, session)
job_query_response = await job_client.get_query_results(cast(Session, session))
return job_query_response
async def create_job_for_partition_get(
self,
dataset_id: str | None,
project_id: str | None = None,
):
"""Create a new job and get the job_id using gcloud-aio."""
async with ClientSession() as session:
self.log.info("Executing create_job..")
job_client = await self.get_job_instance(project_id, "", session)
query_request = {
"query": "SELECT partition_id "
f"FROM `{project_id}.{dataset_id}.INFORMATION_SCHEMA.PARTITIONS`",
"useLegacySql": False,
}
job_query_resp = await job_client.query(query_request, cast(Session, session))
return job_query_resp["jobReference"]["jobId"]
def get_records(self, query_results: dict[str, Any], as_dict: bool = False) -> list[Any]:
"""Convert a response from BigQuery to records.
:param query_results: the results from a SQL query
:param as_dict: if True returns the result as a list of dictionaries, otherwise as list of lists.
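        A sketch of the expected input shape, as returned by the BigQuery
        ``getQueryResults`` API (values are illustrative):
        .. code-block:: python
            query_results = {
                "schema": {"fields": [{"name": "name", "type": "STRING"}]},
                "rows": [{"f": [{"v": "Alice"}]}],
            }
            hook.get_records(query_results)  # -> [["Alice"]]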
"""
buffer: list[Any] = []
if "rows" in query_results and query_results["rows"]:
rows = query_results["rows"]
fields = query_results["schema"]["fields"]
col_types = [field["type"] for field in fields]
for dict_row in rows:
typed_row = [bq_cast(vs["v"], col_types[idx]) for idx, vs in enumerate(dict_row["f"])]
if not as_dict:
buffer.append(typed_row)
else:
fields_names = [field["name"] for field in fields]
typed_row_dict = {k: v for k, v in zip(fields_names, typed_row)}
buffer.append(typed_row_dict)
return buffer
def value_check(
self,
sql: str,
pass_value: Any,
records: list[Any],
tolerance: float | None = None,
) -> None:
"""Match a single query resulting row and tolerance with pass_value.
:raise AirflowException: if matching fails
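        An illustrative numeric check that passes within a 5% tolerance (values are
        hypothetical):
        .. code-block:: python
            hook.value_check(
                sql="SELECT COUNT(*) FROM my_dataset.my_table",
                pass_value=100,
                records=[98],
                tolerance=0.05,
            )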
"""
if not records:
raise AirflowException("The query returned None")
pass_value_conv = self._convert_to_float_if_possible(pass_value)
is_numeric_value_check = isinstance(pass_value_conv, float)
tolerance_pct_str = str(tolerance * 100) + "%" if tolerance else None
error_msg = (
"Test failed.\nPass value:{pass_value_conv}\n"
"Tolerance:{tolerance_pct_str}\n"
"Query:\n{sql}\nResults:\n{records!s}"
).format(
pass_value_conv=pass_value_conv,
tolerance_pct_str=tolerance_pct_str,
sql=sql,
records=records,
)
if not is_numeric_value_check:
tests = [str(record) == pass_value_conv for record in records]
else:
try:
numeric_records = [float(record) for record in records]
except (ValueError, TypeError):
raise AirflowException(f"Converting a result to float failed.\n{error_msg}")
tests = self._get_numeric_matches(numeric_records, pass_value_conv, tolerance)
if not all(tests):
raise AirflowException(error_msg)
@staticmethod
def _get_numeric_matches(
records: list[float], pass_value: Any, tolerance: float | None = None
) -> list[bool]:
"""Match numeric pass_value, tolerance with records value.
:param records: List of value to match against
:param pass_value: Expected value
:param tolerance: Allowed tolerance for match to succeed
"""
if tolerance:
return [
pass_value * (1 - tolerance) <= record <= pass_value * (1 + tolerance) for record in records
]
return [record == pass_value for record in records]
@staticmethod
def _convert_to_float_if_possible(s: Any) -> Any:
"""Convert a string to a numeric value if appropriate.
:param s: the string to be converted
"""
try:
return float(s)
except (ValueError, TypeError):
return s
def interval_check(
self,
row1: str | None,
row2: str | None,
metrics_thresholds: dict[str, Any],
ignore_zero: bool,
ratio_formula: str,
) -> None:
"""Check values of metrics (SQL expressions) are within a certain tolerance.
:param row1: first resulting row of a query execution job for first SQL query
:param row2: first resulting row of a query execution job for second SQL query
:param metrics_thresholds: a dictionary of ratios indexed by metrics, for
example 'COUNT(*)': 1.5 would require a 50 percent or less difference
between the current day, and the prior days_back.
:param ignore_zero: whether we should ignore zero metrics
:param ratio_formula: which formula to use to compute the ratio between
the two metrics. Assuming cur is the metric of today and ref is
            the metric of today - days_back.
max_over_min: computes max(cur, ref) / min(cur, ref)
relative_diff: computes abs(cur-ref) / ref
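        An illustrative check that passes, with rows given as lists of metric values
        in the order of the sorted metric names (values are hypothetical):
        .. code-block:: python
            hook.interval_check(
                row1=[100],
                row2=[80],
                metrics_thresholds={"COUNT(*)": 1.5},
                ignore_zero=True,
                ratio_formula="max_over_min",
            )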
"""
if not row2:
raise AirflowException("The second SQL query returned None")
if not row1:
raise AirflowException("The first SQL query returned None")
ratio_formulas = {
"max_over_min": lambda cur, ref: float(max(cur, ref)) / min(cur, ref),
"relative_diff": lambda cur, ref: float(abs(cur - ref)) / ref,
}
metrics_sorted = sorted(metrics_thresholds.keys())
current = dict(zip(metrics_sorted, row1))
reference = dict(zip(metrics_sorted, row2))
ratios: dict[str, Any] = {}
test_results: dict[str, Any] = {}
for metric in metrics_sorted:
cur = float(current[metric])
ref = float(reference[metric])
threshold = float(metrics_thresholds[metric])
if cur == 0 or ref == 0:
ratios[metric] = None
test_results[metric] = ignore_zero
else:
ratios[metric] = ratio_formulas[ratio_formula](
float(current[metric]), float(reference[metric])
)
test_results[metric] = float(ratios[metric]) < threshold
self.log.info(
(
"Current metric for %s: %s\n"
"Past metric for %s: %s\n"
"Ratio for %s: %s\n"
"Threshold: %s\n"
),
metric,
cur,
metric,
ref,
metric,
ratios[metric],
threshold,
)
if not all(test_results.values()):
failed_tests = [metric for metric, value in test_results.items() if not value]
self.log.warning(
"The following %s tests out of %s failed:",
len(failed_tests),
len(metrics_sorted),
)
for k in failed_tests:
self.log.warning(
"'%s' check failed. %s is above %s",
k,
ratios[k],
metrics_thresholds[k],
)
raise AirflowException(f"The following tests have failed:\n {', '.join(sorted(failed_tests))}")
self.log.info("All tests have passed")
class BigQueryTableAsyncHook(GoogleBaseAsyncHook):
"""Async hook for BigQuery Table."""
sync_hook_class = BigQueryHook
async def get_table_client(
self, dataset: str, table_id: str, project_id: str, session: ClientSession
) -> Table_async:
"""Get a Google Big Query Table object.
:param dataset: The name of the dataset in which to look for the table storage bucket.
:param table_id: The name of the table to check the existence of.
:param project_id: The Google cloud project in which to look for the table.
The connection supplied to the hook must provide
access to the specified project.
:param session: aiohttp ClientSession
"""
with await self.service_file_as_context() as file:
return Table_async(
dataset_name=dataset,
table_name=table_id,
project=project_id,
service_file=file,
session=cast(Session, session),
)
| 140,547 | 40.817316 | 136 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/looker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Looker hook."""
from __future__ import annotations
import json
import time
from enum import Enum
from looker_sdk.rtl import api_settings, auth_session, requests_transport, serialize
from looker_sdk.sdk.api40 import methods as methods40
from packaging.version import parse as parse_version
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models.connection import Connection
from airflow.version import version
class LookerHook(BaseHook):
"""Hook for Looker APIs."""
def __init__(
self,
looker_conn_id: str,
) -> None:
super().__init__()
self.looker_conn_id = looker_conn_id
# source is used to track origin of the requests
self.source = f"airflow:{version}"
def start_pdt_build(
self,
model: str,
view: str,
query_params: dict | None = None,
):
"""
Submits a PDT materialization job to Looker.
:param model: Required. The model of the PDT to start building.
:param view: Required. The view of the PDT to start building.
:param query_params: Optional. Additional materialization parameters.
"""
self.log.info("Submitting PDT materialization job. Model: '%s', view: '%s'.", model, view)
self.log.debug("PDT materialization job source: '%s'.", self.source)
sdk = self.get_looker_sdk()
looker_ver = sdk.versions().looker_release_version
if parse_version(looker_ver) < parse_version("22.2.0"):
raise AirflowException(f"This API requires Looker version 22.2+. Found: {looker_ver}.")
# unpack query_params dict into kwargs (if not None)
if query_params:
resp = sdk.start_pdt_build(model_name=model, view_name=view, source=self.source, **query_params)
else:
resp = sdk.start_pdt_build(model_name=model, view_name=view, source=self.source)
self.log.info("Start PDT build response: '%s'.", resp)
return resp
def check_pdt_build(
self,
materialization_id: str,
):
"""
Gets the PDT materialization job status from Looker.
:param materialization_id: Required. The materialization id to check status for.
"""
self.log.info("Requesting PDT materialization job status. Job id: %s.", materialization_id)
sdk = self.get_looker_sdk()
resp = sdk.check_pdt_build(materialization_id=materialization_id)
self.log.info("Check PDT build response: '%s'.", resp)
return resp
def pdt_build_status(
self,
materialization_id: str,
) -> dict:
"""
Gets the PDT materialization job status.
:param materialization_id: Required. The materialization id to check status for.
"""
resp = self.check_pdt_build(materialization_id=materialization_id)
status_json = resp["resp_text"]
status_dict = json.loads(status_json)
self.log.info(
"PDT materialization job id: %s. Status: '%s'.", materialization_id, status_dict["status"]
)
return status_dict
def stop_pdt_build(
self,
materialization_id: str,
):
"""
Starts a PDT materialization job cancellation request.
:param materialization_id: Required. The materialization id to stop.
"""
self.log.info("Stopping PDT materialization. Job id: %s.", materialization_id)
self.log.debug("PDT materialization job source: '%s'.", self.source)
sdk = self.get_looker_sdk()
resp = sdk.stop_pdt_build(materialization_id=materialization_id, source=self.source)
self.log.info("Stop PDT build response: '%s'.", resp)
return resp
def wait_for_job(
self,
materialization_id: str,
wait_time: int = 10,
timeout: int | None = None,
) -> None:
"""
Helper method which polls a PDT materialization job to check if it finishes.
:param materialization_id: Required. The materialization id to wait for.
:param wait_time: Optional. Number of seconds between checks.
        :param timeout: Optional. How many seconds to wait for the job to reach a terminal state.
            If not set, the method polls until the job finishes.
"""
self.log.info("Waiting for PDT materialization job to complete. Job id: %s.", materialization_id)
status = None
start = time.monotonic()
while status not in (
JobStatus.DONE.value,
JobStatus.ERROR.value,
JobStatus.CANCELLED.value,
JobStatus.UNKNOWN.value,
):
if timeout and start + timeout < time.monotonic():
self.stop_pdt_build(materialization_id=materialization_id)
raise AirflowException(
f"Timeout: PDT materialization job is not ready after {timeout}s. "
f"Job id: {materialization_id}."
)
time.sleep(wait_time)
status_dict = self.pdt_build_status(materialization_id=materialization_id)
status = status_dict["status"]
if status == JobStatus.ERROR.value:
msg = status_dict["message"]
raise AirflowException(
f'PDT materialization job failed. Job id: {materialization_id}. Message:\n"{msg}"'
)
if status == JobStatus.CANCELLED.value:
raise AirflowException(f"PDT materialization job was cancelled. Job id: {materialization_id}.")
if status == JobStatus.UNKNOWN.value:
raise AirflowException(
f"PDT materialization job has unknown status. Job id: {materialization_id}."
)
self.log.info("PDT materialization job completed successfully. Job id: %s.", materialization_id)
def get_looker_sdk(self):
"""Returns Looker SDK client for Looker API 4.0."""
conn = self.get_connection(self.looker_conn_id)
settings = LookerApiSettings(conn)
transport = requests_transport.RequestsTransport.configure(settings)
return methods40.Looker40SDK(
auth_session.AuthSession(settings, transport, serialize.deserialize40, "4.0"),
serialize.deserialize40,
serialize.serialize,
transport,
"4.0",
)
class LookerApiSettings(api_settings.ApiSettings):
"""Custom implementation of Looker SDK's `ApiSettings` class."""
def __init__(
self,
conn: Connection,
) -> None:
self.conn = conn # need to init before `read_config` is called in super
super().__init__()
def read_config(self):
"""
Fetches the connection settings from Airflow's connection object.
Overrides the default logic of getting connection settings.
"""
config = {}
if self.conn.host is None:
raise AirflowException(f"No `host` was supplied in connection: {self.conn.id}.")
if self.conn.port:
config["base_url"] = f"{self.conn.host}:{self.conn.port}" # port is optional
else:
config["base_url"] = self.conn.host
if self.conn.login:
config["client_id"] = self.conn.login
else:
raise AirflowException(f"No `login` was supplied in connection: {self.conn.id}.")
if self.conn.password:
config["client_secret"] = self.conn.password
else:
raise AirflowException(f"No `password` was supplied in connection: {self.conn.id}.")
extras: dict = self.conn.extra_dejson
if "verify_ssl" in extras:
config["verify_ssl"] = extras["verify_ssl"] # optional
if "timeout" in extras:
config["timeout"] = extras["timeout"] # optional
return config
class JobStatus(Enum):
"""The job status string."""
QUEUED = "added"
PENDING = "pending"
RUNNING = "running"
CANCELLED = "killed"
DONE = "complete"
ERROR = "error"
UNKNOWN = "unknown"
| 8,903 | 33.645914 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/spanner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Spanner Hook."""
from __future__ import annotations
from typing import Callable, Sequence
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError
from google.cloud.spanner_v1.client import Client
from google.cloud.spanner_v1.database import Database
from google.cloud.spanner_v1.instance import Instance
from google.cloud.spanner_v1.transaction import Transaction
from google.longrunning.operations_grpc_pb2 import Operation
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class SpannerHook(GoogleBaseHook):
"""
Hook for Google Cloud Spanner APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: Client | None = None
def _get_client(self, project_id: str) -> Client:
"""
Provides a client for interacting with the Cloud Spanner API.
:param project_id: The ID of the Google Cloud project.
:return: Client
"""
if not self._client:
self._client = Client(
project=project_id, credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
instance_id: str,
project_id: str,
) -> Instance | None:
"""
Gets information about a particular instance.
        :param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param instance_id: The ID of the Cloud Spanner instance.
:return: Spanner instance
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
return None
return instance
def _apply_to_instance(
self,
project_id: str,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
func: Callable[[Instance], Operation],
) -> None:
"""
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the Google Cloud project that owns the Cloud Spanner database.
:param instance_id: The ID of the instance.
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:param node_count: (Optional) Number of nodes allocated to the instance.
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
            in the constructor, it will fall back to the instance ID.
:param func: Method of the instance to be called.
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id,
configuration_name=configuration_name,
node_count=node_count,
display_name=display_name,
)
try:
operation: Operation = func(instance)
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: str,
) -> None:
"""
Creates a new Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:param display_name: (Optional) The display name for the instance in the Google Cloud Console.
Must be between 4 and 30 characters. If this value is not passed, the name falls back
to the instance ID.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
self._apply_to_instance(
project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.create()
)
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: str,
) -> None:
"""
Updates an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:param display_name: (Optional) The display name for the instance in the Google Cloud
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
self._apply_to_instance(
project_id, instance_id, configuration_name, node_count, display_name, lambda x: x.update()
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(self, instance_id: str, project_id: str) -> None:
"""
Deletes an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id)
try:
instance.delete()
return
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
@GoogleBaseHook.fallback_to_default_project_id
def get_database(
self,
instance_id: str,
database_id: str,
project_id: str,
) -> Database | None:
"""
        Retrieves a database in Cloud Spanner; returns None if the database does not exist in the instance.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: Database object or None if database does not exist
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id)
if not database.exists():
return None
return database
@GoogleBaseHook.fallback_to_default_project_id
def create_database(
self,
instance_id: str,
database_id: str,
ddl_statements: list[str],
project_id: str,
) -> None:
"""
Creates a new database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database to create in Cloud Spanner.
:param ddl_statements: The string list containing DDL for the new database.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id, ddl_statements=ddl_statements)
try:
operation: Operation = database.create()
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
@GoogleBaseHook.fallback_to_default_project_id
def update_database(
self,
instance_id: str,
database_id: str,
ddl_statements: list[str],
project_id: str,
operation_id: str | None = None,
) -> None:
"""
Updates DDL of a database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param ddl_statements: The string list containing DDL for the new database.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id)
try:
operation = database.update_ddl(ddl_statements=ddl_statements, operation_id=operation_id)
if operation:
result = operation.result()
self.log.info(result)
return
except AlreadyExists as e:
if e.code == 409 and operation_id in e.message:
self.log.info(
"Replayed update_ddl message - the operation id %s was already done before.",
operation_id,
)
return
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
@GoogleBaseHook.fallback_to_default_project_id
def delete_database(self, instance_id: str, database_id, project_id: str) -> bool:
"""
Drops a database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:return: True if everything succeeded
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException(f"The instance {instance_id} does not exist in project {project_id} !")
database = instance.database(database_id=database_id)
if not database.exists():
self.log.info(
"The database %s is already deleted from instance %s. Exiting.", database_id, instance_id
)
return False
try:
database.drop()
except GoogleAPICallError as e:
self.log.error("An error occurred: %s. Exiting.", e.message)
raise e
return True
@GoogleBaseHook.fallback_to_default_project_id
def execute_dml(
self,
instance_id: str,
database_id: str,
queries: list[str],
project_id: str,
) -> None:
"""
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
:param instance_id: The ID of the Cloud Spanner instance.
:param database_id: The ID of the database in Cloud Spanner.
:param queries: The queries to execute.
:param project_id: Optional, the ID of the Google Cloud project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the Google Cloud connection
is used.
"""
self._get_client(project_id=project_id).instance(instance_id=instance_id).database(
database_id=database_id
).run_in_transaction(lambda transaction: self._execute_sql_in_transaction(transaction, queries))
@staticmethod
def _execute_sql_in_transaction(transaction: Transaction, queries: list[str]):
for sql in queries:
transaction.execute_update(sql)
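# --- Illustrative sketch (editor's addition, not part of the hook) ----------
# A minimal example combining the methods above: ensure a database exists and
# then run a DML statement inside a single transaction. The instance, database
# and project ids are placeholders.
def _example_spanner_usage() -> None:
    hook = SpannerHook(gcp_conn_id="google_cloud_default")
    if not hook.get_database(
        instance_id="my-instance", database_id="my-db", project_id="my-project"
    ):
        hook.create_database(
            instance_id="my-instance",
            database_id="my-db",
            ddl_statements=["CREATE TABLE users (id INT64 NOT NULL, name STRING(64)) PRIMARY KEY (id)"],
            project_id="my-project",
        )
    hook.execute_dml(
        instance_id="my-instance",
        database_id="my-db",
        queries=["INSERT INTO users (id, name) VALUES (1, 'alice')"],
        project_id="my-project",
    )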
| 15,552 | 41.378747 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/os_login.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""OS Login hooks.
.. spelling:word-list::
ImportSshPublicKeyResponse
oslogin
"""
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.oslogin_v1 import ImportSshPublicKeyResponse, OsLoginServiceClient
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class OSLoginHook(GoogleBaseHook):
"""
Hook for Google OS login APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._conn: OsLoginServiceClient | None = None
def get_conn(self) -> OsLoginServiceClient:
"""Return OS Login service client."""
if self._conn:
return self._conn
self._conn = OsLoginServiceClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def import_ssh_public_key(
self,
user: str,
ssh_public_key: dict,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ImportSshPublicKeyResponse:
"""
Adds an SSH public key and returns the profile information.
Default POSIX account information is set when no username and UID exist as part of the login profile.
:param user: The unique ID for the user
:param ssh_public_key: The SSH public key and expiration time.
:param project_id: The project ID of the Google Cloud project.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that
if ``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: A :class:`~google.cloud.oslogin_v1.ImportSshPublicKeyResponse` instance.
"""
conn = self.get_conn()
return conn.import_ssh_public_key(
request=dict(
parent=f"users/{user}",
ssh_public_key=ssh_public_key,
project_id=project_id,
),
retry=retry,
timeout=timeout,
metadata=metadata,
)
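# --- Illustrative sketch (editor's addition, not part of the hook) ----------
# A minimal example of importing a public key through the hook above, which is
# what the Compute Engine SSH hook does under the hood. The user, key material
# and project id are placeholders; ``expiration_time_usec`` is expressed in
# microseconds since the epoch, as the OS Login API expects.
def _example_import_key(public_key: str, expires_usec: int) -> None:
    hook = OSLoginHook(gcp_conn_id="google_cloud_default")
    response = hook.import_ssh_public_key(
        user="user@example.com",
        ssh_public_key={"key": public_key, "expiration_time_usec": expires_usec},
        project_id="my-project",
    )
    # The response carries the resulting login profile, including POSIX accounts.
    print(response.login_profile)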
| 4,042 | 37.875 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/dataform.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.dataform_v1beta1 import DataformClient
from google.cloud.dataform_v1beta1.types import (
CompilationResult,
InstallNpmPackagesResponse,
Repository,
WorkflowInvocation,
Workspace,
WriteFileResponse,
)
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class DataformHook(GoogleBaseHook):
"""Hook for Google Cloud DataForm APIs."""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
def get_dataform_client(self) -> DataformClient:
"""Retrieves client library object that allow access to Cloud Dataform service."""
return DataformClient(credentials=self.get_credentials())
@GoogleBaseHook.fallback_to_default_project_id
def wait_for_workflow_invocation(
self,
workflow_invocation_id: str,
repository_id: str,
project_id: str,
region: str,
wait_time: int = 10,
timeout: int | None = None,
) -> None:
"""
Helper method which polls a job to check if it finishes.
:param workflow_invocation_id: Id of the Workflow Invocation
:param repository_id: Id of the Dataform repository
        :param project_id: Required. The ID of the Google Cloud project that the workflow
            invocation belongs to.
        :param region: Required. The ID of the Google Cloud region that the workflow
            invocation belongs to.
        :param wait_time: Number of seconds between checks.
        :param timeout: How many seconds to wait for the workflow invocation to reach a
            terminal state. If not set, the method polls until the invocation finishes.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
state = None
start = time.monotonic()
while state not in (
WorkflowInvocation.State.FAILED,
WorkflowInvocation.State.SUCCEEDED,
WorkflowInvocation.State.CANCELLED,
):
if timeout and start + timeout < time.monotonic():
raise AirflowException(
f"Timeout: workflow invocation {workflow_invocation_id} is not ready after {timeout}s"
)
time.sleep(wait_time)
try:
workflow_invocation = self.get_workflow_invocation(
project_id=project_id,
region=region,
repository_id=repository_id,
workflow_invocation_id=workflow_invocation_id,
)
state = workflow_invocation.state
except Exception as err:
self.log.info(
"Retrying. Dataform API returned error when waiting for workflow invocation: %s", err
)
if state == WorkflowInvocation.State.FAILED:
raise AirflowException(f"Workflow Invocation failed:\n{workflow_invocation}")
if state == WorkflowInvocation.State.CANCELLED:
raise AirflowException(f"Workflow Invocation was cancelled:\n{workflow_invocation}")
@GoogleBaseHook.fallback_to_default_project_id
def create_compilation_result(
self,
project_id: str,
region: str,
repository_id: str,
compilation_result: CompilationResult | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> CompilationResult:
"""
Creates a new CompilationResult in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param compilation_result: Required. The compilation result to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
return client.create_compilation_result(
request={
"parent": parent,
"compilation_result": compilation_result,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_compilation_result(
self,
project_id: str,
region: str,
repository_id: str,
compilation_result_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> CompilationResult:
"""
Fetches a single CompilationResult.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param compilation_result_id: The Id of the Dataform Compilation Result
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = (
f"projects/{project_id}/locations/{region}/repositories/"
f"{repository_id}/compilationResults/{compilation_result_id}"
)
return client.get_compilation_result(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def create_workflow_invocation(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation: WorkflowInvocation | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WorkflowInvocation:
"""
Creates a new WorkflowInvocation in a given Repository.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation: Required. The workflow invocation resource to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
return client.create_workflow_invocation(
request={"parent": parent, "workflow_invocation": workflow_invocation},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_workflow_invocation(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WorkflowInvocation:
"""
Fetches a single WorkflowInvocation.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: Required. The workflow invocation resource's id.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = (
f"projects/{project_id}/locations/{region}/repositories/"
f"{repository_id}/workflowInvocations/{workflow_invocation_id}"
)
return client.get_workflow_invocation(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def cancel_workflow_invocation(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Requests cancellation of a running WorkflowInvocation.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: Required. The workflow invocation resource's id.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = (
f"projects/{project_id}/locations/{region}/repositories/"
f"{repository_id}/workflowInvocations/{workflow_invocation_id}"
)
client.cancel_workflow_invocation(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def create_repository(
self,
*,
project_id: str,
region: str,
repository_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Repository:
"""
Creates repository.
        :param project_id: Required. The ID of the Google Cloud project where the repository
            should be created.
        :param region: Required. The ID of the Google Cloud region where the repository
            should be created.
:param repository_id: Required. The ID of the new Dataform repository.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}"
request = {
"parent": parent,
"repository_id": repository_id,
}
repository = client.create_repository(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return repository
@GoogleBaseHook.fallback_to_default_project_id
def delete_repository(
self,
*,
project_id: str,
region: str,
repository_id: str,
force: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes repository.
        :param project_id: Required. The ID of the Google Cloud project where the repository is located.
        :param region: Required. The ID of the Google Cloud region where the repository is located.
:param repository_id: Required. The ID of the Dataform repository that should be deleted.
:param force: If set to true, any child resources of this repository will also be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
request = {
"name": name,
"force": force,
}
client.delete_repository(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_workspace(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Workspace:
"""
Creates workspace.
        :param project_id: Required. The ID of the Google Cloud project where the workspace
            should be created.
        :param region: Required. The ID of the Google Cloud region where the workspace
            should be created.
        :param repository_id: Required. The ID of the Dataform repository where the workspace
            should be created.
:param workspace_id: Required. The ID of the new Dataform workspace.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
request = {"parent": parent, "workspace_id": workspace_id}
workspace = client.create_workspace(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return workspace
@GoogleBaseHook.fallback_to_default_project_id
def delete_workspace(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Deletes workspace.
        :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
        :param region: Required. The ID of the Google Cloud region where the workspace is located.
        :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
:param workspace_id: Required. The ID of the Dataform workspace that should be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/"
f"repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"name": workspace_path,
}
client.delete_workspace(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def write_file(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
contents: bytes,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WriteFileResponse:
"""
Writes a new file to the specified workspace.
        :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
        :param region: Required. The ID of the Google Cloud region where the workspace is located.
        :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
:param workspace_id: Required. The ID of the Dataform workspace where files should be created.
:param filepath: Required. Path to file including name of the file relative to workspace root.
:param contents: Required. Content of the file to be written.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/"
f"repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": filepath,
"contents": contents,
}
response = client.write_file(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response
@GoogleBaseHook.fallback_to_default_project_id
def make_directory(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
path: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
Makes new directory in specified workspace.
        :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
        :param region: Required. The ID of the Google Cloud region where the workspace is located.
        :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
        :param workspace_id: Required. The ID of the Dataform workspace where the directory
            should be created.
:param path: Required. The directory's full path including new directory name,
relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/"
f"repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": path,
}
response = client.make_directory(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response
@GoogleBaseHook.fallback_to_default_project_id
def remove_directory(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
path: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Removes directory in specified workspace.
        :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
        :param region: Required. The ID of the Google Cloud region where the workspace is located.
        :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
        :param workspace_id: Required. The ID of the Dataform workspace where the directory is located.
:param path: Required. The directory's full path including directory name,
relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/"
f"repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": path,
}
client.remove_directory(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def remove_file(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Removes file in specified workspace.
        :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
        :param region: Required. The ID of the Google Cloud region where the workspace is located.
        :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
        :param workspace_id: Required. The ID of the Dataform workspace where the directory is located.
:param filepath: Required. The full path including name of the file, relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/"
f"repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": filepath,
}
client.remove_file(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def install_npm_packages(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InstallNpmPackagesResponse:
"""Install NPM dependencies in the provided workspace.
Requires "package.json" to be created in the workspace.
        :param project_id: Required. The ID of the Google Cloud project where the workspace is located.
        :param region: Required. The ID of the Google Cloud region where the workspace is located.
        :param repository_id: Required. The ID of the Dataform repository where the workspace is located.
:param workspace_id: Required. The ID of the Dataform workspace.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/"
f"repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
}
response = client.install_npm_packages(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response
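# --- Illustrative sketch (editor's addition, not part of the hook) ----------
# A minimal example of the typical Dataform run the operators build on top of
# this hook: compile a workspace, start a workflow invocation from the
# compilation result, and poll until it finishes. The project, region,
# repository and workspace ids are placeholders.
def _example_dataform_run() -> None:
    hook = DataformHook(gcp_conn_id="google_cloud_default")
    compilation = hook.create_compilation_result(
        project_id="my-project",
        region="us-central1",
        repository_id="my-repo",
        compilation_result={
            "workspace": (
                "projects/my-project/locations/us-central1/"
                "repositories/my-repo/workspaces/my-workspace"
            )
        },
    )
    invocation = hook.create_workflow_invocation(
        project_id="my-project",
        region="us-central1",
        repository_id="my-repo",
        workflow_invocation={"compilation_result": compilation.name},
    )
    hook.wait_for_workflow_invocation(
        workflow_invocation_id=invocation.name.split("/")[-1],
        repository_id="my-repo",
        project_id="my-project",
        region="us-central1",
        wait_time=10,
        timeout=3600,
    )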
| 25,350 | 38.860063 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/bigquery_dts.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a BigQuery Hook."""
from __future__ import annotations
from copy import copy
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.bigquery_datatransfer_v1 import DataTransferServiceAsyncClient, DataTransferServiceClient
from google.cloud.bigquery_datatransfer_v1.types import (
StartManualTransferRunsResponse,
TransferConfig,
TransferRun,
)
from googleapiclient.discovery import Resource
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import (
PROVIDE_PROJECT_ID,
GoogleBaseAsyncHook,
GoogleBaseHook,
)
def get_object_id(obj: dict) -> str:
"""Returns unique id of the object."""
return obj["name"].rpartition("/")[-1]
class BiqQueryDataTransferServiceHook(GoogleBaseHook):
"""
Hook for Google Bigquery Transfer API.
All the methods in the hook where ``project_id`` is used must be called with
keyword arguments rather than positional.
"""
_conn: Resource | None = None
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.location = location
@staticmethod
def _disable_auto_scheduling(config: dict | TransferConfig) -> TransferConfig:
"""
Create a transfer config with the automatic scheduling disabled.
In the case of Airflow, the customer needs to create a transfer config
with the automatic scheduling disabled (UI, CLI or an Airflow operator) and
then trigger a transfer run using a specialized Airflow operator that will
call start_manual_transfer_runs.
:param config: Data transfer configuration to create.
"""
config = TransferConfig.to_dict(config) if isinstance(config, TransferConfig) else config
new_config = copy(config)
schedule_options = new_config.get("schedule_options")
if schedule_options:
disable_auto_scheduling = schedule_options.get("disable_auto_scheduling", None)
if disable_auto_scheduling is None:
schedule_options["disable_auto_scheduling"] = True
else:
new_config["schedule_options"] = {"disable_auto_scheduling": True}
return TransferConfig(**new_config)
def get_conn(self) -> DataTransferServiceClient:
"""
Retrieves connection to Google Bigquery.
:return: Google Bigquery API client
"""
if not self._conn:
self._conn = DataTransferServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def create_transfer_config(
self,
transfer_config: dict | TransferConfig,
project_id: str = PROVIDE_PROJECT_ID,
authorization_code: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TransferConfig:
"""
Creates a new data transfer configuration.
:param transfer_config: Data transfer configuration to create.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param authorization_code: authorization code to use with this transfer configuration.
This is required if new credentials are needed.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:return: A ``google.cloud.bigquery_datatransfer_v1.types.TransferConfig`` instance.
"""
client = self.get_conn()
parent = f"projects/{project_id}"
if self.location:
parent = f"{parent}/locations/{self.location}"
return client.create_transfer_config(
request={
"parent": parent,
"transfer_config": self._disable_auto_scheduling(transfer_config),
"authorization_code": authorization_code,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_transfer_config(
self,
transfer_config_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes transfer configuration.
:param transfer_config_id: Id of transfer config to be used.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:return: None
"""
client = self.get_conn()
project = f"projects/{project_id}"
if self.location:
project = f"{project}/locations/{self.location}"
name = f"{project}/transferConfigs/{transfer_config_id}"
return client.delete_transfer_config(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def start_manual_transfer_runs(
self,
transfer_config_id: str,
project_id: str = PROVIDE_PROJECT_ID,
requested_time_range: dict | None = None,
requested_run_time: dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> StartManualTransferRunsResponse:
"""
Start manual transfer runs to be executed now with schedule_time equal to current time.
The transfer runs can be created for a time range where the run_time is between
start_time (inclusive) and end_time (exclusive), or for a specific run_time.
:param transfer_config_id: Id of transfer config to be used.
:param requested_time_range: Time range for the transfer runs that should be started.
If a dict is provided, it must be of the same form as the protobuf
message `~google.cloud.bigquery_datatransfer_v1.types.TimeRange`
:param requested_run_time: Specific run_time for a transfer run to be started. The
requested_run_time must not be in the future. If a dict is provided, it
must be of the same form as the protobuf message
`~google.cloud.bigquery_datatransfer_v1.types.Timestamp`
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
        :return: A ``google.cloud.bigquery_datatransfer_v1.types.StartManualTransferRunsResponse`` instance.
"""
client = self.get_conn()
project = f"projects/{project_id}"
if self.location:
project = f"{project}/locations/{self.location}"
parent = f"{project}/transferConfigs/{transfer_config_id}"
return client.start_manual_transfer_runs(
request={
"parent": parent,
"requested_time_range": requested_time_range,
"requested_run_time": requested_run_time,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_transfer_run(
self,
run_id: str,
transfer_config_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TransferRun:
"""
Returns information about the particular transfer run.
:param run_id: ID of the transfer run.
:param transfer_config_id: ID of transfer config to be used.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
        :return: A ``google.cloud.bigquery_datatransfer_v1.types.TransferRun`` instance.
"""
client = self.get_conn()
project = f"projects/{project_id}"
if self.location:
project = f"{project}/locations/{self.location}"
name = f"{project}/transferConfigs/{transfer_config_id}/runs/{run_id}"
return client.get_transfer_run(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
class AsyncBiqQueryDataTransferServiceHook(GoogleBaseAsyncHook):
"""Hook of the BigQuery service to be used with async client of the Google library."""
sync_hook_class = BiqQueryDataTransferServiceHook
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
location=location,
impersonation_chain=impersonation_chain,
)
self._conn: DataTransferServiceAsyncClient | None = None
async def _get_conn(self) -> DataTransferServiceAsyncClient:
if not self._conn:
credentials = (await self.get_sync_hook()).get_credentials()
self._conn = DataTransferServiceAsyncClient(credentials=credentials, client_info=CLIENT_INFO)
return self._conn
async def _get_project_id(self) -> str:
sync_hook = await self.get_sync_hook()
return sync_hook.project_id
async def _get_project_location(self) -> str:
sync_hook = await self.get_sync_hook()
return sync_hook.location
async def get_transfer_run(
self,
config_id: str,
run_id: str,
project_id: str | None,
location: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Returns information about the particular transfer run.
:param run_id: ID of the transfer run.
:param config_id: ID of transfer config to be used.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param location: BigQuery Transfer Service location for regional transfers.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
        :return: A ``google.cloud.bigquery_datatransfer_v1.types.TransferRun`` instance.
"""
project_id = project_id or (await self._get_project_id())
location = location or (await self._get_project_location())
name = f"projects/{project_id}"
if location:
name += f"/locations/{location}"
name += f"/transferConfigs/{config_id}/runs/{run_id}"
client = await self._get_conn()
transfer_run = await client.get_transfer_run(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return transfer_run
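# --- Illustrative sketch (editor's addition, not part of the hook) ----------
# A minimal example of the flow this hook is designed for: create a transfer
# config (the hook disables automatic scheduling on the way in), then trigger
# a single manual run for it. The scheduled-query configuration and project id
# are placeholders.
def _example_manual_transfer_run() -> None:
    import time

    hook = BiqQueryDataTransferServiceHook(gcp_conn_id="google_cloud_default")
    transfer_config = {
        "destination_dataset_id": "my_dataset",
        "display_name": "example scheduled query",
        "data_source_id": "scheduled_query",
        "params": {"query": "SELECT 1 AS val"},
    }
    config = hook.create_transfer_config(transfer_config=transfer_config, project_id="my-project")
    config_id = config.name.rpartition("/")[-1]  # same idea as ``get_object_id`` above
    response = hook.start_manual_transfer_runs(
        transfer_config_id=config_id,
        requested_run_time={"seconds": int(time.time())},
        project_id="my-project",
    )
    print(response.runs)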
| 15,131 | 41.268156 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/datafusion.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google DataFusion hook."""
from __future__ import annotations
import json
import os
from time import monotonic, sleep
from typing import Any, Dict, Sequence
from urllib.parse import quote, urlencode, urljoin
import google.auth
from aiohttp import ClientSession
from gcloud.aio.auth import AioSession, Token
from google.api_core.retry import exponential_sleep_generator
from googleapiclient.discovery import Resource, build
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.providers.google.common.hooks.base_google import (
PROVIDE_PROJECT_ID,
GoogleBaseAsyncHook,
GoogleBaseHook,
)
Operation = Dict[str, Any]
class PipelineStates:
"""Data Fusion pipeline states."""
PENDING = "PENDING"
STARTING = "STARTING"
RUNNING = "RUNNING"
SUSPENDED = "SUSPENDED"
RESUMING = "RESUMING"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
KILLED = "KILLED"
REJECTED = "REJECTED"
FAILURE_STATES = [PipelineStates.FAILED, PipelineStates.KILLED, PipelineStates.REJECTED]
SUCCESS_STATES = [PipelineStates.COMPLETED]
class DataFusionHook(GoogleBaseHook):
"""Hook for Google DataFusion."""
_conn: Resource | None = None
def __init__(
self,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
def wait_for_operation(self, operation: dict[str, Any]) -> dict[str, Any]:
"""Waits for long-lasting operation to complete."""
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
sleep(time_to_wait)
operation = (
self.get_conn().projects().locations().operations().get(name=operation.get("name")).execute()
)
if operation.get("done"):
break
if "error" in operation:
raise AirflowException(operation["error"])
return operation["response"]
def wait_for_pipeline_state(
self,
pipeline_name: str,
pipeline_id: str,
instance_url: str,
namespace: str = "default",
success_states: list[str] | None = None,
failure_states: list[str] | None = None,
timeout: int = 5 * 60,
) -> None:
"""Polls pipeline state and raises an exception if the state fails or times out."""
failure_states = failure_states or FAILURE_STATES
success_states = success_states or SUCCESS_STATES
start_time = monotonic()
current_state = None
while monotonic() - start_time < timeout:
try:
workflow = self.get_pipeline_workflow(
pipeline_name=pipeline_name,
pipeline_id=pipeline_id,
instance_url=instance_url,
namespace=namespace,
)
current_state = workflow["status"]
except AirflowException:
pass # Because the pipeline may not be visible in system yet
if current_state in success_states:
return
if current_state in failure_states:
raise AirflowException(
f"Pipeline {pipeline_name} state {current_state} is not one of {success_states}"
)
sleep(30)
# Time is up!
raise AirflowException(
f"Pipeline {pipeline_name} state {current_state} is not "
f"one of {success_states} after {timeout}s"
)
@staticmethod
def _name(project_id: str, location: str, instance_name: str) -> str:
return f"projects/{project_id}/locations/{location}/instances/{instance_name}"
@staticmethod
def _parent(project_id: str, location: str) -> str:
return f"projects/{project_id}/locations/{location}"
@staticmethod
def _base_url(instance_url: str, namespace: str) -> str:
return os.path.join(instance_url, "v3", "namespaces", quote(namespace), "apps")
def _cdap_request(
self, url: str, method: str, body: list | dict | None = None
) -> google.auth.transport.Response:
headers: dict[str, str] = {"Content-Type": "application/json"}
request = google.auth.transport.requests.Request()
credentials = self.get_credentials()
credentials.before_request(request=request, method=method, url=url, headers=headers)
payload = json.dumps(body) if body else None
response = request(method=method, url=url, headers=headers, body=payload)
return response
@staticmethod
def _check_response_status_and_data(response, message: str) -> None:
if response.status == 404:
raise AirflowNotFoundException(message)
elif response.status != 200:
raise AirflowException(message)
if response.data is None:
raise AirflowException(
"Empty response received. Please, check for possible root "
"causes of this behavior either in DAG code or on Cloud DataFusion side"
)
def get_conn(self) -> Resource:
"""Retrieves connection to DataFusion."""
if not self._conn:
http_authorized = self._authorize()
self._conn = build(
"datafusion",
self.api_version,
http=http_authorized,
cache_discovery=False,
)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def restart_instance(self, instance_name: str, location: str, project_id: str) -> Operation:
"""
Restart a single Data Fusion instance.
At the end of an operation instance is fully restarted.
:param instance_name: The name of the instance to restart.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
"""
operation = (
self.get_conn()
.projects()
.locations()
.instances()
.restart(name=self._name(project_id, location, instance_name))
.execute(num_retries=self.num_retries)
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(self, instance_name: str, location: str, project_id: str) -> Operation:
"""
        Deletes a single Data Fusion instance.
:param instance_name: The name of the instance to delete.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
"""
operation = (
self.get_conn()
.projects()
.locations()
.instances()
.delete(name=self._name(project_id, location, instance_name))
.execute(num_retries=self.num_retries)
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
instance_name: str,
instance: dict[str, Any],
location: str,
project_id: str = PROVIDE_PROJECT_ID,
) -> Operation:
"""
Creates a new Data Fusion instance in the specified project and location.
:param instance_name: The name of the instance to create.
:param instance: An instance of Instance.
https://cloud.google.com/data-fusion/docs/reference/rest/v1beta1/projects.locations.instances#Instance
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
"""
operation = (
self.get_conn()
.projects()
.locations()
.instances()
.create(
parent=self._parent(project_id, location),
body=instance,
instanceId=instance_name,
)
.execute(num_retries=self.num_retries)
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(self, instance_name: str, location: str, project_id: str) -> dict[str, Any]:
"""
Gets details of a single Data Fusion instance.
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
"""
instance = (
self.get_conn()
.projects()
.locations()
.instances()
.get(name=self._name(project_id, location, instance_name))
.execute(num_retries=self.num_retries)
)
return instance
@GoogleBaseHook.fallback_to_default_project_id
def patch_instance(
self,
instance_name: str,
instance: dict[str, Any],
update_mask: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
) -> Operation:
"""
Updates a single Data Fusion instance.
:param instance_name: The name of the instance to create.
:param instance: An instance of Instance.
https://cloud.google.com/data-fusion/docs/reference/rest/v1beta1/projects.locations.instances#Instance
:param update_mask: Field mask is used to specify the fields that the update will overwrite
in an instance resource. The fields specified in the updateMask are relative to the resource,
not the full request. A field will be overwritten if it is in the mask. If the user does not
provide a mask, all the supported fields (labels and options currently) will be overwritten.
A comma-separated list of fully qualified names of fields. Example: "user.displayName,photo".
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?_ga=2.205612571.-968688242.1573564810#google.protobuf.FieldMask
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
"""
operation = (
self.get_conn()
.projects()
.locations()
.instances()
.patch(
name=self._name(project_id, location, instance_name),
updateMask=update_mask,
body=instance,
)
.execute(num_retries=self.num_retries)
)
return operation
def create_pipeline(
self,
pipeline_name: str,
pipeline: dict[str, Any],
instance_url: str,
namespace: str = "default",
) -> None:
"""
Creates a Cloud Data Fusion pipeline.
:param pipeline_name: Your pipeline name.
:param pipeline: The pipeline definition. For more information check:
https://docs.cdap.io/cdap/current/en/developer-manual/pipelines/developing-pipelines.html#pipeline-configuration-file-format
        :param instance_url: Endpoint on which the REST API is accessible for the instance.
:param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
"""
url = os.path.join(self._base_url(instance_url, namespace), quote(pipeline_name))
response = self._cdap_request(url=url, method="PUT", body=pipeline)
self._check_response_status_and_data(
response, f"Creating a pipeline failed with code {response.status} while calling {url}"
)
def delete_pipeline(
self,
pipeline_name: str,
instance_url: str,
version_id: str | None = None,
namespace: str = "default",
) -> None:
"""
Deletes a Cloud Data Fusion pipeline.
:param pipeline_name: Your pipeline name.
:param version_id: Version of pipeline to delete
        :param instance_url: Endpoint on which the REST API is accessible for the instance.
        :param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
"""
url = os.path.join(self._base_url(instance_url, namespace), quote(pipeline_name))
if version_id:
url = os.path.join(url, "versions", version_id)
response = self._cdap_request(url=url, method="DELETE", body=None)
self._check_response_status_and_data(
response, f"Deleting a pipeline failed with code {response.status}"
)
def list_pipelines(
self,
instance_url: str,
artifact_name: str | None = None,
artifact_version: str | None = None,
namespace: str = "default",
) -> dict:
"""
Lists Cloud Data Fusion pipelines.
:param artifact_version: Artifact version to filter instances
:param artifact_name: Artifact name to filter instances
        :param instance_url: Endpoint on which the REST API is accessible for the instance.
        :param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
"""
url = self._base_url(instance_url, namespace)
query: dict[str, str] = {}
        if artifact_name:
            query["artifactName"] = artifact_name
        if artifact_version:
            query["artifactVersion"] = artifact_version
if query:
url = os.path.join(url, urlencode(query))
response = self._cdap_request(url=url, method="GET", body=None)
self._check_response_status_and_data(
response, f"Listing pipelines failed with code {response.status}"
)
return json.loads(response.data)
def get_pipeline_workflow(
self,
pipeline_name: str,
instance_url: str,
pipeline_id: str,
namespace: str = "default",
) -> Any:
url = os.path.join(
self._base_url(instance_url, namespace),
quote(pipeline_name),
"workflows",
"DataPipelineWorkflow",
"runs",
quote(pipeline_id),
)
response = self._cdap_request(url=url, method="GET")
self._check_response_status_and_data(
response, f"Retrieving a pipeline state failed with code {response.status}"
)
workflow = json.loads(response.data)
return workflow
def start_pipeline(
self,
pipeline_name: str,
instance_url: str,
namespace: str = "default",
runtime_args: dict[str, Any] | None = None,
) -> str:
"""
Starts a Cloud Data Fusion pipeline. Works for both batch and stream pipelines.
:param pipeline_name: Your pipeline name.
        :param instance_url: Endpoint on which the REST API is accessible for the instance.
:param runtime_args: Optional runtime JSON args to be passed to the pipeline
:param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
"""
        # TODO: This API endpoint starts multiple pipelines. There will eventually be a fix
        # to return the run Id as part of the API request to run a single pipeline.
# https://github.com/apache/airflow/pull/8954#discussion_r438223116
url = os.path.join(
instance_url,
"v3",
"namespaces",
quote(namespace),
"start",
)
runtime_args = runtime_args or {}
body = [
{
"appId": pipeline_name,
"programType": "workflow",
"programId": "DataPipelineWorkflow",
"runtimeargs": runtime_args,
}
]
response = self._cdap_request(url=url, method="POST", body=body)
self._check_response_status_and_data(
response, f"Starting a pipeline failed with code {response.status}"
)
response_json = json.loads(response.data)
return response_json[0]["runId"]
def stop_pipeline(self, pipeline_name: str, instance_url: str, namespace: str = "default") -> None:
"""
Stops a Cloud Data Fusion pipeline. Works for both batch and stream pipelines.
:param pipeline_name: Your pipeline name.
        :param instance_url: Endpoint on which the REST API is accessible for the instance.
        :param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
"""
url = os.path.join(
self._base_url(instance_url, namespace),
quote(pipeline_name),
"workflows",
"DataPipelineWorkflow",
"stop",
)
response = self._cdap_request(url=url, method="POST")
self._check_response_status_and_data(
response, f"Stopping a pipeline failed with code {response.status}"
)
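# A minimal usage sketch (illustrative): start an already-deployed pipeline and block until it
# finishes, assuming a configured "google_cloud_default" connection. The instance URL, pipeline
# name and runtime arguments are placeholders.
def _example_run_pipeline(instance_url: str, pipeline_name: str) -> None:
    hook = DataFusionHook(gcp_conn_id="google_cloud_default")
    # start_pipeline returns the run id of the started DataPipelineWorkflow run.
    pipeline_id = hook.start_pipeline(
        pipeline_name=pipeline_name,
        instance_url=instance_url,
        runtime_args={"example.arg": "value"},  # placeholder runtime arguments
    )
    # Poll the run until it reaches a success state or the timeout elapses.
    hook.wait_for_pipeline_state(
        pipeline_name=pipeline_name,
        pipeline_id=pipeline_id,
        instance_url=instance_url,
        timeout=10 * 60,
    )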
class DataFusionAsyncHook(GoogleBaseAsyncHook):
"""Class to get asynchronous hook for DataFusion."""
sync_hook_class = DataFusionHook
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
@staticmethod
def _base_url(instance_url: str, namespace: str) -> str:
return urljoin(f"{instance_url}/", f"v3/namespaces/{quote(namespace)}/apps/")
async def _get_link(self, url: str, session):
async with Token(scopes=self.scopes) as token:
session_aio = AioSession(session)
headers = {
"Authorization": f"Bearer {await token.get()}",
}
try:
pipeline = await session_aio.get(url=url, headers=headers)
except AirflowException:
pass # Because the pipeline may not be visible in system yet
return pipeline
async def get_pipeline(
self,
instance_url: str,
namespace: str,
pipeline_name: str,
pipeline_id: str,
session,
):
base_url_link = self._base_url(instance_url, namespace)
url = urljoin(
base_url_link, f"{quote(pipeline_name)}/workflows/DataPipelineWorkflow/runs/{quote(pipeline_id)}"
)
return await self._get_link(url=url, session=session)
async def get_pipeline_status(
self,
pipeline_name: str,
instance_url: str,
pipeline_id: str,
namespace: str = "default",
success_states: list[str] | None = None,
) -> str:
"""
Gets a Cloud Data Fusion pipeline status asynchronously.
:param pipeline_name: Your pipeline name.
        :param instance_url: Endpoint on which the REST API is accessible for the instance.
:param pipeline_id: Unique pipeline ID associated with specific pipeline
:param namespace: if your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
        :param success_states: If provided, the pipeline is reported as successful when it reaches
            one of the provided states.
"""
success_states = success_states or SUCCESS_STATES
async with ClientSession() as session:
try:
pipeline = await self.get_pipeline(
instance_url=instance_url,
namespace=namespace,
pipeline_name=pipeline_name,
pipeline_id=pipeline_id,
session=session,
)
self.log.info("Response pipeline: %s", pipeline)
pipeline = await pipeline.json(content_type=None)
current_pipeline_state = pipeline["status"]
if current_pipeline_state in success_states:
pipeline_status = "success"
elif current_pipeline_state in FAILURE_STATES:
pipeline_status = "failed"
else:
pipeline_status = "pending"
except OSError:
pipeline_status = "pending"
except Exception as e:
self.log.info("Retrieving pipeline status finished with errors...")
pipeline_status = str(e)
return pipeline_status
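# A minimal usage sketch (illustrative): poll a pipeline run with the async hook until it leaves
# the "pending" state, assuming a configured "google_cloud_default" connection. All identifiers
# are caller-supplied placeholders.
async def _example_async_pipeline_status(instance_url: str, pipeline_name: str, pipeline_id: str) -> str:
    import asyncio

    hook = DataFusionAsyncHook(gcp_conn_id="google_cloud_default")
    while True:
        status = await hook.get_pipeline_status(
            pipeline_name=pipeline_name,
            instance_url=instance_url,
            pipeline_id=pipeline_id,
        )
        if status != "pending":
            return status
        await asyncio.sleep(30)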
| 22,699 | 38.478261 | 153 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/bigtable.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Bigtable Hook."""
from __future__ import annotations
import enum
from typing import Sequence
from google.cloud.bigtable import Client, enums
from google.cloud.bigtable.cluster import Cluster
from google.cloud.bigtable.column_family import ColumnFamily, GarbageCollectionRule
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.table import ClusterState, Table
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class BigtableHook(GoogleBaseHook):
"""
Hook for Google Cloud Bigtable APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: Client | None = None
def _get_client(self, project_id: str) -> Client:
if not self._client:
self._client = Client(
project=project_id,
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
admin=True,
)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(self, instance_id: str, project_id: str) -> Instance | None:
"""
Retrieves and returns the specified Cloud Bigtable instance if it exists, otherwise returns None.
:param instance_id: The ID of the Cloud Bigtable instance.
:param project_id: Optional, Google Cloud project ID where the
BigTable exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
"""
instance = self._get_client(project_id=project_id).instance(instance_id)
if not instance.exists():
return None
return instance
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(self, instance_id: str, project_id: str) -> None:
"""
Deletes the specified Cloud Bigtable instance.
Raises google.api_core.exceptions.NotFound if the Cloud Bigtable instance does
not exist.
:param project_id: Optional, Google Cloud project ID where the
BigTable exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param instance_id: The ID of the Cloud Bigtable instance.
"""
instance = self.get_instance(instance_id=instance_id, project_id=project_id)
if instance:
instance.delete()
else:
self.log.warning(
"The instance '%s' does not exist in project '%s'. Exiting", instance_id, project_id
)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
instance_id: str,
main_cluster_id: str,
main_cluster_zone: str,
project_id: str,
replica_clusters: list[dict[str, str]] | None = None,
instance_display_name: str | None = None,
instance_type: enums.Instance.Type = enums.Instance.Type.UNSPECIFIED, # type: ignore[assignment]
instance_labels: dict | None = None,
cluster_nodes: int | None = None,
cluster_storage_type: enums.StorageType = enums.StorageType.UNSPECIFIED, # type: ignore[assignment]
timeout: float | None = None,
) -> Instance:
"""
Creates new instance.
:param instance_id: The ID for the new instance.
:param main_cluster_id: The ID for main cluster for the new instance.
:param main_cluster_zone: The zone for main cluster.
See https://cloud.google.com/bigtable/docs/locations for more details.
:param project_id: Optional, Google Cloud project ID where the
BigTable exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param replica_clusters: (optional) A list of replica clusters for the new
instance. Each cluster dictionary contains an id and a zone.
Example: [{"id": "replica-1", "zone": "us-west1-a"}]
:param instance_type: (optional) The type of the instance.
:param instance_display_name: (optional) Human-readable name of the instance.
Defaults to ``instance_id``.
:param instance_labels: (optional) Dictionary of labels to associate with the
instance.
:param cluster_nodes: (optional) Number of nodes for cluster.
:param cluster_storage_type: (optional) The type of storage.
:param timeout: (optional) timeout (in seconds) for instance creation.
            If not specified (None), the operator will wait indefinitely.
"""
instance = Instance(
instance_id,
self._get_client(project_id=project_id),
instance_display_name,
instance_type,
instance_labels,
)
cluster_kwargs = dict(
cluster_id=main_cluster_id,
location_id=main_cluster_zone,
default_storage_type=cluster_storage_type,
)
if instance_type != enums.Instance.Type.DEVELOPMENT and cluster_nodes:
cluster_kwargs["serve_nodes"] = cluster_nodes
clusters = [instance.cluster(**cluster_kwargs)]
if replica_clusters:
for replica_cluster in replica_clusters:
if "id" in replica_cluster and "zone" in replica_cluster:
clusters.append(
instance.cluster(
replica_cluster["id"],
replica_cluster["zone"],
cluster_nodes,
cluster_storage_type,
)
)
operation = instance.create(clusters=clusters)
operation.result(timeout)
return instance
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
instance_id: str,
project_id: str,
instance_display_name: str | None = None,
instance_type: enums.Instance.Type | enum.IntEnum | None = None,
instance_labels: dict | None = None,
timeout: float | None = None,
) -> Instance:
"""
Update an existing instance.
:param instance_id: The ID for the existing instance.
:param project_id: Optional, Google Cloud project ID where the
BigTable exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param instance_display_name: (optional) Human-readable name of the instance.
:param instance_type: (optional) The type of the instance.
:param instance_labels: (optional) Dictionary of labels to associate with the
instance.
:param timeout: (optional) timeout (in seconds) for instance update.
            If not specified (None), the operator will wait indefinitely.
"""
instance = Instance(
instance_id=instance_id,
client=self._get_client(project_id=project_id),
display_name=instance_display_name,
instance_type=instance_type,
labels=instance_labels,
)
operation = instance.update()
operation.result(timeout)
return instance
@staticmethod
def create_table(
instance: Instance,
table_id: str,
initial_split_keys: list | None = None,
column_families: dict[str, GarbageCollectionRule] | None = None,
) -> None:
"""
Creates the specified Cloud Bigtable table.
Raises ``google.api_core.exceptions.AlreadyExists`` if the table exists.
:param instance: The Cloud Bigtable instance that owns the table.
:param table_id: The ID of the table to create in Cloud Bigtable.
:param initial_split_keys: (Optional) A list of row keys in bytes to use to
initially split the table.
:param column_families: (Optional) A map of columns to create. The key is the
column_id str, and the value is a
:class:`google.cloud.bigtable.column_family.GarbageCollectionRule`.
"""
if column_families is None:
column_families = {}
if initial_split_keys is None:
initial_split_keys = []
table = Table(table_id, instance)
table.create(initial_split_keys, column_families)
@GoogleBaseHook.fallback_to_default_project_id
def delete_table(self, instance_id: str, table_id: str, project_id: str) -> None:
"""
Deletes the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:param instance_id: The ID of the Cloud Bigtable instance.
:param table_id: The ID of the table in Cloud Bigtable.
:param project_id: Optional, Google Cloud project ID where the
BigTable exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
"""
instance = self.get_instance(instance_id=instance_id, project_id=project_id)
if instance is None:
raise RuntimeError(f"Instance {instance_id} did not exist; unable to delete table {table_id}")
table = instance.table(table_id=table_id)
table.delete()
@staticmethod
def update_cluster(instance: Instance, cluster_id: str, nodes: int) -> None:
"""
Updates number of nodes in the specified Cloud Bigtable cluster.
Raises google.api_core.exceptions.NotFound if the cluster does not exist.
:param instance: The Cloud Bigtable instance that owns the cluster.
:param cluster_id: The ID of the cluster.
:param nodes: The desired number of nodes.
"""
cluster = Cluster(cluster_id, instance)
# "reload" is required to set location_id attribute on cluster.
cluster.reload()
cluster.serve_nodes = nodes
cluster.update()
@staticmethod
def get_column_families_for_table(instance: Instance, table_id: str) -> dict[str, ColumnFamily]:
"""
Fetches Column Families for the specified table in Cloud Bigtable.
:param instance: The Cloud Bigtable instance that owns the table.
:param table_id: The ID of the table in Cloud Bigtable to fetch Column Families
from.
"""
table = Table(table_id, instance)
return table.list_column_families()
@staticmethod
def get_cluster_states_for_table(instance: Instance, table_id: str) -> dict[str, ClusterState]:
"""
Fetches Cluster States for the specified table in Cloud Bigtable.
Raises google.api_core.exceptions.NotFound if the table does not exist.
:param instance: The Cloud Bigtable instance that owns the table.
:param table_id: The ID of the table in Cloud Bigtable to fetch Cluster States
from.
"""
table = Table(table_id, instance)
return table.get_cluster_states()
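# A minimal usage sketch (illustrative): create a development instance and a table in it,
# assuming a configured "google_cloud_default" connection. Project, instance, cluster, zone
# and table identifiers are placeholders.
def _example_create_bigtable_table() -> None:
    hook = BigtableHook(gcp_conn_id="google_cloud_default")
    instance = hook.create_instance(
        instance_id="example-instance",
        main_cluster_id="example-cluster",
        main_cluster_zone="europe-west1-b",
        project_id="example-project",
        instance_type=enums.Instance.Type.DEVELOPMENT,
    )
    # create_table is a staticmethod; the instance created above owns the new table.
    hook.create_table(instance=instance, table_id="example-table")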
| 12,541 | 40.946488 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/cloud_storage_transfer_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Storage Transfer Service Hook.
.. spelling::
ListTransferJobsAsyncPager
StorageTransferServiceAsyncClient
"""
from __future__ import annotations
import json
import logging
import time
import warnings
from copy import deepcopy
from datetime import timedelta
from typing import Any, Sequence
from google.cloud.storage_transfer_v1 import (
ListTransferJobsRequest,
StorageTransferServiceAsyncClient,
TransferJob,
TransferOperation,
)
from google.cloud.storage_transfer_v1.services.storage_transfer_service.pagers import (
ListTransferJobsAsyncPager,
)
from googleapiclient.discovery import Resource, build
from googleapiclient.errors import HttpError
from proto import Message
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.common.hooks.base_google import GoogleBaseAsyncHook, GoogleBaseHook
log = logging.getLogger(__name__)
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 10
class GcpTransferJobsStatus:
"""Google Cloud Transfer job status."""
ENABLED = "ENABLED"
DISABLED = "DISABLED"
DELETED = "DELETED"
class GcpTransferOperationStatus:
"""Google Cloud Transfer operation status."""
IN_PROGRESS = "IN_PROGRESS"
PAUSED = "PAUSED"
SUCCESS = "SUCCESS"
FAILED = "FAILED"
ABORTED = "ABORTED"
# A list of keywords used to build a request or response
ACCESS_KEY_ID = "accessKeyId"
ALREADY_EXISTING_IN_SINK = "overwriteObjectsAlreadyExistingInSink"
AWS_ACCESS_KEY = "awsAccessKey"
AWS_SECRET_ACCESS_KEY = "secretAccessKey"
AWS_S3_DATA_SOURCE = "awsS3DataSource"
BODY = "body"
BUCKET_NAME = "bucketName"
COUNTERS = "counters"
DAY = "day"
DESCRIPTION = "description"
FILTER = "filter"
FILTER_JOB_NAMES = "job_names"
FILTER_PROJECT_ID = "project_id"
GCS_DATA_SINK = "gcsDataSink"
GCS_DATA_SOURCE = "gcsDataSource"
HOURS = "hours"
HTTP_DATA_SOURCE = "httpDataSource"
INCLUDE_PREFIXES = "includePrefixes"
JOB_NAME = "name"
LIST_URL = "list_url"
METADATA = "metadata"
MINUTES = "minutes"
MONTH = "month"
NAME = "name"
OBJECT_CONDITIONS = "object_conditions"
OPERATIONS = "operations"
OVERWRITE_OBJECTS_ALREADY_EXISTING_IN_SINK = "overwriteObjectsAlreadyExistingInSink"
PATH = "path"
PROJECT_ID = "projectId"
SCHEDULE = "schedule"
SCHEDULE_END_DATE = "scheduleEndDate"
SCHEDULE_START_DATE = "scheduleStartDate"
SECONDS = "seconds"
SECRET_ACCESS_KEY = "secretAccessKey"
START_TIME_OF_DAY = "startTimeOfDay"
STATUS = "status"
STATUS1 = "status"
TRANSFER_JOB = "transfer_job"
TRANSFER_JOBS = "transferJobs"
TRANSFER_JOB_FIELD_MASK = "update_transfer_job_field_mask"
TRANSFER_OPERATIONS = "transferOperations"
TRANSFER_OPTIONS = "transfer_options"
TRANSFER_SPEC = "transferSpec"
YEAR = "year"
ALREADY_EXIST_CODE = 409
NEGATIVE_STATUSES = {GcpTransferOperationStatus.FAILED, GcpTransferOperationStatus.ABORTED}
def gen_job_name(job_name: str) -> str:
"""Add a unique suffix to the job name.
    :param job_name: Base job name to which a unique suffix is appended.
:return: job_name with suffix
"""
uniq = int(time.time())
return f"{job_name}_{uniq}"
class CloudDataTransferServiceHook(GoogleBaseHook):
"""Google Storage Transfer Service functionalities.
All methods in the hook with *project_id* in the signature must be called
with keyword arguments rather than positional.
"""
def __init__(
self,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if "delegate_to" in kwargs:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and "
"finally removed in this version of Google Provider. You MUST "
"convert it to `impersonate_chain`."
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
self._conn = None
def get_conn(self) -> Resource:
"""Retrieve connection to Google Storage Transfer service.
:return: Google Storage Transfer service object
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build(
"storagetransfer", self.api_version, http=http_authorized, cache_discovery=False
)
return self._conn
def create_transfer_job(self, body: dict) -> dict:
"""Create a transfer job that runs periodically.
:param body: (Required) The request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
:return: The transfer job. See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
"""
body = self._inject_project_id(body, BODY, PROJECT_ID)
try:
transfer_job = (
self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries)
)
except HttpError as e:
# If status code "Conflict"
# https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Code.ENUM_VALUES.ALREADY_EXISTS
# we should try to find this job
job_name = body.get(JOB_NAME, "")
if int(e.resp.status) == ALREADY_EXIST_CODE and job_name:
transfer_job = self.get_transfer_job(job_name=job_name, project_id=body.get(PROJECT_ID))
                    # Generate a new job_name if the job status is DELETED
                    # and try to create this job again
if transfer_job.get(STATUS) == GcpTransferJobsStatus.DELETED:
body[JOB_NAME] = gen_job_name(job_name)
self.log.info(
"Job `%s` has been soft deleted. Creating job with new name `%s`",
job_name,
                        body[JOB_NAME],
)
return (
self.get_conn().transferJobs().create(body=body).execute(num_retries=self.num_retries)
)
elif transfer_job.get(STATUS) == GcpTransferJobsStatus.DISABLED:
return self.enable_transfer_job(job_name=job_name, project_id=body.get(PROJECT_ID))
else:
raise e
self.log.info("Created job %s", transfer_job[NAME])
return transfer_job
@GoogleBaseHook.fallback_to_default_project_id
def get_transfer_job(self, job_name: str, project_id: str) -> dict:
"""Get latest state of a long-running Google Storage Transfer Service job.
:param job_name: (Required) Name of the job to be fetched
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:return: Transfer Job
"""
return (
self.get_conn()
.transferJobs()
.get(jobName=job_name, projectId=project_id)
.execute(num_retries=self.num_retries)
)
def list_transfer_job(self, request_filter: dict | None = None, **kwargs) -> list[dict]:
"""List long-running operations in Google Storage Transfer Service.
A filter can be specified to match only certain entries.
:param request_filter: (Required) A request filter, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter
:return: List of Transfer Jobs
"""
# To preserve backward compatibility
# TODO: remove one day
if request_filter is None:
if "filter" in kwargs:
request_filter = kwargs["filter"]
if not isinstance(request_filter, dict):
raise ValueError(f"The request_filter should be dict and is {type(request_filter)}")
warnings.warn("Use 'request_filter' instead of 'filter'", AirflowProviderDeprecationWarning)
else:
raise TypeError("list_transfer_job missing 1 required positional argument: 'request_filter'")
conn = self.get_conn()
request_filter = self._inject_project_id(request_filter, FILTER, FILTER_PROJECT_ID)
request = conn.transferJobs().list(filter=json.dumps(request_filter))
jobs: list[dict] = []
while request is not None:
response = request.execute(num_retries=self.num_retries)
jobs.extend(response[TRANSFER_JOBS])
request = conn.transferJobs().list_next(previous_request=request, previous_response=response)
return jobs
@GoogleBaseHook.fallback_to_default_project_id
def enable_transfer_job(self, job_name: str, project_id: str) -> dict:
"""Make new transfers be performed based on the schedule.
:param job_name: (Required) Name of the job to be updated
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:return: If successful, TransferJob.
"""
return (
self.get_conn()
.transferJobs()
.patch(
jobName=job_name,
body={
PROJECT_ID: project_id,
TRANSFER_JOB: {STATUS1: GcpTransferJobsStatus.ENABLED},
TRANSFER_JOB_FIELD_MASK: STATUS1,
},
)
.execute(num_retries=self.num_retries)
)
def update_transfer_job(self, job_name: str, body: dict) -> dict:
"""Update a transfer job that runs periodically.
:param job_name: (Required) Name of the job to be updated
:param body: A request body, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
:return: If successful, TransferJob.
"""
body = self._inject_project_id(body, BODY, PROJECT_ID)
return (
self.get_conn()
.transferJobs()
.patch(jobName=job_name, body=body)
.execute(num_retries=self.num_retries)
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_transfer_job(self, job_name: str, project_id: str) -> None:
"""Delete a transfer job.
This is a soft delete. After a transfer job is deleted, the job and all
the transfer executions are subject to garbage collection. Transfer jobs
become eligible for garbage collection 30 days after soft delete.
:param job_name: (Required) Name of the job to be deleted
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
"""
(
self.get_conn()
.transferJobs()
.patch(
jobName=job_name,
body={
PROJECT_ID: project_id,
TRANSFER_JOB: {STATUS1: GcpTransferJobsStatus.DELETED},
TRANSFER_JOB_FIELD_MASK: STATUS1,
},
)
.execute(num_retries=self.num_retries)
)
def cancel_transfer_operation(self, operation_name: str) -> None:
"""Cancel a transfer operation in Google Storage Transfer Service.
:param operation_name: Name of the transfer operation.
"""
self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries)
def get_transfer_operation(self, operation_name: str) -> dict:
"""Get a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
:return: transfer operation
.. seealso:: https://cloud.google.com/storage-transfer/docs/reference/rest/v1/Operation
"""
return (
self.get_conn()
.transferOperations()
.get(name=operation_name)
.execute(num_retries=self.num_retries)
)
def list_transfer_operations(self, request_filter: dict | None = None, **kwargs) -> list[dict]:
"""Get a transfer operation in Google Storage Transfer Service.
:param request_filter: (Required) A request filter, as described in
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter
With one additional improvement:
:return: transfer operation
The ``project_id`` parameter is optional if you have a project ID
defined in the connection. See: :doc:`/connections/gcp`
"""
# To preserve backward compatibility
# TODO: remove one day
if request_filter is None:
if "filter" in kwargs:
request_filter = kwargs["filter"]
if not isinstance(request_filter, dict):
raise ValueError(f"The request_filter should be dict and is {type(request_filter)}")
warnings.warn("Use 'request_filter' instead of 'filter'", AirflowProviderDeprecationWarning)
else:
raise TypeError(
"list_transfer_operations missing 1 required positional argument: 'request_filter'"
)
conn = self.get_conn()
request_filter = self._inject_project_id(request_filter, FILTER, FILTER_PROJECT_ID)
operations: list[dict] = []
request = conn.transferOperations().list(name=TRANSFER_OPERATIONS, filter=json.dumps(request_filter))
while request is not None:
response = request.execute(num_retries=self.num_retries)
if OPERATIONS in response:
operations.extend(response[OPERATIONS])
request = conn.transferOperations().list_next(
previous_request=request, previous_response=response
)
return operations
def pause_transfer_operation(self, operation_name: str) -> None:
"""Pause a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
"""
self.get_conn().transferOperations().pause(name=operation_name).execute(num_retries=self.num_retries)
def resume_transfer_operation(self, operation_name: str) -> None:
"""Resume a transfer operation in Google Storage Transfer Service.
:param operation_name: (Required) Name of the transfer operation.
"""
self.get_conn().transferOperations().resume(name=operation_name).execute(num_retries=self.num_retries)
def wait_for_transfer_job(
self,
job: dict,
expected_statuses: set[str] | None = None,
timeout: float | timedelta | None = None,
) -> None:
"""Wait until the job reaches the expected state.
:param job: The transfer job to wait for. See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs#TransferJob
:param expected_statuses: The expected state. See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:param timeout: Time in which the operation must end in seconds. If not
specified, defaults to 60 seconds.
"""
expected_statuses = (
{GcpTransferOperationStatus.SUCCESS} if not expected_statuses else expected_statuses
)
if timeout is None:
timeout = 60
elif isinstance(timeout, timedelta):
timeout = timeout.total_seconds()
start_time = time.monotonic()
while time.monotonic() - start_time < timeout:
request_filter = {FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]}
operations = self.list_transfer_operations(request_filter=request_filter)
for operation in operations:
self.log.info("Progress for operation %s: %s", operation[NAME], operation[METADATA][COUNTERS])
if self.operations_contain_expected_statuses(operations, expected_statuses):
return
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
raise AirflowException("Timeout. The operation could not be completed within the allotted time.")
def _inject_project_id(self, body: dict, param_name: str, target_key: str) -> dict:
body = deepcopy(body)
body[target_key] = body.get(target_key, self.project_id)
if not body.get(target_key):
raise AirflowException(
f"The project id must be passed either as `{target_key}` key in `{param_name}` "
f"parameter or as project_id extra in Google Cloud connection definition. Both are not set!"
)
return body
@staticmethod
def operations_contain_expected_statuses(
operations: list[dict], expected_statuses: set[str] | str
) -> bool:
"""Check whether an operation exists with the expected status.
:param operations: (Required) List of transfer operations to check.
:param expected_statuses: (Required) The expected status. See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
        :return: If there is an operation with the expected state in the
            operation list, returns True; otherwise returns False.
:raises AirflowException: If it encounters operations with state FAILED
or ABORTED in the list.
"""
expected_statuses_set = (
{expected_statuses} if isinstance(expected_statuses, str) else set(expected_statuses)
)
if not operations:
return False
current_statuses = {operation[METADATA][STATUS] for operation in operations}
if len(current_statuses - expected_statuses_set) != len(current_statuses):
return True
if len(NEGATIVE_STATUSES - current_statuses) != len(NEGATIVE_STATUSES):
raise AirflowException(
f"An unexpected operation status was encountered. "
f"Expected: {', '.join(expected_statuses_set)}"
)
return False
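# A minimal usage sketch (illustrative): create a GCS-to-GCS transfer job and wait for it to
# succeed, assuming a configured "google_cloud_default" connection. Bucket and project names are
# placeholders, and a real job body typically also carries a schedule.
def _example_transfer_between_buckets() -> None:
    hook = CloudDataTransferServiceHook(gcp_conn_id="google_cloud_default")
    body = {
        DESCRIPTION: "example transfer",
        STATUS: GcpTransferJobsStatus.ENABLED,
        PROJECT_ID: "example-project",
        TRANSFER_SPEC: {
            GCS_DATA_SOURCE: {BUCKET_NAME: "example-source-bucket"},
            GCS_DATA_SINK: {BUCKET_NAME: "example-sink-bucket"},
        },
    }
    job = hook.create_transfer_job(body=body)
    hook.wait_for_transfer_job(job, expected_statuses={GcpTransferOperationStatus.SUCCESS})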
class CloudDataTransferServiceAsyncHook(GoogleBaseAsyncHook):
"""Asynchronous hook for Google Storage Transfer Service."""
def __init__(self, project_id: str | None = None, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self._client: StorageTransferServiceAsyncClient | None = None
def get_conn(self) -> StorageTransferServiceAsyncClient:
"""
Returns async connection to the Storage Transfer Service.
:return: Google Storage Transfer asynchronous client.
"""
if not self._client:
self._client = StorageTransferServiceAsyncClient()
return self._client
async def get_jobs(self, job_names: list[str]) -> ListTransferJobsAsyncPager:
"""
        Gets the latest state of the long-running transfer jobs in Google Storage Transfer Service.
:param job_names: (Required) List of names of the jobs to be fetched.
:return: Object that yields Transfer jobs.
"""
client = self.get_conn()
jobs_list_request = ListTransferJobsRequest(
filter=json.dumps(dict(project_id=self.project_id, job_names=job_names))
)
return await client.list_transfer_jobs(request=jobs_list_request)
async def get_latest_operation(self, job: TransferJob) -> Message | None:
"""
Gets the latest operation of the given TransferJob instance.
:param job: Transfer job instance.
:return: The latest job operation.
"""
latest_operation_name = job.latest_operation_name
if latest_operation_name:
client = self.get_conn()
response_operation = await client.transport.operations_client.get_operation(latest_operation_name)
operation = TransferOperation.deserialize(response_operation.metadata.value)
return operation
return None
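# A minimal usage sketch (illustrative): fetch transfer jobs asynchronously and map each job to
# the status of its latest operation. The project id and job names are placeholders.
async def _example_check_latest_operations(job_names: list[str]) -> dict[str, str]:
    hook = CloudDataTransferServiceAsyncHook(project_id="example-project")
    statuses: dict[str, str] = {}
    jobs_pager = await hook.get_jobs(job_names=job_names)
    async for job in jobs_pager:
        operation = await hook.get_latest_operation(job)
        statuses[job.name] = operation.status.name if operation else "NO_OPERATION"
    return statuses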
| 21,280 | 38.555762 | 129 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/kms.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud KMS hook."""
from __future__ import annotations
import base64
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.kms_v1 import KeyManagementServiceClient
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
def _b64encode(s: bytes) -> str:
"""Base 64 encodes a bytes object to a string."""
return base64.b64encode(s).decode("ascii")
def _b64decode(s: str) -> bytes:
"""Base 64 decodes a string to bytes."""
return base64.b64decode(s.encode("utf-8"))
class CloudKMSHook(GoogleBaseHook):
"""
Hook for Google Cloud Key Management service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._conn: KeyManagementServiceClient | None = None
def get_conn(self) -> KeyManagementServiceClient:
"""
Retrieves connection to Cloud Key Management service.
:return: Cloud Key Management service object
"""
if not self._conn:
self._conn = KeyManagementServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._conn
def encrypt(
self,
key_name: str,
plaintext: bytes,
authenticated_data: bytes | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Encrypts a plaintext message using Google Cloud KMS.
:param key_name: The Resource Name for the key (or key version)
to be used for encryption. Of the form
``projects/*/locations/*/keyRings/*/cryptoKeys/**``
:param plaintext: The message to be encrypted.
:param authenticated_data: Optional additional authenticated data that
must also be provided to decrypt the message.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: The base 64 encoded ciphertext of the original message.
"""
response = self.get_conn().encrypt(
request={
"name": key_name,
"plaintext": plaintext,
"additional_authenticated_data": authenticated_data,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
ciphertext = _b64encode(response.ciphertext)
return ciphertext
def decrypt(
self,
key_name: str,
ciphertext: str,
authenticated_data: bytes | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> bytes:
"""
Decrypts a ciphertext message using Google Cloud KMS.
:param key_name: The Resource Name for the key to be used for decryption.
Of the form ``projects/*/locations/*/keyRings/*/cryptoKeys/**``
:param ciphertext: The message to be decrypted.
:param authenticated_data: Any additional authenticated data that was
provided when encrypting the message.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: The original message.
"""
response = self.get_conn().decrypt(
request={
"name": key_name,
"ciphertext": _b64decode(ciphertext),
"additional_authenticated_data": authenticated_data,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response.plaintext
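# A minimal usage sketch (illustrative): encrypt and decrypt a payload with an existing KMS key,
# assuming a configured "google_cloud_default" connection. The key resource name is a placeholder.
def _example_kms_roundtrip() -> None:
    hook = CloudKMSHook(gcp_conn_id="google_cloud_default")
    key_name = "projects/example-project/locations/global/keyRings/example-ring/cryptoKeys/example-key"
    # encrypt returns the ciphertext base64-encoded as a string.
    ciphertext = hook.encrypt(key_name=key_name, plaintext=b"example-secret")
    # decrypt accepts the base64-encoded ciphertext and returns the original bytes.
    assert hook.decrypt(key_name=key_name, ciphertext=ciphertext) == b"example-secret"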
| 6,540 | 39.627329 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/dataproc.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Dataproc hook."""
from __future__ import annotations
import time
import uuid
from typing import Any, Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.exceptions import ServerError
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.operation_async import AsyncOperation
from google.api_core.operations_v1.operations_client import OperationsClient
from google.api_core.retry import Retry
from google.cloud.dataproc_v1 import (
Batch,
BatchControllerAsyncClient,
BatchControllerClient,
Cluster,
ClusterControllerAsyncClient,
ClusterControllerClient,
Job,
JobControllerAsyncClient,
JobControllerClient,
JobStatus,
WorkflowTemplate,
WorkflowTemplateServiceAsyncClient,
WorkflowTemplateServiceClient,
)
from google.protobuf.duration_pb2 import Duration
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.version import version as airflow_version
class DataProcJobBuilder:
"""A helper class for building Dataproc job."""
def __init__(
self,
project_id: str,
task_id: str,
cluster_name: str,
job_type: str,
properties: dict[str, str] | None = None,
) -> None:
name = f"{task_id.replace('.', '_')}_{uuid.uuid4()!s:.8}"
self.job_type = job_type
self.job: dict[str, Any] = {
"job": {
"reference": {"project_id": project_id, "job_id": name},
"placement": {"cluster_name": cluster_name},
"labels": {"airflow-version": "v" + airflow_version.replace(".", "-").replace("+", "-")},
job_type: {},
}
}
if properties is not None:
self.job["job"][job_type]["properties"] = properties
def add_labels(self, labels: dict | None = None) -> None:
"""Set labels for Dataproc job.
:param labels: Labels for the job query.
"""
if labels:
self.job["job"]["labels"].update(labels)
def add_variables(self, variables: dict | None = None) -> None:
"""Set variables for Dataproc job.
:param variables: Variables for the job query.
"""
if variables is not None:
self.job["job"][self.job_type]["script_variables"] = variables
def add_args(self, args: list[str] | None = None) -> None:
"""Set args for Dataproc job.
:param args: Args for the job query.
"""
if args is not None:
self.job["job"][self.job_type]["args"] = args
def add_query(self, query: str) -> None:
"""Set query for Dataproc job.
:param query: query for the job.
"""
self.job["job"][self.job_type]["query_list"] = {"queries": [query]}
def add_query_uri(self, query_uri: str) -> None:
"""Set query uri for Dataproc job.
:param query_uri: URI for the job query.
"""
self.job["job"][self.job_type]["query_file_uri"] = query_uri
def add_jar_file_uris(self, jars: list[str] | None = None) -> None:
"""Set jars uris for Dataproc job.
:param jars: List of jars URIs
"""
if jars is not None:
self.job["job"][self.job_type]["jar_file_uris"] = jars
def add_archive_uris(self, archives: list[str] | None = None) -> None:
"""Set archives uris for Dataproc job.
:param archives: List of archives URIs
"""
if archives is not None:
self.job["job"][self.job_type]["archive_uris"] = archives
def add_file_uris(self, files: list[str] | None = None) -> None:
"""Set file uris for Dataproc job.
:param files: List of files URIs
"""
if files is not None:
self.job["job"][self.job_type]["file_uris"] = files
def add_python_file_uris(self, pyfiles: list[str] | None = None) -> None:
"""Set python file uris for Dataproc job.
:param pyfiles: List of python files URIs
"""
if pyfiles is not None:
self.job["job"][self.job_type]["python_file_uris"] = pyfiles
def set_main(self, main_jar: str | None = None, main_class: str | None = None) -> None:
"""Set Dataproc main class.
:param main_jar: URI for the main file.
:param main_class: Name of the main class.
:raises: Exception
"""
if main_class is not None and main_jar is not None:
raise Exception("Set either main_jar or main_class")
if main_jar:
self.job["job"][self.job_type]["main_jar_file_uri"] = main_jar
else:
self.job["job"][self.job_type]["main_class"] = main_class
def set_python_main(self, main: str) -> None:
"""Set Dataproc main python file uri.
:param main: URI for the python main file.
"""
self.job["job"][self.job_type]["main_python_file_uri"] = main
def set_job_name(self, name: str) -> None:
"""Set Dataproc job name.
Job name is sanitized, replacing dots by underscores.
:param name: Job name.
"""
sanitized_name = f"{name.replace('.', '_')}_{uuid.uuid4()!s:.8}"
self.job["job"]["reference"]["job_id"] = sanitized_name
def build(self) -> dict:
"""Return Dataproc job.
:return: Dataproc job
"""
return self.job
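# --- Illustrative usage sketch (not part of the original module) ---
# Shows how DataProcJobBuilder assembles the job dict for a Hive query job.
# The project, cluster and task identifiers below are hypothetical placeholders.
def _example_build_hive_job() -> dict:
    builder = DataProcJobBuilder(
        project_id="example-project",
        task_id="example_task",
        cluster_name="example-cluster",
        job_type="hive_job",
        properties={"hive.exec.dynamic.partition": "true"},
    )
    builder.add_query("SHOW DATABASES;")
    builder.add_labels({"team": "analytics"})
    # build() returns the full ``{"job": {...}}`` wrapper; the inner ``"job"``
    # value has the same shape as the Dataproc ``Job`` message.
    return builder.build()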
class DataprocHook(GoogleBaseHook):
"""Google Cloud Dataproc APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain)
def get_cluster_client(self, region: str | None = None) -> ClusterControllerClient:
"""Create a ClusterControllerClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
return ClusterControllerClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_template_client(self, region: str | None = None) -> WorkflowTemplateServiceClient:
"""Create a WorkflowTemplateServiceClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
return WorkflowTemplateServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_job_client(self, region: str | None = None) -> JobControllerClient:
"""Create a JobControllerClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
return JobControllerClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_batch_client(self, region: str | None = None) -> BatchControllerClient:
"""Create a BatchControllerClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
return BatchControllerClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_operations_client(self, region: str | None):
"""Create a OperationsClient."""
return self.get_batch_client(region=region).transport.operations_client
def wait_for_operation(
self,
operation: Operation,
timeout: float | None = None,
result_retry: Retry | _MethodDefault = DEFAULT,
) -> Any:
"""Wait for a long-lasting operation to complete."""
try:
return operation.result(timeout=timeout, retry=result_retry)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@GoogleBaseHook.fallback_to_default_project_id
def create_cluster(
self,
region: str,
project_id: str,
cluster_name: str,
cluster_config: dict | Cluster | None = None,
virtual_cluster_config: dict | None = None,
labels: dict[str, str] | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""Create a cluster in a specified project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster to create.
:param labels: Labels that will be assigned to created cluster.
:param cluster_config: The cluster config to create. If a dict is
provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.ClusterConfig`.
:param virtual_cluster_config: The virtual cluster config, used when
creating a Dataproc cluster that does not directly control the
underlying compute resources, for example, when creating a
Dataproc-on-GKE cluster with
:class:`~google.cloud.dataproc_v1.types.VirtualClusterConfig`.
:param request_id: A unique id used to identify the request. If the
server receives two *CreateClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
# Dataproc labels must conform to the following regex:
# [a-z]([-a-z0-9]*[a-z0-9])? (current airflow version string follows
# semantic versioning spec: x.y.z).
labels = labels or {}
labels.update({"airflow-version": "v" + airflow_version.replace(".", "-").replace("+", "-")})
cluster = {
"project_id": project_id,
"cluster_name": cluster_name,
}
if virtual_cluster_config is not None:
cluster["virtual_cluster_config"] = virtual_cluster_config # type: ignore
if cluster_config is not None:
cluster["config"] = cluster_config # type: ignore
cluster["labels"] = labels # type: ignore
client = self.get_cluster_client(region=region)
result = client.create_cluster(
request={
"project_id": project_id,
"region": region,
"cluster": cluster,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
cluster_uuid: str | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""Delete a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster to delete.
:param cluster_uuid: If specified, the RPC should fail if cluster with
the UUID does not exist.
:param request_id: A unique id used to identify the request. If the
server receives two *DeleteClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
result = client.delete_cluster(
request={
"project_id": project_id,
"region": region,
"cluster_name": cluster_name,
"cluster_uuid": cluster_uuid,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def diagnose_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""Get cluster diagnostic information.
        After the operation completes, the GCS URI of the diagnostic output is returned.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
operation = client.diagnose_cluster(
request={"project_id": project_id, "region": region, "cluster_name": cluster_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
operation.result()
gcs_uri = str(operation.operation.response.value)
return gcs_uri
@GoogleBaseHook.fallback_to_default_project_id
def get_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Cluster:
"""Get the resource representation for a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param cluster_name: The cluster name.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
result = client.get_cluster(
request={"project_id": project_id, "region": region, "cluster_name": cluster_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_clusters(
self,
region: str,
filter_: str,
project_id: str,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""List all regions/{region}/clusters in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
        :param filter_: A filter to constrain the clusters to list. Filters are case-sensitive.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed
per-resource, this parameter does not affect the return value. If
page streaming is performed per-page, this determines the maximum
number of resources in a page.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
result = client.list_clusters(
request={"project_id": project_id, "region": region, "filter": filter_, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_cluster(
self,
cluster_name: str,
cluster: dict | Cluster,
update_mask: dict | FieldMask,
project_id: str,
region: str,
graceful_decommission_timeout: dict | Duration | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""Update a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param cluster_name: The cluster name.
:param cluster: Changes to the cluster. If a dict is provided, it must
be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Cluster`.
:param update_mask: Specifies the path, relative to ``Cluster``, of the
field to update. For example, to change the number of workers in a
cluster to 5, this would be specified as
``config.worker_config.num_instances``, and the ``PATCH`` request
body would specify the new value:
.. code-block:: python
{"config": {"workerConfig": {"numInstances": "5"}}}
Similarly, to change the number of preemptible workers in a cluster
to 5, this would be ``config.secondary_worker_config.num_instances``
and the ``PATCH`` request body would be:
.. code-block:: python
{"config": {"secondaryWorkerConfig": {"numInstances": "5"}}}
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1.types.FieldMask`.
:param graceful_decommission_timeout: Timeout for graceful YARN
decommissioning. Graceful decommissioning allows removing nodes from
the cluster without interrupting jobs in progress. Timeout specifies
how long to wait for jobs in progress to finish before forcefully
removing nodes (and potentially interrupting jobs). Default timeout
is 0 (for forceful decommission), and the maximum allowed timeout is
one day.
Only supported on Dataproc image versions 1.2 and higher.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1.types.Duration`.
:param request_id: A unique id used to identify the request. If the
server receives two *UpdateClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
client = self.get_cluster_client(region=region)
operation = client.update_cluster(
request={
"project_id": project_id,
"region": region,
"cluster_name": cluster_name,
"cluster": cluster,
"update_mask": update_mask,
"graceful_decommission_timeout": graceful_decommission_timeout,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def create_workflow_template(
self,
template: dict | WorkflowTemplate,
project_id: str,
region: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WorkflowTemplate:
"""Create a new workflow template.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param template: The Dataproc workflow template to create. If a dict is
provided, it must be of the same form as the protobuf message
WorkflowTemplate.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
metadata = metadata or ()
client = self.get_template_client(region)
parent = f"projects/{project_id}/regions/{region}"
return client.create_workflow_template(
request={"parent": parent, "template": template}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def instantiate_workflow_template(
self,
template_name: str,
project_id: str,
region: str,
version: int | None = None,
request_id: str | None = None,
parameters: dict[str, str] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""Instantiate a template and begins execution.
:param template_name: Name of template to instantiate.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param version: Version of workflow template to instantiate. If
specified, the workflow will be instantiated only if the current
version of the workflow template has the supplied version. This
option cannot be used to instantiate a previous version of workflow
template.
:param request_id: A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.
:param parameters: Map from parameter names to values that should be
used for those parameters. Values may not exceed 100 characters.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
metadata = metadata or ()
client = self.get_template_client(region)
name = f"projects/{project_id}/regions/{region}/workflowTemplates/{template_name}"
operation = client.instantiate_workflow_template(
request={"name": name, "version": version, "request_id": request_id, "parameters": parameters},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def instantiate_inline_workflow_template(
self,
template: dict | WorkflowTemplate,
project_id: str,
region: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""Instantiate a template and begin execution.
:param template: The workflow template to instantiate. If a dict is
provided, it must be of the same form as the protobuf message
WorkflowTemplate.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param request_id: A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
metadata = metadata or ()
client = self.get_template_client(region)
parent = f"projects/{project_id}/regions/{region}"
operation = client.instantiate_inline_workflow_template(
request={"parent": parent, "template": template, "request_id": request_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def wait_for_job(
self,
job_id: str,
project_id: str,
region: str,
wait_time: int = 10,
timeout: int | None = None,
) -> None:
"""Poll a job to check if it has finished.
:param job_id: Dataproc job ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param wait_time: Number of seconds between checks.
        :param timeout: How many seconds to wait for the job to finish.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
state = None
start = time.monotonic()
while state not in (JobStatus.State.ERROR, JobStatus.State.DONE, JobStatus.State.CANCELLED):
if timeout and start + timeout < time.monotonic():
raise AirflowException(f"Timeout: dataproc job {job_id} is not ready after {timeout}s")
time.sleep(wait_time)
try:
job = self.get_job(project_id=project_id, region=region, job_id=job_id)
state = job.status.state
except ServerError as err:
self.log.info("Retrying. Dataproc API returned server error when waiting for job: %s", err)
if state == JobStatus.State.ERROR:
raise AirflowException(f"Job failed:\n{job}")
if state == JobStatus.State.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{job}")
@GoogleBaseHook.fallback_to_default_project_id
def get_job(
self,
job_id: str,
project_id: str,
region: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""Get the resource representation for a job in a project.
:param job_id: Dataproc job ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
client = self.get_job_client(region=region)
job = client.get_job(
request={"project_id": project_id, "region": region, "job_id": job_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
@GoogleBaseHook.fallback_to_default_project_id
def submit_job(
self,
job: dict | Job,
project_id: str,
region: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""Submit a job to a cluster.
:param job: The job resource. If a dict is provided, it must be of the
same form as the protobuf message Job.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
        :param request_id: A unique id used to identify the request. If the
            server receives two *SubmitJobRequest* requests with the same id, the
            second request is ignored, and the job created and stored by the
            first request is returned. This mitigates the risk of duplicate
            submissions caused by retries.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
client = self.get_job_client(region=region)
return client.submit_job(
request={"project_id": project_id, "region": region, "job": job, "request_id": request_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
job_id: str,
project_id: str,
region: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""Start a job cancellation request.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param job_id: The job ID.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_job_client(region=region)
job = client.cancel_job(
request={"project_id": project_id, "region": region, "job_id": job_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
@GoogleBaseHook.fallback_to_default_project_id
def create_batch(
self,
region: str,
project_id: str,
batch: dict | Batch,
batch_id: str | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""Create a batch workload.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param batch: The batch to create.
:param batch_id: The ID to use for the batch, which will become the
            final component of the batch's resource name. This value must be
            4-63 characters long. Valid characters are ``[a-z][0-9]-``.
:param request_id: A unique id used to identify the request. If the
server receives two *CreateBatchRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
parent = f"projects/{project_id}/regions/{region}"
result = client.create_batch(
request={
"parent": parent,
"batch": batch,
"batch_id": batch_id,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_batch(
self,
batch_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""Delete the batch workload resource.
:param batch_id: The batch ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
name = f"projects/{project_id}/locations/{region}/batches/{batch_id}"
client.delete_batch(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_batch(
self,
batch_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Batch:
"""Get the batch workload resource representation.
:param batch_id: The batch ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
name = f"projects/{project_id}/locations/{region}/batches/{batch_id}"
result = client.get_batch(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_batches(
self,
region: str,
project_id: str,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""List batch workloads.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param page_size: The maximum number of batches to return in each
response. The service may return fewer than this value. The default
page size is 20; the maximum page size is 1000.
:param page_token: A page token received from a previous ``ListBatches``
call. Provide this token to retrieve the subsequent page.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
parent = f"projects/{project_id}/regions/{region}"
result = client.list_batches(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def wait_for_batch(
self,
batch_id: str,
region: str,
project_id: str,
wait_check_interval: int = 10,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Batch:
"""Wait for a batch job to complete.
        After a batch job is submitted, the operator waits for it to complete.
        This method is, however, useful when Airflow is restarted or the task
        pid is killed for any reason. In that case, creation is attempted again,
        the raised AlreadyExists is caught, and execution falls through to this
        method to wait for completion.
:param batch_id: The batch ID.
:param region: Cloud Dataproc region to handle the request.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param wait_check_interval: The amount of time to pause between checks
for job completion.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
state = None
first_loop: bool = True
while state not in [
Batch.State.CANCELLED,
Batch.State.FAILED,
Batch.State.SUCCEEDED,
Batch.State.STATE_UNSPECIFIED,
]:
try:
if not first_loop:
time.sleep(wait_check_interval)
first_loop = False
self.log.debug("Waiting for batch %s", batch_id)
result = self.get_batch(
batch_id=batch_id,
region=region,
project_id=project_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
state = result.state
except ServerError as err:
self.log.info(
"Retrying. Dataproc API returned server error when waiting for batch id %s: %s",
batch_id,
err,
)
return result
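# --- Illustrative usage sketch (not part of the original module) ---
# Submits a job dict (shaped like the Dataproc ``Job`` message) with the hook
# above and blocks until it reaches a terminal state. The connection id,
# project and region below are hypothetical placeholders.
def _example_submit_job_and_wait(job: dict) -> None:
    hook = DataprocHook(gcp_conn_id="google_cloud_default")
    submitted = hook.submit_job(job=job, project_id="example-project", region="us-central1")
    hook.wait_for_job(
        job_id=submitted.reference.job_id,
        project_id="example-project",
        region="us-central1",
        wait_time=10,
    )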
class DataprocAsyncHook(GoogleBaseHook):
"""Asynchronous interaction with Google Cloud Dataproc APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain)
self._cached_client: JobControllerAsyncClient | None = None
def get_cluster_client(self, region: str | None = None) -> ClusterControllerAsyncClient:
"""Create a ClusterControllerAsyncClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
return ClusterControllerAsyncClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_template_client(self, region: str | None = None) -> WorkflowTemplateServiceAsyncClient:
"""Create a WorkflowTemplateServiceAsyncClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
return WorkflowTemplateServiceAsyncClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_job_client(self, region: str | None = None) -> JobControllerAsyncClient:
"""Create a JobControllerAsyncClient."""
if self._cached_client is None:
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
self._cached_client = JobControllerAsyncClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=client_options,
)
return self._cached_client
def get_batch_client(self, region: str | None = None) -> BatchControllerAsyncClient:
"""Create a BatchControllerAsyncClient."""
client_options = None
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-dataproc.googleapis.com:443")
return BatchControllerAsyncClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_operations_client(self, region: str) -> OperationsClient:
"""Create a OperationsClient."""
return self.get_template_client(region=region).transport.operations_client
@GoogleBaseHook.fallback_to_default_project_id
async def create_cluster(
self,
region: str,
project_id: str,
cluster_name: str,
cluster_config: dict | Cluster | None = None,
virtual_cluster_config: dict | None = None,
labels: dict[str, str] | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""Create a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster to create.
:param labels: Labels that will be assigned to created cluster.
:param cluster_config: The cluster config to create. If a dict is
provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.ClusterConfig`.
:param virtual_cluster_config: The virtual cluster config, used when
creating a Dataproc cluster that does not directly control the
underlying compute resources, for example, when creating a
Dataproc-on-GKE cluster with
:class:`~google.cloud.dataproc_v1.types.VirtualClusterConfig`.
:param request_id: A unique id used to identify the request. If the
server receives two *CreateClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
# Dataproc labels must conform to the following regex:
# [a-z]([-a-z0-9]*[a-z0-9])? (current airflow version string follows
# semantic versioning spec: x.y.z).
labels = labels or {}
labels.update({"airflow-version": "v" + airflow_version.replace(".", "-").replace("+", "-")})
cluster = {
"project_id": project_id,
"cluster_name": cluster_name,
}
if virtual_cluster_config is not None:
cluster["virtual_cluster_config"] = virtual_cluster_config # type: ignore
if cluster_config is not None:
cluster["config"] = cluster_config # type: ignore
cluster["labels"] = labels # type: ignore
client = self.get_cluster_client(region=region)
result = await client.create_cluster(
request={
"project_id": project_id,
"region": region,
"cluster": cluster,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def delete_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
cluster_uuid: str | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""Delete a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster to delete.
:param cluster_uuid: If specified, the RPC should fail if cluster with
the UUID does not exist.
:param request_id: A unique id used to identify the request. If the
server receives two *DeleteClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
result = await client.delete_cluster(
request={
"project_id": project_id,
"region": region,
"cluster_name": cluster_name,
"cluster_uuid": cluster_uuid,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def diagnose_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""Get cluster diagnostic information.
        After the operation completes, the GCS URI of the diagnostic output is returned.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region in which to handle the request.
:param cluster_name: Name of the cluster.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
operation = await client.diagnose_cluster(
request={"project_id": project_id, "region": region, "cluster_name": cluster_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
        await operation.result()
gcs_uri = str(operation.operation.response.value)
return gcs_uri
@GoogleBaseHook.fallback_to_default_project_id
async def get_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Cluster:
"""Get the resource representation for a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param cluster_name: The cluster name.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
result = await client.get_cluster(
request={"project_id": project_id, "region": region, "cluster_name": cluster_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def list_clusters(
self,
region: str,
filter_: str,
project_id: str,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""List all regions/{region}/clusters in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
        :param filter_: A filter to constrain the clusters to list. Filters are case-sensitive.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed
per-resource, this parameter does not affect the return value. If
page streaming is performed per-page, this determines the maximum
number of resources in a page.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_cluster_client(region=region)
result = await client.list_clusters(
request={"project_id": project_id, "region": region, "filter": filter_, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def update_cluster(
self,
cluster_name: str,
cluster: dict | Cluster,
update_mask: dict | FieldMask,
project_id: str,
region: str,
graceful_decommission_timeout: dict | Duration | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""Update a cluster in a project.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param cluster_name: The cluster name.
:param cluster: Changes to the cluster. If a dict is provided, it must
be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Cluster`.
:param update_mask: Specifies the path, relative to ``Cluster``, of the
field to update. For example, to change the number of workers in a
cluster to 5, this would be specified as
``config.worker_config.num_instances``, and the ``PATCH`` request
body would specify the new value:
.. code-block:: python
{"config": {"workerConfig": {"numInstances": "5"}}}
Similarly, to change the number of preemptible workers in a cluster
to 5, this would be ``config.secondary_worker_config.num_instances``
and the ``PATCH`` request body would be:
.. code-block:: python
{"config": {"secondaryWorkerConfig": {"numInstances": "5"}}}
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1.types.FieldMask`.
:param graceful_decommission_timeout: Timeout for graceful YARN
decommissioning. Graceful decommissioning allows removing nodes from
the cluster without interrupting jobs in progress. Timeout specifies
how long to wait for jobs in progress to finish before forcefully
removing nodes (and potentially interrupting jobs). Default timeout
is 0 (for forceful decommission), and the maximum allowed timeout is
one day.
Only supported on Dataproc image versions 1.2 and higher.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1.types.Duration`.
:param request_id: A unique id used to identify the request. If the
server receives two *UpdateClusterRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
client = self.get_cluster_client(region=region)
operation = await client.update_cluster(
request={
"project_id": project_id,
"region": region,
"cluster_name": cluster_name,
"cluster": cluster,
"update_mask": update_mask,
"graceful_decommission_timeout": graceful_decommission_timeout,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
async def create_workflow_template(
self,
template: dict | WorkflowTemplate,
project_id: str,
region: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WorkflowTemplate:
"""Create a new workflow template.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param template: The Dataproc workflow template to create. If a dict is
provided, it must be of the same form as the protobuf message
WorkflowTemplate.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
metadata = metadata or ()
client = self.get_template_client(region)
parent = f"projects/{project_id}/regions/{region}"
return await client.create_workflow_template(
request={"parent": parent, "template": template}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
async def instantiate_workflow_template(
self,
template_name: str,
project_id: str,
region: str,
version: int | None = None,
request_id: str | None = None,
parameters: dict[str, str] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""Instantiate a template and begins execution.
:param template_name: Name of template to instantiate.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param version: Version of workflow template to instantiate. If
specified, the workflow will be instantiated only if the current
version of the workflow template has the supplied version. This
option cannot be used to instantiate a previous version of workflow
template.
:param request_id: A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.
:param parameters: Map from parameter names to values that should be
used for those parameters. Values may not exceed 100 characters.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
metadata = metadata or ()
client = self.get_template_client(region)
name = f"projects/{project_id}/regions/{region}/workflowTemplates/{template_name}"
operation = await client.instantiate_workflow_template(
request={"name": name, "version": version, "request_id": request_id, "parameters": parameters},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
async def instantiate_inline_workflow_template(
self,
template: dict | WorkflowTemplate,
project_id: str,
region: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""Instantiate a template and begin execution.
:param template: The workflow template to instantiate. If a dict is
provided, it must be of the same form as the protobuf message
WorkflowTemplate.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param request_id: A tag that prevents multiple concurrent workflow
instances with the same tag from running. This mitigates risk of
concurrent instances started due to retries.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
metadata = metadata or ()
client = self.get_template_client(region)
parent = f"projects/{project_id}/regions/{region}"
operation = await client.instantiate_inline_workflow_template(
request={"parent": parent, "template": template, "request_id": request_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
    async def get_operation(self, region: str, operation_name: str):
        """Return the long-running operation with the given name."""
        return await self.get_operations_client(region).get_operation(name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
async def get_job(
self,
job_id: str,
project_id: str,
region: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""Get the resource representation for a job in a project.
:param job_id: Dataproc job ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
client = self.get_job_client(region=region)
job = await client.get_job(
request={"project_id": project_id, "region": region, "job_id": job_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
@GoogleBaseHook.fallback_to_default_project_id
async def submit_job(
self,
job: dict | Job,
project_id: str,
region: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""Submit a job to a cluster.
:param job: The job resource. If a dict is provided, it must be of the
same form as the protobuf message Job.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
        :param request_id: A unique id used to identify the request. If the
            server receives two *SubmitJobRequest* requests with the same id, the
            second request is ignored, and the job created and stored by the
            first request is returned. This mitigates the risk of duplicate
            submissions caused by retries.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
client = self.get_job_client(region=region)
return await client.submit_job(
request={"project_id": project_id, "region": region, "job": job, "request_id": request_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
async def cancel_job(
self,
job_id: str,
project_id: str,
region: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Job:
"""Start a job cancellation request.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param job_id: The job ID.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_job_client(region=region)
job = await client.cancel_job(
request={"project_id": project_id, "region": region, "job_id": job_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
@GoogleBaseHook.fallback_to_default_project_id
async def create_batch(
self,
region: str,
project_id: str,
batch: dict | Batch,
batch_id: str | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AsyncOperation:
"""Create a batch workload.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param batch: The batch to create.
:param batch_id: The ID to use for the batch, which will become the
            final component of the batch's resource name. This value must be
            4-63 characters long. Valid characters are ``[a-z][0-9]-``.
:param request_id: A unique id used to identify the request. If the
server receives two *CreateBatchRequest* requests with the same
ID, the second request will be ignored, and an operation created
for the first one and stored in the backend is returned.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
parent = f"projects/{project_id}/regions/{region}"
result = await client.create_batch(
request={
"parent": parent,
"batch": batch,
"batch_id": batch_id,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def delete_batch(
self,
batch_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""Delete the batch workload resource.
:param batch_id: The batch ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
name = f"projects/{project_id}/locations/{region}/batches/{batch_id}"
await client.delete_batch(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
async def get_batch(
self,
batch_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Batch:
"""Get the batch workload resource representation.
:param batch_id: The batch ID.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
name = f"projects/{project_id}/locations/{region}/batches/{batch_id}"
result = await client.get_batch(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
async def list_batches(
self,
region: str,
project_id: str,
page_size: int | None = None,
page_token: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""List batch workloads.
:param project_id: Google Cloud project ID that the cluster belongs to.
:param region: Cloud Dataproc region to handle the request.
:param page_size: The maximum number of batches to return in each
response. The service may return fewer than this value. The default
page size is 20; the maximum page size is 1000.
:param page_token: A page token received from a previous ``ListBatches``
call. Provide this token to retrieve the subsequent page.
:param retry: A retry object used to retry requests. If *None*, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. If *retry* is specified, the timeout applies to each
individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_batch_client(region)
parent = f"projects/{project_id}/regions/{region}"
result = await client.list_batches(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
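# A minimal usage sketch for the async batch methods above. It assumes `hook` is an
# instance of the enclosing async Dataproc hook class (whose name is not shown in this
# excerpt) and that the project, region and batch IDs below are placeholders.
async def _example_get_batch(hook) -> None:
    # Fetch a single batch workload and print its state.
    batch = await hook.get_batch(
        batch_id="example-batch",
        region="europe-west1",
        project_id="my-project",
    )
    print(batch.state)
    # Listing batches works the same way; page_size is optional.
    batches = await hook.list_batches(
        region="europe-west1",
        project_id="my-project",
        page_size=20,
    )
    print(batches)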
| 76,762 | 41.598779 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/dataflow.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Dataflow Hook."""
from __future__ import annotations
import functools
import json
import re
import shlex
import subprocess
import time
import uuid
import warnings
from copy import deepcopy
from typing import Any, Callable, Generator, Sequence, TypeVar, cast
from google.cloud.dataflow_v1beta3 import GetJobRequest, Job, JobState, JobsV1Beta3AsyncClient, JobView
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.apache.beam.hooks.beam import BeamHook, BeamRunnerType, beam_options_to_args
from airflow.providers.google.common.hooks.base_google import (
PROVIDE_PROJECT_ID,
GoogleBaseAsyncHook,
GoogleBaseHook,
)
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.timeout import timeout
# This is the default location
# https://cloud.google.com/dataflow/pipelines/specifying-exec-params
DEFAULT_DATAFLOW_LOCATION = "us-central1"
JOB_ID_PATTERN = re.compile(
r"Submitted job: (?P<job_id_java>.*)|Created job with id: \[(?P<job_id_python>.*)\]"
)
T = TypeVar("T", bound=Callable)
def process_line_and_extract_dataflow_job_id_callback(
on_new_job_id_callback: Callable[[str], None] | None
) -> Callable[[str], None]:
"""Build callback that triggers the specified function.
The returned callback is intended to be used as ``process_line_callback`` in
:py:class:`~airflow.providers.apache.beam.hooks.beam.BeamCommandRunner`.
:param on_new_job_id_callback: Callback called when the job ID is known
"""
def _process_line_and_extract_job_id(line: str) -> None:
# Job id info: https://goo.gl/SE29y9.
if on_new_job_id_callback is None:
return
matched_job = JOB_ID_PATTERN.search(line)
if matched_job is None:
return
job_id = matched_job.group("job_id_java") or matched_job.group("job_id_python")
on_new_job_id_callback(job_id)
return _process_line_and_extract_job_id
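# A minimal sketch of the callback factory above; the sample log line mimics the output
# of a Java pipeline submission, and the extracted job ID is a placeholder.
def _example_extract_job_id() -> None:
    captured: list[str] = []
    callback = process_line_and_extract_dataflow_job_id_callback(captured.append)
    callback("Submitted job: 2023-01-01_00_00_00-1234567890123456789")
    # The job ID is parsed out of the line by JOB_ID_PATTERN and passed to the callback.
    assert captured == ["2023-01-01_00_00_00-1234567890123456789"]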
def _fallback_variable_parameter(parameter_name: str, variable_key_name: str) -> Callable[[T], T]:
def _wrapper(func: T) -> T:
"""
        Decorator that provides a fallback for the wrapped parameter from the corresponding key in the `variables` dict.
:param func: function to wrap
:return: result of the function call
"""
@functools.wraps(func)
def inner_wrapper(self: DataflowHook, *args, **kwargs):
if args:
raise AirflowException(
"You must use keyword arguments in this methods rather than positional"
)
parameter_location = kwargs.get(parameter_name)
variables_location = kwargs.get("variables", {}).get(variable_key_name)
if parameter_location and variables_location:
raise AirflowException(
f"The mutually exclusive parameter `{parameter_name}` and `{variable_key_name}` key "
f"in `variables` parameter are both present. Please remove one."
)
if parameter_location or variables_location:
kwargs[parameter_name] = parameter_location or variables_location
if variables_location:
copy_variables = deepcopy(kwargs["variables"])
del copy_variables[variable_key_name]
kwargs["variables"] = copy_variables
return func(self, *args, **kwargs)
return cast(T, inner_wrapper)
return _wrapper
_fallback_to_location_from_variables = _fallback_variable_parameter("location", "region")
_fallback_to_project_id_from_variables = _fallback_variable_parameter("project_id", "project")
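# A minimal sketch of the fallback behaviour above: the `region` key inside `variables`
# is promoted to the `location` keyword argument. `_Probe` is an illustrative stand-in
# for DataflowHook; it only needs to accept the same keyword arguments.
def _example_fallback_decorator() -> None:
    class _Probe:
        @_fallback_to_location_from_variables
        def echo(self, *, variables=None, location=None):
            return location, variables
    location, variables = _Probe().echo(variables={"region": "europe-west1", "jobName": "demo"})
    assert location == "europe-west1"
    assert "region" not in variables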
class DataflowJobStatus:
"""
Helper class with Dataflow job statuses.
Reference: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState
"""
JOB_STATE_DONE = "JOB_STATE_DONE"
JOB_STATE_UNKNOWN = "JOB_STATE_UNKNOWN"
JOB_STATE_STOPPED = "JOB_STATE_STOPPED"
JOB_STATE_RUNNING = "JOB_STATE_RUNNING"
JOB_STATE_FAILED = "JOB_STATE_FAILED"
JOB_STATE_CANCELLED = "JOB_STATE_CANCELLED"
JOB_STATE_UPDATED = "JOB_STATE_UPDATED"
JOB_STATE_DRAINING = "JOB_STATE_DRAINING"
JOB_STATE_DRAINED = "JOB_STATE_DRAINED"
JOB_STATE_PENDING = "JOB_STATE_PENDING"
JOB_STATE_CANCELLING = "JOB_STATE_CANCELLING"
JOB_STATE_QUEUED = "JOB_STATE_QUEUED"
FAILED_END_STATES = {JOB_STATE_FAILED, JOB_STATE_CANCELLED}
SUCCEEDED_END_STATES = {JOB_STATE_DONE, JOB_STATE_UPDATED, JOB_STATE_DRAINED}
TERMINAL_STATES = SUCCEEDED_END_STATES | FAILED_END_STATES
AWAITING_STATES = {
JOB_STATE_RUNNING,
JOB_STATE_PENDING,
JOB_STATE_QUEUED,
JOB_STATE_CANCELLING,
JOB_STATE_DRAINING,
JOB_STATE_STOPPED,
}
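# A minimal sketch of how the sets above classify job states; useful when deciding
# whether a polled state still needs waiting.
def _example_job_state_sets() -> None:
    assert DataflowJobStatus.JOB_STATE_DONE in DataflowJobStatus.TERMINAL_STATES
    assert DataflowJobStatus.JOB_STATE_RUNNING in DataflowJobStatus.AWAITING_STATES
    # The success and failure end states never overlap.
    assert not DataflowJobStatus.SUCCEEDED_END_STATES & DataflowJobStatus.FAILED_END_STATES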
class DataflowJobType:
"""Helper class with Dataflow job types."""
JOB_TYPE_UNKNOWN = "JOB_TYPE_UNKNOWN"
JOB_TYPE_BATCH = "JOB_TYPE_BATCH"
JOB_TYPE_STREAMING = "JOB_TYPE_STREAMING"
class _DataflowJobsController(LoggingMixin):
"""
Interface for communication with Google API.
    It does not use Apache Beam; it only uses the Google Dataflow API.
:param dataflow: Discovery resource
:param project_number: The Google Cloud Project ID.
:param location: Job location.
:param poll_sleep: The status refresh rate for pending operations.
    :param name: The job name prefix used when the multiple_jobs option is set to True.
:param job_id: ID of a single job.
:param num_retries: Maximum number of retries in case of connection problems.
    :param multiple_jobs: If set to True, jobs will be searched by the name prefix (``name`` parameter)
        rather than by a specific job ID, and actions will be performed on all matching jobs.
:param drain_pipeline: Optional, set to True if want to stop streaming job by draining it
instead of canceling.
:param cancel_timeout: wait time in seconds for successful job canceling
:param wait_until_finished: If True, wait for the end of pipeline execution before exiting. If False,
        it only submits the job and checks once whether it has reached a terminal state.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to start,
* for the batch pipeline, wait for the jobs to complete.
"""
def __init__(
self,
dataflow: Any,
project_number: str,
location: str,
poll_sleep: int = 10,
name: str | None = None,
job_id: str | None = None,
num_retries: int = 0,
multiple_jobs: bool = False,
drain_pipeline: bool = False,
cancel_timeout: int | None = 5 * 60,
wait_until_finished: bool | None = None,
) -> None:
super().__init__()
self._dataflow = dataflow
self._project_number = project_number
self._job_name = name
self._job_location = location
self._multiple_jobs = multiple_jobs
self._job_id = job_id
self._num_retries = num_retries
self._poll_sleep = poll_sleep
self._cancel_timeout = cancel_timeout
self._jobs: list[dict] | None = None
self.drain_pipeline = drain_pipeline
self._wait_until_finished = wait_until_finished
def is_job_running(self) -> bool:
"""
        Helper method to check if the job is still running in Dataflow.
:return: True if job is running.
"""
self._refresh_jobs()
if not self._jobs:
return False
for job in self._jobs:
if job["currentState"] not in DataflowJobStatus.TERMINAL_STATES:
return True
return False
def _get_current_jobs(self) -> list[dict]:
"""
        Helper method to get the list of jobs that start with the job name or ID.
        :return: list of jobs including IDs
"""
if not self._multiple_jobs and self._job_id:
return [self.fetch_job_by_id(self._job_id)]
elif self._jobs:
return [self.fetch_job_by_id(job["id"]) for job in self._jobs]
elif self._job_name:
jobs = self._fetch_jobs_by_prefix_name(self._job_name.lower())
if len(jobs) == 1:
self._job_id = jobs[0]["id"]
return jobs
else:
raise Exception("Missing both dataflow job ID and name.")
def fetch_job_by_id(self, job_id: str) -> dict:
"""
Helper method to fetch the job with the specified Job ID.
:param job_id: Job ID to get.
:return: the Job
"""
return (
self._dataflow.projects()
.locations()
.jobs()
.get(
projectId=self._project_number,
location=self._job_location,
jobId=job_id,
)
.execute(num_retries=self._num_retries)
)
def fetch_job_metrics_by_id(self, job_id: str) -> dict:
"""
Helper method to fetch the job metrics with the specified Job ID.
:param job_id: Job ID to get.
:return: the JobMetrics. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/JobMetrics
"""
result = (
self._dataflow.projects()
.locations()
.jobs()
.getMetrics(projectId=self._project_number, location=self._job_location, jobId=job_id)
.execute(num_retries=self._num_retries)
)
self.log.debug("fetch_job_metrics_by_id %s:\n%s", job_id, result)
return result
def _fetch_list_job_messages_responses(self, job_id: str) -> Generator[dict, None, None]:
"""
Helper method to fetch ListJobMessagesResponse with the specified Job ID.
:param job_id: Job ID to get.
:return: yields the ListJobMessagesResponse. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse
"""
request = (
self._dataflow.projects()
.locations()
.jobs()
.messages()
.list(projectId=self._project_number, location=self._job_location, jobId=job_id)
)
while request is not None:
response = request.execute(num_retries=self._num_retries)
yield response
request = (
self._dataflow.projects()
.locations()
.jobs()
.messages()
.list_next(previous_request=request, previous_response=response)
)
def fetch_job_messages_by_id(self, job_id: str) -> list[dict]:
"""
Helper method to fetch the job messages with the specified Job ID.
:param job_id: Job ID to get.
:return: the list of JobMessages. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#JobMessage
"""
messages: list[dict] = []
for response in self._fetch_list_job_messages_responses(job_id=job_id):
messages.extend(response.get("jobMessages", []))
return messages
def fetch_job_autoscaling_events_by_id(self, job_id: str) -> list[dict]:
"""
Helper method to fetch the job autoscaling events with the specified Job ID.
:param job_id: Job ID to get.
:return: the list of AutoscalingEvents. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#autoscalingevent
"""
autoscaling_events: list[dict] = []
for response in self._fetch_list_job_messages_responses(job_id=job_id):
autoscaling_events.extend(response.get("autoscalingEvents", []))
return autoscaling_events
def _fetch_all_jobs(self) -> list[dict]:
request = (
self._dataflow.projects()
.locations()
.jobs()
.list(projectId=self._project_number, location=self._job_location)
)
all_jobs: list[dict] = []
while request is not None:
response = request.execute(num_retries=self._num_retries)
jobs = response.get("jobs")
if jobs is None:
break
all_jobs.extend(jobs)
request = (
self._dataflow.projects()
.locations()
.jobs()
.list_next(previous_request=request, previous_response=response)
)
return all_jobs
def _fetch_jobs_by_prefix_name(self, prefix_name: str) -> list[dict]:
jobs = self._fetch_all_jobs()
jobs = [job for job in jobs if job["name"].startswith(prefix_name)]
return jobs
def _refresh_jobs(self) -> None:
"""
        Helper method to refresh the stored list of jobs by name or ID.
"""
self._jobs = self._get_current_jobs()
if self._jobs:
for job in self._jobs:
self.log.info(
"Google Cloud DataFlow job %s is state: %s",
job["name"],
job["currentState"],
)
else:
self.log.info("Google Cloud DataFlow job not available yet..")
def _check_dataflow_job_state(self, job) -> bool:
"""
        Helper method to check the state of one Dataflow job for this task; raise an exception if the job failed.
:return: True if job is done.
:raise: Exception
"""
if self._wait_until_finished is None:
wait_for_running = job.get("type") == DataflowJobType.JOB_TYPE_STREAMING
else:
wait_for_running = not self._wait_until_finished
if job["currentState"] == DataflowJobStatus.JOB_STATE_DONE:
return True
elif job["currentState"] == DataflowJobStatus.JOB_STATE_FAILED:
raise Exception(f"Google Cloud Dataflow job {job['name']} has failed.")
elif job["currentState"] == DataflowJobStatus.JOB_STATE_CANCELLED:
raise Exception(f"Google Cloud Dataflow job {job['name']} was cancelled.")
elif job["currentState"] == DataflowJobStatus.JOB_STATE_DRAINED:
raise Exception(f"Google Cloud Dataflow job {job['name']} was drained.")
elif job["currentState"] == DataflowJobStatus.JOB_STATE_UPDATED:
raise Exception(f"Google Cloud Dataflow job {job['name']} was updated.")
elif job["currentState"] == DataflowJobStatus.JOB_STATE_RUNNING and wait_for_running:
return True
elif job["currentState"] in DataflowJobStatus.AWAITING_STATES:
return self._wait_until_finished is False
self.log.debug("Current job: %s", str(job))
raise Exception(f"Google Cloud Dataflow job {job['name']} was unknown state: {job['currentState']}")
def wait_for_done(self) -> None:
"""Helper method to wait for result of submitted job."""
self.log.info("Start waiting for done.")
self._refresh_jobs()
while self._jobs and not all(self._check_dataflow_job_state(job) for job in self._jobs):
self.log.info("Waiting for done. Sleep %s s", self._poll_sleep)
time.sleep(self._poll_sleep)
self._refresh_jobs()
def get_jobs(self, refresh: bool = False) -> list[dict]:
"""
Returns Dataflow jobs.
:param refresh: Forces the latest data to be fetched.
:return: list of jobs
"""
if not self._jobs or refresh:
self._refresh_jobs()
if not self._jobs:
raise ValueError("Could not read _jobs")
return self._jobs
def _wait_for_states(self, expected_states: set[str]):
"""Waiting for the jobs to reach a certain state."""
if not self._jobs:
raise ValueError("The _jobs should be set")
while True:
self._refresh_jobs()
job_states = {job["currentState"] for job in self._jobs}
if not job_states.difference(expected_states):
return
unexpected_failed_end_states = DataflowJobStatus.FAILED_END_STATES - expected_states
if unexpected_failed_end_states.intersection(job_states):
unexpected_failed_jobs = [
job for job in self._jobs if job["currentState"] in unexpected_failed_end_states
]
raise AirflowException(
"Jobs failed: "
+ ", ".join(
f"ID: {job['id']} name: {job['name']} state: {job['currentState']}"
for job in unexpected_failed_jobs
)
)
time.sleep(self._poll_sleep)
def cancel(self) -> None:
"""Cancels or drains current job."""
self._jobs = [
job for job in self.get_jobs() if job["currentState"] not in DataflowJobStatus.TERMINAL_STATES
]
job_ids = [job["id"] for job in self._jobs]
if job_ids:
self.log.info("Canceling jobs: %s", ", ".join(job_ids))
for job in self._jobs:
requested_state = (
DataflowJobStatus.JOB_STATE_DRAINED
if self.drain_pipeline and job["type"] == DataflowJobType.JOB_TYPE_STREAMING
else DataflowJobStatus.JOB_STATE_CANCELLED
)
request = (
self._dataflow.projects()
.locations()
.jobs()
.update(
projectId=self._project_number,
location=self._job_location,
jobId=job["id"],
body={"requestedState": requested_state},
)
)
request.execute(num_retries=self._num_retries)
if self._cancel_timeout and isinstance(self._cancel_timeout, int):
timeout_error_message = (
f"Canceling jobs failed due to timeout ({self._cancel_timeout}s): {', '.join(job_ids)}"
)
tm = timeout(seconds=self._cancel_timeout, error_message=timeout_error_message)
with tm:
self._wait_for_states(
{DataflowJobStatus.JOB_STATE_CANCELLED, DataflowJobStatus.JOB_STATE_DRAINED}
)
else:
self.log.info("No jobs to cancel")
class DataflowHook(GoogleBaseHook):
"""
Hook for Google Dataflow.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
impersonation_chain: str | Sequence[str] | None = None,
drain_pipeline: bool = False,
cancel_timeout: int | None = 5 * 60,
wait_until_finished: bool | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
self.poll_sleep = poll_sleep
self.drain_pipeline = drain_pipeline
self.cancel_timeout = cancel_timeout
self.wait_until_finished = wait_until_finished
self.job_id: str | None = None
self.beam_hook = BeamHook(BeamRunnerType.DataflowRunner)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
def get_conn(self) -> build:
"""Returns a Google Cloud Dataflow service object."""
http_authorized = self._authorize()
return build("dataflow", "v1b3", http=http_authorized, cache_discovery=False)
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def start_java_dataflow(
self,
job_name: str,
variables: dict,
jar: str,
project_id: str,
job_class: str | None = None,
append_job_name: bool = True,
multiple_jobs: bool = False,
on_new_job_id_callback: Callable[[str], None] | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> None:
"""
Starts Dataflow java job.
:param job_name: The name of the job.
:param variables: Variables passed to the job.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param jar: Name of the jar for the job
:param job_class: Name of the java class for the job.
:param append_job_name: True if unique suffix has to be appended to job name.
        :param multiple_jobs: Set to True to check for multiple jobs in Dataflow.
:param on_new_job_id_callback: Callback called when the job ID is known.
:param location: Job location.
"""
warnings.warn(
""""This method is deprecated.
Please use `airflow.providers.apache.beam.hooks.beam.start.start_java_pipeline`
to start pipeline and `providers.google.cloud.hooks.dataflow.DataflowHook.wait_for_done`
to wait for the required pipeline state.
""",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
name = self.build_dataflow_job_name(job_name, append_job_name)
variables["jobName"] = name
variables["region"] = location
variables["project"] = project_id
if "labels" in variables:
variables["labels"] = json.dumps(variables["labels"], separators=(",", ":"))
self.beam_hook.start_java_pipeline(
variables=variables,
jar=jar,
job_class=job_class,
process_line_callback=process_line_and_extract_dataflow_job_id_callback(on_new_job_id_callback),
)
self.wait_for_done(
job_name=name,
location=location,
job_id=self.job_id,
multiple_jobs=multiple_jobs,
)
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def start_template_dataflow(
self,
job_name: str,
variables: dict,
parameters: dict,
dataflow_template: str,
project_id: str,
append_job_name: bool = True,
on_new_job_id_callback: Callable[[str], None] | None = None,
on_new_job_callback: Callable[[dict], None] | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
environment: dict | None = None,
) -> dict:
"""
Starts Dataflow template job.
:param job_name: The name of the job.
:param variables: Map of job runtime environment options.
It will update environment argument if passed.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
:param parameters: Parameters for the template
:param dataflow_template: GCS path to the template.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param append_job_name: True if unique suffix has to be appended to job name.
:param on_new_job_id_callback: (Deprecated) Callback called when the Job is known.
:param on_new_job_callback: Callback called when the Job is known.
:param location: Job location.
.. seealso::
For more information on possible configurations, look at the API documentation
`https://cloud.google.com/dataflow/pipelines/specifying-exec-params
<https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__
"""
name = self.build_dataflow_job_name(job_name, append_job_name)
environment = self._update_environment(
variables=variables,
environment=environment,
)
service = self.get_conn()
request = (
service.projects()
.locations()
.templates()
.launch(
projectId=project_id,
location=location,
gcsPath=dataflow_template,
body={
"jobName": name,
"parameters": parameters,
"environment": environment,
},
)
)
response = request.execute(num_retries=self.num_retries)
job = response["job"]
if on_new_job_id_callback:
warnings.warn(
"on_new_job_id_callback is Deprecated. Please start using on_new_job_callback",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
on_new_job_id_callback(job.get("id"))
if on_new_job_callback:
on_new_job_callback(job)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=name,
job_id=job["id"],
location=location,
poll_sleep=self.poll_sleep,
num_retries=self.num_retries,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
jobs_controller.wait_for_done()
return response["job"]
def _update_environment(self, variables: dict, environment: dict | None = None) -> dict:
environment = environment or {}
# available keys for runtime environment are listed here:
# https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
environment_keys = {
"numWorkers",
"maxWorkers",
"zone",
"serviceAccountEmail",
"tempLocation",
"bypassTempDirValidation",
"machineType",
"additionalExperiments",
"network",
"subnetwork",
"additionalUserLabels",
"kmsKeyName",
"ipConfiguration",
"workerRegion",
"workerZone",
}
def _check_one(key, val):
if key in environment:
self.log.warning(
"%r parameter in 'variables' will override the same one passed in 'environment'!",
key,
)
return key, val
environment.update(_check_one(key, val) for key, val in variables.items() if key in environment_keys)
return environment
@GoogleBaseHook.fallback_to_default_project_id
def start_flex_template(
self,
body: dict,
location: str,
project_id: str,
on_new_job_id_callback: Callable[[str], None] | None = None,
on_new_job_callback: Callable[[dict], None] | None = None,
) -> dict:
"""
Starts flex templates with the Dataflow pipeline.
:param body: The request body. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.locations.flexTemplates/launch#request-body
:param location: The location of the Dataflow job (for example europe-west1)
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:param on_new_job_id_callback: (Deprecated) A callback that is called when a Job ID is detected.
:param on_new_job_callback: A callback that is called when a Job is detected.
:return: the Job
"""
service = self.get_conn()
request = (
service.projects()
.locations()
.flexTemplates()
.launch(projectId=project_id, body=body, location=location)
)
response = request.execute(num_retries=self.num_retries)
job = response["job"]
if on_new_job_id_callback:
warnings.warn(
"on_new_job_id_callback is Deprecated. Please start using on_new_job_callback",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
on_new_job_id_callback(job.get("id"))
if on_new_job_callback:
on_new_job_callback(job)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
job_id=job.get("id"),
location=location,
poll_sleep=self.poll_sleep,
num_retries=self.num_retries,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
jobs_controller.wait_for_done()
return jobs_controller.get_jobs(refresh=True)[0]
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def start_python_dataflow(
self,
job_name: str,
variables: dict,
dataflow: str,
py_options: list[str],
project_id: str,
py_interpreter: str = "python3",
py_requirements: list[str] | None = None,
py_system_site_packages: bool = False,
append_job_name: bool = True,
on_new_job_id_callback: Callable[[str], None] | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
):
"""
Starts Dataflow job.
:param job_name: The name of the job.
:param variables: Variables passed to the job.
:param dataflow: Name of the Dataflow process.
:param py_options: Additional options.
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:param py_interpreter: Python version of the beam pipeline.
            If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:param py_requirements: Additional python package(s) to install.
            If a value is passed to this parameter, a new virtual environment will be created with the
            additional packages installed.
You could also install the apache-beam package if it is not installed on your system or you want
to use a different version.
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:param append_job_name: True if unique suffix has to be appended to job name.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param on_new_job_id_callback: Callback called when the job ID is known.
:param location: Job location.
"""
warnings.warn(
"""This method is deprecated.
Please use `airflow.providers.apache.beam.hooks.beam.start.start_python_pipeline`
to start pipeline and `providers.google.cloud.hooks.dataflow.DataflowHook.wait_for_done`
to wait for the required pipeline state.
""",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
name = self.build_dataflow_job_name(job_name, append_job_name)
variables["job_name"] = name
variables["region"] = location
variables["project"] = project_id
self.beam_hook.start_python_pipeline(
variables=variables,
py_file=dataflow,
py_options=py_options,
py_interpreter=py_interpreter,
py_requirements=py_requirements,
py_system_site_packages=py_system_site_packages,
process_line_callback=process_line_and_extract_dataflow_job_id_callback(on_new_job_id_callback),
)
self.wait_for_done(
job_name=name,
location=location,
job_id=self.job_id,
)
@staticmethod
def build_dataflow_job_name(job_name: str, append_job_name: bool = True) -> str:
"""Builds Dataflow job name."""
base_job_name = str(job_name).replace("_", "-")
if not re.match(r"^[a-z]([-a-z0-9]*[a-z0-9])?$", base_job_name):
raise ValueError(
f"Invalid job_name ({base_job_name}); the name must consist of only the characters "
f"[-a-z0-9], starting with a letter and ending with a letter or number "
)
if append_job_name:
safe_job_name = base_job_name + "-" + str(uuid.uuid4())[:8]
else:
safe_job_name = base_job_name
return safe_job_name
@_fallback_to_location_from_variables
@_fallback_to_project_id_from_variables
@GoogleBaseHook.fallback_to_default_project_id
def is_job_dataflow_running(
self,
name: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
variables: dict | None = None,
) -> bool:
"""
        Helper method to check if the job is still running in Dataflow.
:param name: The name of the job.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:return: True if job is running.
"""
if variables:
warnings.warn(
"The variables parameter has been deprecated. You should pass location using "
"the location parameter.",
AirflowProviderDeprecationWarning,
stacklevel=4,
)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=name,
location=location,
poll_sleep=self.poll_sleep,
drain_pipeline=self.drain_pipeline,
num_retries=self.num_retries,
cancel_timeout=self.cancel_timeout,
)
return jobs_controller.is_job_running()
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
project_id: str,
job_name: str | None = None,
job_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> None:
"""
Cancels the job with the specified name prefix or Job ID.
        Parameters ``job_name`` and ``job_id`` are mutually exclusive.
:param job_name: Name prefix specifying which jobs are to be canceled.
:param job_id: Job ID specifying which jobs are to be canceled.
:param location: Job location.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=job_name,
job_id=job_id,
location=location,
poll_sleep=self.poll_sleep,
drain_pipeline=self.drain_pipeline,
num_retries=self.num_retries,
cancel_timeout=self.cancel_timeout,
)
jobs_controller.cancel()
@GoogleBaseHook.fallback_to_default_project_id
def start_sql_job(
self,
job_name: str,
query: str,
options: dict[str, Any],
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
on_new_job_id_callback: Callable[[str], None] | None = None,
on_new_job_callback: Callable[[dict], None] | None = None,
):
"""
Starts Dataflow SQL query.
:param job_name: The unique name to assign to the Cloud Dataflow job.
:param query: The SQL query to execute.
:param options: Job parameters to be executed.
For more information, look at:
            `gcloud beta dataflow sql query
            <https://cloud.google.com/sdk/gcloud/reference/beta/dataflow/sql/query>`__
command reference
:param location: The location of the Dataflow job (for example europe-west1)
:param project_id: The ID of the GCP project that owns the job.
If set to ``None`` or missing, the default project_id from the GCP connection is used.
:param on_new_job_id_callback: (Deprecated) Callback called when the job ID is known.
:param on_new_job_callback: Callback called when the job is known.
:return: the new job object
"""
gcp_options = [
f"--project={project_id}",
"--format=value(job.id)",
f"--job-name={job_name}",
f"--region={location}",
]
if self.impersonation_chain:
if isinstance(self.impersonation_chain, str):
impersonation_account = self.impersonation_chain
elif len(self.impersonation_chain) == 1:
impersonation_account = self.impersonation_chain[0]
else:
raise AirflowException(
"Chained list of accounts is not supported, please specify only one service account"
)
gcp_options.append(f"--impersonate-service-account={impersonation_account}")
cmd = [
"gcloud",
"dataflow",
"sql",
"query",
query,
*gcp_options,
*(beam_options_to_args(options)),
]
self.log.info("Executing command: %s", " ".join(shlex.quote(c) for c in cmd))
with self.provide_authorized_gcloud():
proc = subprocess.run(cmd, capture_output=True)
self.log.info("Output: %s", proc.stdout.decode())
self.log.warning("Stderr: %s", proc.stderr.decode())
self.log.info("Exit code %d", proc.returncode)
if proc.returncode != 0:
raise AirflowException(f"Process exit with non-zero exit code. Exit code: {proc.returncode}")
job_id = proc.stdout.decode().strip()
self.log.info("Created job ID: %s", job_id)
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
job_id=job_id,
location=location,
poll_sleep=self.poll_sleep,
num_retries=self.num_retries,
drain_pipeline=self.drain_pipeline,
wait_until_finished=self.wait_until_finished,
)
job = jobs_controller.get_jobs(refresh=True)[0]
if on_new_job_id_callback:
warnings.warn(
"on_new_job_id_callback is Deprecated. Please start using on_new_job_callback",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
on_new_job_id_callback(cast(str, job.get("id")))
if on_new_job_callback:
on_new_job_callback(job)
jobs_controller.wait_for_done()
return jobs_controller.get_jobs(refresh=True)[0]
@GoogleBaseHook.fallback_to_default_project_id
def get_job(
self,
job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> dict:
"""
Gets the job with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:return: the Job
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_by_id(job_id)
@GoogleBaseHook.fallback_to_default_project_id
def fetch_job_metrics_by_id(
self,
job_id: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> dict:
"""
Gets the job metrics with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:return: the JobMetrics. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/JobMetrics
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_metrics_by_id(job_id)
@GoogleBaseHook.fallback_to_default_project_id
def fetch_job_messages_by_id(
self,
job_id: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> list[dict]:
"""
Gets the job messages with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:return: the list of JobMessages. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#JobMessage
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_messages_by_id(job_id)
@GoogleBaseHook.fallback_to_default_project_id
def fetch_job_autoscaling_events_by_id(
self,
job_id: str,
project_id: str,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> list[dict]:
"""
Gets the job autoscaling events with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:return: the list of AutoscalingEvents. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#autoscalingevent
"""
jobs_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
location=location,
)
return jobs_controller.fetch_job_autoscaling_events_by_id(job_id)
@GoogleBaseHook.fallback_to_default_project_id
def wait_for_done(
self,
job_name: str,
location: str,
project_id: str,
job_id: str | None = None,
multiple_jobs: bool = False,
) -> None:
"""
Wait for Dataflow job.
:param job_name: The 'jobName' to use when executing the DataFlow job
(templated). This ends up being set in the pipeline options, so any entry
with key ``'jobName'`` in ``options`` will be overwritten.
:param location: location the job is running
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param job_id: a Dataflow job ID
        :param multiple_jobs: If the pipeline creates multiple jobs, then monitor all of them.
"""
job_controller = _DataflowJobsController(
dataflow=self.get_conn(),
project_number=project_id,
name=job_name,
location=location,
poll_sleep=self.poll_sleep,
job_id=job_id or self.job_id,
num_retries=self.num_retries,
multiple_jobs=multiple_jobs,
drain_pipeline=self.drain_pipeline,
cancel_timeout=self.cancel_timeout,
wait_until_finished=self.wait_until_finished,
)
job_controller.wait_for_done()
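# A minimal usage sketch for the hook above, assuming an Airflow environment with a
# configured "google_cloud_default" connection; the project and job IDs are placeholders.
def _example_dataflow_hook_usage() -> None:
    # Job names are sanitized and suffixed, e.g. "my_dataflow_job" -> "my-dataflow-job-1a2b3c4d".
    print(DataflowHook.build_dataflow_job_name("my_dataflow_job"))
    hook = DataflowHook(gcp_conn_id="google_cloud_default")
    job = hook.get_job(
        job_id="2023-01-01_00_00_00-1234567890123456789",
        project_id="my-project",
        location=DEFAULT_DATAFLOW_LOCATION,
    )
    print(job["currentState"])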
class AsyncDataflowHook(GoogleBaseAsyncHook):
"""Async hook class for dataflow service."""
sync_hook_class = DataflowHook
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
async def initialize_client(self, client_class):
"""
        Initialize an object of the given class.
        This method is used to initialize an asynchronous client. Because of the large number of client
        classes used for the Dataflow service, they are all initialized in the same way, with credentials
        obtained from the GoogleBaseHook class.
        :param client_class: Class from the Google Cloud SDK
"""
credentials = (await self.get_sync_hook()).get_credentials()
return client_class(
credentials=credentials,
)
async def get_project_id(self) -> str:
project_id = (await self.get_sync_hook()).project_id
return project_id
async def get_job(
self,
job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
job_view: int = JobView.JOB_VIEW_SUMMARY,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> Job:
"""
Gets the job with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param job_view: Optional. JobView object which determines representation of the returned data
:param location: Optional. The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
"""
project_id = project_id or (await self.get_project_id())
client = await self.initialize_client(JobsV1Beta3AsyncClient)
request = GetJobRequest(
dict(
project_id=project_id,
job_id=job_id,
view=job_view,
location=location,
)
)
job = await client.get_job(
request=request,
)
return job
async def get_job_status(
self,
job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
job_view: int = JobView.JOB_VIEW_SUMMARY,
location: str = DEFAULT_DATAFLOW_LOCATION,
) -> JobState:
"""
Gets the job status with the specified Job ID.
:param job_id: Job ID to get.
:param project_id: the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param job_view: Optional. JobView object which determines representation of the returned data
:param location: Optional. The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
"""
job = await self.get_job(
project_id=project_id,
job_id=job_id,
job_view=job_view,
location=location,
)
state = job.current_state
return state
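# A minimal async usage sketch for the hook above, assuming a configured
# "google_cloud_default" connection; the project and job IDs are placeholders.
# Run it with, for example, `asyncio.run(_example_async_job_status())`.
async def _example_async_job_status() -> JobState:
    hook = AsyncDataflowHook(gcp_conn_id="google_cloud_default")
    return await hook.get_job_status(
        job_id="2023-01-01_00_00_00-1234567890123456789",
        project_id="my-project",
        location=DEFAULT_DATAFLOW_LOCATION,
    )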
| 49,826 | 38.110675 | 123 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vision.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vision Hook."""
from __future__ import annotations
from copy import deepcopy
from functools import cached_property
from typing import Any, Callable, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.vision_v1 import (
AnnotateImageRequest,
Image,
ImageAnnotatorClient,
Product,
ProductSearchClient,
ProductSet,
ReferenceImage,
)
from google.protobuf import field_mask_pb2
from google.protobuf.json_format import MessageToDict
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
ERR_DIFF_NAMES = """The {label} name provided in the object ({explicit_name}) is different
    from the name created from the input parameters ({constructed_name}). Please either:
1) Remove the {label} name,
2) Remove the location and {id_label} parameters,
3) Unify the {label} name and input parameters.
"""
ERR_UNABLE_TO_CREATE = """Unable to determine the {label} name. Please either set the name directly
in the {label} object or provide the `location` and `{id_label}` parameters.
"""
class NameDeterminer:
"""Helper class to determine entity name."""
def __init__(self, label: str, id_label: str, get_path: Callable[[str, str, str], str]) -> None:
self.label = label
self.id_label = id_label
self.get_path = get_path
def get_entity_with_name(
self, entity: Any, entity_id: str | None, location: str | None, project_id: str
) -> Any:
"""
Check if entity has the `name` attribute set.
        * If so, no action is taken.
        * If not, and the name can be constructed from the other parameters provided, it is created and
          filled in the entity.
        * If both the entity's 'name' attribute is set and the name can be constructed from the other
          parameters provided:
            * If they are the same - no action is taken.
            * If they are different - an exception is thrown.
:param entity: Entity
:param entity_id: Entity id
:param location: Location
        :param project_id: The ID of the Google Cloud Vision project.
:return: The same entity or entity with new name
:raises: AirflowException
"""
entity = deepcopy(entity)
explicit_name = getattr(entity, "name")
if location and entity_id:
# Necessary parameters to construct the name are present. Checking for conflict with explicit name
constructed_name = self.get_path(project_id, location, entity_id)
if not explicit_name:
entity.name = constructed_name
return entity
if explicit_name != constructed_name:
raise AirflowException(
ERR_DIFF_NAMES.format(
label=self.label,
explicit_name=explicit_name,
constructed_name=constructed_name,
id_label=self.id_label,
)
)
# Not enough parameters to construct the name. Trying to use the name from Product / ProductSet.
if explicit_name:
return entity
else:
raise AirflowException(ERR_UNABLE_TO_CREATE.format(label=self.label, id_label=self.id_label))
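# A minimal sketch of the name determination above: given a location and an entity ID, the
# resource name is constructed via the client path helper. The project and IDs are placeholders.
def _example_name_determiner() -> None:
    determiner = NameDeterminer("Product", "product_id", ProductSearchClient.product_path)
    product = determiner.get_entity_with_name(
        Product(), entity_id="product-1", location="europe-west1", project_id="my-project"
    )
    # Prints "projects/my-project/locations/europe-west1/products/product-1".
    print(product.name)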
class CloudVisionHook(GoogleBaseHook):
"""
Hook for Google Cloud Vision APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
_client: ProductSearchClient | None
product_name_determiner = NameDeterminer("Product", "product_id", ProductSearchClient.product_path)
product_set_name_determiner = NameDeterminer(
"ProductSet", "productset_id", ProductSearchClient.product_set_path
)
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client = None
def get_conn(self) -> ProductSearchClient:
"""
Retrieves connection to Cloud Vision.
:return: Google Cloud Vision client object.
"""
if not self._client:
self._client = ProductSearchClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@cached_property
def annotator_client(self) -> ImageAnnotatorClient:
"""
Creates ImageAnnotatorClient.
:return: Google Image Annotator client object.
"""
return ImageAnnotatorClient(credentials=self.get_credentials())
@staticmethod
def _check_for_error(response: dict) -> None:
if "error" in response:
raise AirflowException(response)
@GoogleBaseHook.fallback_to_default_project_id
def create_product_set(
self,
location: str,
product_set: ProductSet | None,
project_id: str = PROVIDE_PROJECT_ID,
product_set_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Create product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator`.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Creating a new ProductSet under the parent: %s", parent)
response = client.create_product_set(
parent=parent,
product_set=product_set,
product_set_id=product_set_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ProductSet created: %s", response.name if response else "")
self.log.debug("ProductSet created:\n%s", response)
if not product_set_id:
# Product set id was generated by the API
product_set_id = self._get_autogenerated_id(response)
self.log.info("Extracted autogenerated ProductSet ID from the response: %s", product_set_id)
return product_set_id
@GoogleBaseHook.fallback_to_default_project_id
def get_product_set(
self,
location: str,
product_set_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
Get product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionGetProductSetOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Retrieving ProductSet: %s", name)
response = client.get_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("ProductSet retrieved.")
self.log.debug("ProductSet retrieved:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def update_product_set(
self,
product_set: dict | ProductSet,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
product_set_id: str | None = None,
update_mask: dict | field_mask_pb2.FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
Update product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductSetOperator`.
"""
client = self.get_conn()
product_set = self.product_set_name_determiner.get_entity_with_name(
product_set, product_set_id, location, project_id
)
if isinstance(product_set, dict):
product_set = ProductSet(product_set)
self.log.info("Updating ProductSet: %s", product_set.name)
response = client.update_product_set(
product_set=product_set,
update_mask=update_mask, # type: ignore
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ProductSet updated: %s", response.name if response else "")
self.log.debug("ProductSet updated:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def delete_product_set(
self,
location: str,
product_set_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete product set.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductSetOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Deleting ProductSet: %s", name)
client.delete_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("ProductSet with the name [%s] deleted.", name)
@GoogleBaseHook.fallback_to_default_project_id
def create_product(
self,
location: str,
product: dict | Product,
project_id: str = PROVIDE_PROJECT_ID,
product_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Create product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductOperator`.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
self.log.info("Creating a new Product under the parent: %s", parent)
if isinstance(product, dict):
product = Product(product)
response = client.create_product(
parent=parent,
product=product,
product_id=product_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Product created: %s", response.name if response else "")
self.log.debug("Product created:\n%s", response)
if not product_id:
# Product id was generated by the API
product_id = self._get_autogenerated_id(response)
self.log.info("Extracted autogenerated Product ID from the response: %s", product_id)
return product_id
@GoogleBaseHook.fallback_to_default_project_id
def get_product(
self,
location: str,
product_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Get product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionGetProductOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info("Retrieving Product: %s", name)
response = client.get_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("Product retrieved.")
self.log.debug("Product retrieved:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def update_product(
self,
product: dict | Product,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
product_id: str | None = None,
update_mask: dict | field_mask_pb2.FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Update product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductOperator`.
"""
client = self.get_conn()
product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)
if isinstance(product, dict):
product = Product(product)
self.log.info("Updating ProductSet: %s", product.name)
response = client.update_product(
product=product,
update_mask=update_mask, # type: ignore
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Product updated: %s", response.name if response else "")
self.log.debug("Product updated:\n%s", response)
return MessageToDict(response._pb)
@GoogleBaseHook.fallback_to_default_project_id
def delete_product(
self,
location: str,
product_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete product.
For the documentation see:
:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductOperator`.
"""
client = self.get_conn()
name = ProductSearchClient.product_path(project_id, location, product_id)
self.log.info("Deleting ProductSet: %s", name)
client.delete_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info("Product with the name [%s] deleted:", name)
@GoogleBaseHook.fallback_to_default_project_id
def create_reference_image(
self,
location: str,
product_id: str,
reference_image: dict | ReferenceImage,
project_id: str,
reference_image_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> str:
"""
Create reference image.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionCreateReferenceImageOperator`.
"""
client = self.get_conn()
self.log.info("Creating ReferenceImage")
parent = ProductSearchClient.product_path(project=project_id, location=location, product=product_id)
if isinstance(reference_image, dict):
reference_image = ReferenceImage(reference_image)
response = client.create_reference_image(
parent=parent,
reference_image=reference_image,
reference_image_id=reference_image_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ReferenceImage created: %s", response.name if response else "")
self.log.debug("ReferenceImage created:\n%s", response)
if not reference_image_id:
# Reference image id was generated by the API
reference_image_id = self._get_autogenerated_id(response)
self.log.info(
"Extracted autogenerated ReferenceImage ID from the response: %s", reference_image_id
)
return reference_image_id
@GoogleBaseHook.fallback_to_default_project_id
def delete_reference_image(
self,
location: str,
product_id: str,
reference_image_id: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete reference image.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDeleteReferenceImageOperator`.
"""
client = self.get_conn()
self.log.info("Deleting ReferenceImage")
name = ProductSearchClient.reference_image_path(
project=project_id, location=location, product=product_id, reference_image=reference_image_id
)
client.delete_reference_image(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("ReferenceImage with the name [%s] deleted.", name)
@GoogleBaseHook.fallback_to_default_project_id
def add_product_to_product_set(
self,
product_set_id: str,
product_id: str,
project_id: str,
location: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Add product to product set.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionAddProductToProductSetOperator`.
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Add Product[name=%s] to Product Set[name=%s]", product_name, product_set_name)
client.add_product_to_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Product added to Product Set")
@GoogleBaseHook.fallback_to_default_project_id
def remove_product_from_product_set(
self,
product_set_id: str,
product_id: str,
project_id: str,
location: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Remove product from product set.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionRemoveProductFromProductSetOperator`.
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info("Remove Product[name=%s] from Product Set[name=%s]", product_name, product_set_name)
client.remove_product_from_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Product removed from Product Set")
def annotate_image(
self,
request: dict | AnnotateImageRequest,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> dict:
"""
Annotate image.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionImageAnnotateOperator`.
"""
client = self.annotator_client
self.log.info("Annotating image")
response = client.annotate_image(request=request, retry=retry, timeout=timeout)
self.log.info("Image annotated")
return MessageToDict(response._pb)
@GoogleBaseHook.quota_retry()
def batch_annotate_images(
self,
requests: list[dict] | list[AnnotateImageRequest],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> dict:
"""
Batch annotate images.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionImageAnnotateOperator`.
"""
client = self.annotator_client
self.log.info("Annotating images")
requests = list(map(AnnotateImageRequest, requests))
response = client.batch_annotate_images(requests=requests, retry=retry, timeout=timeout)
self.log.info("Images annotated")
return MessageToDict(response._pb)
@GoogleBaseHook.quota_retry()
def text_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Text detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectTextOperator`.
"""
client = self.annotator_client
self.log.info("Detecting text")
if additional_properties is None:
additional_properties = {}
response = client.text_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Text detection finished")
return response
@GoogleBaseHook.quota_retry()
def document_text_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Document text detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionTextDetectOperator`.
"""
client = self.annotator_client
self.log.info("Detecting document text")
if additional_properties is None:
additional_properties = {}
response = client.document_text_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Document text detection finished")
return response
@GoogleBaseHook.quota_retry()
def label_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Label detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageLabelsOperator`.
"""
client = self.annotator_client
self.log.info("Detecting labels")
if additional_properties is None:
additional_properties = {}
response = client.label_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Labels detection finished")
return response
@GoogleBaseHook.quota_retry()
def safe_search_detection(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
) -> dict:
"""
Safe search detection.
For the documentation see:
:py:class:`~airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageSafeSearchOperator`.
"""
client = self.annotator_client
self.log.info("Detecting safe search")
if additional_properties is None:
additional_properties = {}
response = client.safe_search_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response._pb)
self._check_for_error(response)
self.log.info("Safe search detection finished")
return response
@staticmethod
def _get_autogenerated_id(response) -> str:
try:
name = response.name
except AttributeError as e:
raise AirflowException(f"Unable to get name from response... [{response}]\n{e}")
if "/" not in name:
raise AirflowException(f"Unable to get id from name... [{name}]")
return name.rsplit("/", 1)[1]
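# Example: a minimal sketch of calling the detection methods above from a task callable.
# It assumes the enclosing hook class in this module is ``CloudVisionHook`` and uses a
# placeholder connection ID and GCS URI; adjust both to your environment.
def _example_detect_text() -> dict:
# Instantiate the hook with the default Google Cloud connection (placeholder).
hook = CloudVisionHook(gcp_conn_id="google_cloud_default")
# The Vision API accepts an Image as raw bytes or as a GCS/HTTP URI; here a GCS URI.
image = {"source": {"image_uri": "gs://my-bucket/invoice.png"}} # placeholder object
# text_detection() returns the annotation response converted to a dict and raises
# AirflowException if the API reported an error.
return hook.text_detection(image=image, max_results=10)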
| 26,125 | 34.936726 | 116 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/cloud_sql.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud SQL Hook."""
from __future__ import annotations
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import socket
import string
import subprocess
import time
import uuid
from inspect import signature
from pathlib import Path
from subprocess import PIPE, Popen
from tempfile import gettempdir
from typing import Any, Sequence
from urllib.parse import quote_plus
import httpx
from aiohttp import ClientSession
from gcloud.aio.auth import AioSession, Token
from googleapiclient.discovery import Resource, build
from googleapiclient.errors import HttpError
from requests import Session
# Number of retries - used by googleapiclient method calls to perform retries
# For requests that are "retriable"
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers.google.common.hooks.base_google import GoogleBaseAsyncHook, GoogleBaseHook, get_field
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.postgres.hooks.postgres import PostgresHook
from airflow.utils.log.logging_mixin import LoggingMixin
UNIX_PATH_MAX = 108
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 20
CLOUD_SQL_PROXY_VERSION_REGEX = re.compile(r"^v?(\d+\.\d+\.\d+)(-\w*.?\d?)?$")
class CloudSqlOperationStatus:
"""Helper class with operation statuses."""
PENDING = "PENDING"
RUNNING = "RUNNING"
DONE = "DONE"
UNKNOWN = "UNKNOWN"
class CloudSQLHook(GoogleBaseHook):
"""
Hook for Google Cloud SQL APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
    :param api_version: The version of the Cloud SQL Admin API to use (for example ``v1beta4``).
    :param gcp_conn_id: The Airflow connection used for GCP credentials.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials.
"""
conn_name_attr = "gcp_conn_id"
default_conn_name = "google_cloud_sql_default"
conn_type = "gcpcloudsql"
hook_name = "Google Cloud SQL"
def __init__(
self,
api_version: str,
gcp_conn_id: str = default_conn_name,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
self._conn = None
def get_conn(self) -> Resource:
"""
Retrieves connection to Cloud SQL.
:return: Google Cloud SQL services object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("sqladmin", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(self, instance: str, project_id: str) -> dict:
"""
Retrieves a resource containing information about a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: A Cloud SQL instance resource.
"""
return (
self.get_conn()
.instances()
.get(project=project_id, instance=instance)
.execute(num_retries=self.num_retries)
)
@GoogleBaseHook.fallback_to_default_project_id
@GoogleBaseHook.operation_in_progress_retry()
def create_instance(self, body: dict, project_id: str) -> None:
"""
Creates a new Cloud SQL instance.
:param body: Body required by the Cloud SQL insert API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.instances()
.insert(project=project_id, body=body)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
@GoogleBaseHook.operation_in_progress_retry()
def patch_instance(self, body: dict, instance: str, project_id: str) -> None:
"""
Updates settings of a Cloud SQL instance.
Caution: This is not a partial update, so you must include values for
all the settings that you want to retain.
:param body: Body required by the Cloud SQL patch API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/patch#request-body.
:param instance: Cloud SQL instance ID. This does not include the project ID.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.instances()
.patch(project=project_id, instance=instance, body=body)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
@GoogleBaseHook.operation_in_progress_retry()
def delete_instance(self, instance: str, project_id: str) -> None:
"""
Deletes a Cloud SQL instance.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param instance: Cloud SQL instance ID. This does not include the project ID.
:return: None
"""
response = (
self.get_conn()
.instances()
.delete(project=project_id, instance=instance)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
# For some delete instance operations, the operation stops being available ~9 seconds after
# completion, so we need a shorter sleep time to make sure we don't miss the DONE status.
self._wait_for_operation_to_complete(
project_id=project_id, operation_name=operation_name, time_to_sleep=5
)
@GoogleBaseHook.fallback_to_default_project_id
def get_database(self, instance: str, database: str, project_id: str) -> dict:
"""
Retrieves a database resource from a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:param database: Name of the database in the instance.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: A Cloud SQL database resource, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases#resource.
"""
return (
self.get_conn()
.databases()
.get(project=project_id, instance=instance, database=database)
.execute(num_retries=self.num_retries)
)
@GoogleBaseHook.fallback_to_default_project_id
@GoogleBaseHook.operation_in_progress_retry()
def create_database(self, instance: str, body: dict, project_id: str) -> None:
"""
Creates a new database inside a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.databases()
.insert(project=project_id, instance=instance, body=body)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
@GoogleBaseHook.operation_in_progress_retry()
def patch_database(
self,
instance: str,
database: str,
body: dict,
project_id: str,
) -> None:
"""
Updates a database resource inside a Cloud SQL instance.
This method supports patch semantics.
See https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch.
:param instance: Database instance ID. This does not include the project ID.
:param database: Name of the database to be updated in the instance.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.databases()
.patch(project=project_id, instance=instance, database=database, body=body)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
@GoogleBaseHook.operation_in_progress_retry()
def delete_database(self, instance: str, database: str, project_id: str) -> None:
"""
Deletes a database from a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:param database: Name of the database to be deleted in the instance.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.databases()
.delete(project=project_id, instance=instance, database=database)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
@GoogleBaseHook.fallback_to_default_project_id
def export_instance(self, instance: str, body: dict, project_id: str):
"""
Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump or CSV file.
:param instance: Database instance ID of the Cloud SQL instance. This does not include the
project ID.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.instances()
.export(project=project_id, instance=instance, body=body)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
return operation_name
@GoogleBaseHook.fallback_to_default_project_id
def import_instance(self, instance: str, body: dict, project_id: str) -> None:
"""
Imports data into a Cloud SQL instance from a SQL dump or CSV file in Cloud Storage.
:param instance: Database instance ID. This does not include the
project ID.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/import#request-body
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
try:
response = (
self.get_conn()
.instances()
.import_(project=project_id, instance=instance, body=body)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
except HttpError as ex:
raise AirflowException(f"Importing instance {instance} failed: {ex.content}")
@GoogleBaseHook.fallback_to_default_project_id
def clone_instance(self, instance: str, body: dict, project_id: str) -> None:
"""
Clones an instance to a target instance.
        :param instance: Database instance ID of the instance to be cloned. This does not include the
            project ID. The target (destination) instance is specified in the request body.
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/instances/clone
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:return: None
"""
try:
response = (
self.get_conn()
.instances()
.clone(project=project_id, instance=instance, body=body)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
except HttpError as ex:
raise AirflowException(f"Cloning of instance {instance} failed: {ex.content}")
@GoogleBaseHook.fallback_to_default_project_id
def _wait_for_operation_to_complete(
self, project_id: str, operation_name: str, time_to_sleep: int = TIME_TO_SLEEP_IN_SECONDS
) -> None:
"""
Waits for the named operation to complete - checks status of the asynchronous call.
:param project_id: Project ID of the project that contains the instance.
:param operation_name: Name of the operation.
:param time_to_sleep: Time to sleep between active checks of the operation results.
:return: None
"""
service = self.get_conn()
while True:
operation_response = (
service.operations()
.get(project=project_id, operation=operation_name)
.execute(num_retries=self.num_retries)
)
if operation_response.get("status") == CloudSqlOperationStatus.DONE:
error = operation_response.get("error")
if error:
# Extracting the errors list as string and trimming square braces
error_msg = str(error.get("errors"))[1:-1]
raise AirflowException(error_msg)
# No meaningful info to return from the response in case of success
return
time.sleep(time_to_sleep)
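# Example: a minimal sketch of driving the CloudSQLHook above to export a database to GCS
# and wait for the export operation to finish. The project, instance, database and bucket
# names are placeholders; the request body follows the Cloud SQL Admin API
# ``instances/export`` schema.
def _example_export_to_gcs() -> None:
hook = CloudSQLHook(api_version="v1beta4", gcp_conn_id="google_cloud_default")
export_body = {
"exportContext": {
"fileType": "CSV",
"uri": "gs://my-bucket/exports/users.csv", # placeholder bucket/object
"databases": ["mydb"],
"csvExportOptions": {"selectQuery": "SELECT * FROM users"},
}
}
# export_instance() only submits the operation and returns its name ...
operation_name = hook.export_instance(
instance="my-instance", body=export_body, project_id="my-project"
)
# ... so, for brevity, reuse the hook's internal polling helper to block until DONE.
hook._wait_for_operation_to_complete(project_id="my-project", operation_name=operation_name)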
CLOUD_SQL_PROXY_DOWNLOAD_URL = "https://dl.google.com/cloudsql/cloud_sql_proxy.{}.{}"
CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL = (
"https://storage.googleapis.com/cloudsql-proxy/{}/cloud_sql_proxy.{}.{}"
)
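# Example: a small sketch of how the two templates above are filled in by
# CloudSqlProxyRunner._get_sql_proxy_download_url(): the "latest" URL takes only
# system/processor, while the versioned URL is prefixed with a pinned tag. The version
# tag below is only an example value.
def _example_proxy_download_urls() -> tuple[str, str]:
latest = CLOUD_SQL_PROXY_DOWNLOAD_URL.format("linux", "amd64")
pinned = CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL.format("v1.33.2", "linux", "amd64")
return latest, pinned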
class CloudSQLAsyncHook(GoogleBaseAsyncHook):
"""Class to get asynchronous hook for Google Cloud SQL."""
sync_hook_class = CloudSQLHook
async def _get_conn(self, session: Session, url: str):
scopes = [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/sqlservice.admin",
]
async with Token(scopes=scopes) as token:
session_aio = AioSession(session)
headers = {
"Authorization": f"Bearer {await token.get()}",
}
return await session_aio.get(url=url, headers=headers)
async def get_operation_name(self, project_id: str, operation_name: str, session):
url = f"https://sqladmin.googleapis.com/sql/v1beta4/projects/{project_id}/operations/{operation_name}"
return await self._get_conn(url=str(url), session=session)
async def get_operation(self, project_id: str, operation_name: str):
async with ClientSession() as session:
try:
operation = await self.get_operation_name(
project_id=project_id,
operation_name=operation_name,
session=session,
)
operation = await operation.json(content_type=None)
except HttpError as e:
raise e
return operation
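# Example: a minimal sketch of polling a long-running Cloud SQL operation with the async
# hook above, e.g. from a trigger's run() loop. The api_version kwarg is forwarded to the
# underlying sync CloudSQLHook; project and operation names are supplied by the caller.
async def _example_wait_for_operation(project_id: str, operation_name: str) -> dict:
import asyncio # not imported at module level; local to this sketch
hook = CloudSQLAsyncHook(api_version="v1beta4", gcp_conn_id="google_cloud_default")
while True:
operation = await hook.get_operation(project_id=project_id, operation_name=operation_name)
if operation.get("status") == CloudSqlOperationStatus.DONE:
return operation
# Re-check periodically instead of busy-waiting.
await asyncio.sleep(TIME_TO_SLEEP_IN_SECONDS)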
class CloudSqlProxyRunner(LoggingMixin):
"""
    Downloads and runs cloud-sql-proxy as a subprocess of the Python process.
    The cloud-sql-proxy needs to be downloaded and started before we can connect
    to the Google Cloud SQL instance via a database connection. It establishes
    a secure tunnel connection to the database and authorizes using the
    Google Cloud credentials that are passed by the configuration.
More details about the proxy can be found here:
https://cloud.google.com/sql/docs/mysql/sql-proxy
:param path_prefix: Unique path prefix where proxy will be downloaded and
directories created for unix sockets.
:param instance_specification: Specification of the instance to connect the
proxy to. It should be specified in the form that is described in
https://cloud.google.com/sql/docs/mysql/sql-proxy#multiple-instances in
-instances parameter (typically in the form of ``<project>:<region>:<instance>``
for UNIX socket connections and in the form of
        ``<project>:<region>:<instance>=tcp:<port>`` for TCP connections).
:param gcp_conn_id: Id of Google Cloud connection to use for
authentication
:param project_id: Optional id of the Google Cloud project to connect to - it overwrites
default project id taken from the Google Cloud connection.
:param sql_proxy_version: Specific version of SQL proxy to download
        (for example 'v1.13'). By default, the latest version is downloaded.
    :param sql_proxy_binary_path: If specified, the proxy will be
        used from the specified path rather than from a dynamically generated one. If
        the binary is not present at that path, it will be downloaded there.
"""
def __init__(
self,
path_prefix: str,
instance_specification: str,
gcp_conn_id: str = "google_cloud_default",
project_id: str | None = None,
sql_proxy_version: str | None = None,
sql_proxy_binary_path: str | None = None,
) -> None:
super().__init__()
self.path_prefix = path_prefix
if not self.path_prefix:
raise AirflowException("The path_prefix must not be empty!")
self.sql_proxy_was_downloaded = False
self.sql_proxy_version = sql_proxy_version
self.download_sql_proxy_dir = None
self.sql_proxy_process: Popen | None = None
self.instance_specification = instance_specification
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.command_line_parameters: list[str] = []
self.cloud_sql_proxy_socket_directory = self.path_prefix
self.sql_proxy_path = (
sql_proxy_binary_path if sql_proxy_binary_path else self.path_prefix + "_cloud_sql_proxy"
)
self.credentials_path = self.path_prefix + "_credentials.json"
self._build_command_line_parameters()
def _build_command_line_parameters(self) -> None:
self.command_line_parameters.extend(["-dir", self.cloud_sql_proxy_socket_directory])
self.command_line_parameters.extend(["-instances", self.instance_specification])
@staticmethod
def _is_os_64bit() -> bool:
return platform.machine().endswith("64")
def _download_sql_proxy_if_needed(self) -> None:
if os.path.isfile(self.sql_proxy_path):
self.log.info("cloud-sql-proxy is already present")
return
download_url = self._get_sql_proxy_download_url()
proxy_path_tmp = self.sql_proxy_path + ".tmp"
self.log.info("Downloading cloud_sql_proxy from %s to %s", download_url, proxy_path_tmp)
# httpx has a breaking API change (follow_redirects vs allow_redirects)
# and this should work with both versions (cf. issue #20088)
if "follow_redirects" in signature(httpx.get).parameters.keys():
response = httpx.get(download_url, follow_redirects=True)
else:
response = httpx.get(download_url, allow_redirects=True) # type: ignore[call-arg]
# Downloading to .tmp file first to avoid case where partially downloaded
# binary is used by parallel operator which uses the same fixed binary path
with open(proxy_path_tmp, "wb") as file:
file.write(response.content)
if response.status_code != 200:
raise AirflowException(
"The cloud-sql-proxy could not be downloaded. "
f"Status code = {response.status_code}. Reason = {response.reason_phrase}"
)
self.log.info("Moving sql_proxy binary from %s to %s", proxy_path_tmp, self.sql_proxy_path)
shutil.move(proxy_path_tmp, self.sql_proxy_path)
os.chmod(self.sql_proxy_path, 0o744) # Set executable bit
self.sql_proxy_was_downloaded = True
def _get_sql_proxy_download_url(self):
system = platform.system().lower()
processor = os.uname().machine
if processor == "x86_64":
processor = "amd64"
if not self.sql_proxy_version:
download_url = CLOUD_SQL_PROXY_DOWNLOAD_URL.format(system, processor)
else:
if not CLOUD_SQL_PROXY_VERSION_REGEX.match(self.sql_proxy_version):
raise ValueError(
"The sql_proxy_version should match the regular expression "
f"{CLOUD_SQL_PROXY_VERSION_REGEX.pattern}"
)
download_url = CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL.format(
self.sql_proxy_version, system, processor
)
return download_url
def _get_credential_parameters(self) -> list[str]:
extras = GoogleBaseHook.get_connection(conn_id=self.gcp_conn_id).extra_dejson
key_path = get_field(extras, "key_path")
keyfile_dict = get_field(extras, "keyfile_dict")
if key_path:
credential_params = ["-credential_file", key_path]
elif keyfile_dict:
keyfile_content = keyfile_dict if isinstance(keyfile_dict, dict) else json.loads(keyfile_dict)
self.log.info("Saving credentials to %s", self.credentials_path)
with open(self.credentials_path, "w") as file:
json.dump(keyfile_content, file)
credential_params = ["-credential_file", self.credentials_path]
else:
self.log.info(
"The credentials are not supplied by neither key_path nor "
"keyfile_dict of the gcp connection %s. Falling back to "
"default activated account",
self.gcp_conn_id,
)
credential_params = []
if not self.instance_specification:
project_id = get_field(extras, "project")
if self.project_id:
project_id = self.project_id
if not project_id:
raise AirflowException(
"For forwarding all instances, the project id "
"for Google Cloud should be provided either "
"by project_id extra in the Google Cloud connection or by "
"project_id provided in the operator."
)
credential_params.extend(["-projects", project_id])
return credential_params
def start_proxy(self) -> None:
"""
Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it!
"""
self._download_sql_proxy_if_needed()
if self.sql_proxy_process:
raise AirflowException(f"The sql proxy is already running: {self.sql_proxy_process}")
else:
command_to_run = [self.sql_proxy_path]
command_to_run.extend(self.command_line_parameters)
self.log.info("Creating directory %s", self.cloud_sql_proxy_socket_directory)
Path(self.cloud_sql_proxy_socket_directory).mkdir(parents=True, exist_ok=True)
command_to_run.extend(self._get_credential_parameters())
self.log.info("Running the command: `%s`", " ".join(command_to_run))
self.sql_proxy_process = Popen(command_to_run, stdin=PIPE, stdout=PIPE, stderr=PIPE)
self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
while True:
line = (
self.sql_proxy_process.stderr.readline().decode("utf-8")
if self.sql_proxy_process.stderr
else ""
)
return_code = self.sql_proxy_process.poll()
if line == "" and return_code is not None:
self.sql_proxy_process = None
raise AirflowException(
f"The cloud_sql_proxy finished early with return code {return_code}!"
)
if line != "":
self.log.info(line)
if "googleapi: Error" in line or "invalid instance name:" in line:
self.stop_proxy()
raise AirflowException(f"Error when starting the cloud_sql_proxy {line}!")
if "Ready for new connections" in line:
return
def stop_proxy(self) -> None:
"""
Stops running proxy.
You should stop the proxy after you stop using it.
"""
if not self.sql_proxy_process:
raise AirflowException("The sql proxy is not started yet")
else:
self.log.info("Stopping the cloud_sql_proxy pid: %s", self.sql_proxy_process.pid)
self.sql_proxy_process.kill()
self.sql_proxy_process = None
# Cleanup!
self.log.info("Removing the socket directory: %s", self.cloud_sql_proxy_socket_directory)
shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True)
if self.sql_proxy_was_downloaded:
self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path)
# Silently ignore if the file has already been removed (concurrency)
try:
os.remove(self.sql_proxy_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
self.log.info("Skipped removing proxy - it was not downloaded: %s", self.sql_proxy_path)
if os.path.isfile(self.credentials_path):
self.log.info("Removing generated credentials file %s", self.credentials_path)
            # The file cannot be deleted here by a concurrent task (each task has its own copy)
os.remove(self.credentials_path)
def get_proxy_version(self) -> str | None:
"""Returns version of the Cloud SQL Proxy."""
self._download_sql_proxy_if_needed()
command_to_run = [self.sql_proxy_path]
command_to_run.extend(["--version"])
command_to_run.extend(self._get_credential_parameters())
result = subprocess.check_output(command_to_run).decode("utf-8")
pattern = re.compile("^.*[V|v]ersion ([^;]*);.*$")
matched = pattern.match(result)
if matched:
return matched.group(1)
else:
return None
def get_socket_path(self) -> str:
"""
Retrieves UNIX socket path used by Cloud SQL Proxy.
:return: The dynamically generated path for the socket created by the proxy.
"""
return self.cloud_sql_proxy_socket_directory + "/" + self.instance_specification
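# Example: a minimal sketch of the runner's lifecycle for a TCP forwarding setup. The
# project/region/instance, local port and path prefix are placeholders; within Airflow this
# lifecycle is normally driven through CloudSQLDatabaseHook.get_sqlproxy_runner() rather
# than directly.
def _example_run_proxy() -> None:
runner = CloudSqlProxyRunner(
path_prefix="/tmp/example_sql_proxy", # placeholder prefix
instance_specification="my-project:europe-west1:my-instance=tcp:3306",
gcp_conn_id="google_cloud_default",
)
runner.start_proxy()
try:
# While the proxy is up, a MySQL client can connect via 127.0.0.1:3306.
print(f"cloud_sql_proxy version: {runner.get_proxy_version()}")
finally:
# Always stop the proxy so sockets, downloaded binaries and credentials are cleaned up.
runner.stop_proxy()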
CONNECTION_URIS: dict[str, dict[str, dict[str, str]]] = {
"postgres": {
"proxy": {
"tcp": "postgresql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
"socket": "postgresql://{user}:{password}@{socket_path}/{database}",
},
"public": {
"ssl": "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"sslmode=verify-ca&"
"sslcert={client_cert_file}&"
"sslkey={client_key_file}&"
"sslrootcert={server_ca_file}",
"non-ssl": "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}",
},
},
"mysql": {
"proxy": {
"tcp": "mysql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
"socket": "mysql://{user}:{password}@localhost/{database}?unix_socket={socket_path}",
},
"public": {
"ssl": "mysql://{user}:{password}@{public_ip}:{public_port}/{database}?ssl={ssl_spec}",
"non-ssl": "mysql://{user}:{password}@{public_ip}:{public_port}/{database}",
},
},
}
CLOUD_SQL_VALID_DATABASE_TYPES = ["postgres", "mysql"]
class CloudSQLDatabaseHook(BaseHook):
"""
Serves DB connection configuration for Google Cloud SQL (Connections of *gcpcloudsqldb://* type).
The hook is a "meta" one. It does not perform an actual connection.
It is there to retrieve all the parameters configured in gcpcloudsql:// connection,
start/stop Cloud SQL Proxy if needed, dynamically generate Postgres or MySQL
connection in the database and return an actual Postgres or MySQL hook.
The returned Postgres/MySQL hooks are using direct connection or Cloud SQL
Proxy socket/TCP as configured.
Main parameters of the hook are retrieved from the standard URI components:
* **user** - User name to authenticate to the database (from login of the URI).
* **password** - Password to authenticate to the database (from password of the URI).
* **public_ip** - IP to connect to for public connection (from host of the URI).
* **public_port** - Port to connect to for public connection (from port of the URI).
* **database** - Database to connect to (from schema of the URI).
* **sql_proxy_binary_path** - Optional path to Cloud SQL Proxy binary. If the binary
is not specified or the binary is not present, it is automatically downloaded.
Remaining parameters are retrieved from the extras (URI query parameters):
* **project_id** - Optional, Google Cloud project where the Cloud SQL
      instance exists. If missing, the default project id passed to the hook is used.
* **instance** - Name of the instance of the Cloud SQL database instance.
* **location** - The location of the Cloud SQL instance (for example europe-west1).
* **database_type** - The type of the database instance (MySQL or Postgres).
* **use_proxy** - (default False) Whether SQL proxy should be used to connect to Cloud
SQL DB.
* **use_ssl** - (default False) Whether SSL should be used to connect to Cloud SQL DB.
You cannot use proxy and SSL together.
* **sql_proxy_use_tcp** - (default False) If set to true, TCP is used to connect via
proxy, otherwise UNIX sockets are used.
* **sql_proxy_version** - Specific version of the proxy to download (for example
v1.13). If not specified, the latest version is downloaded.
* **sslcert** - Path to client certificate to authenticate when SSL is used.
* **sslkey** - Path to client private key to authenticate when SSL is used.
* **sslrootcert** - Path to server's certificate to authenticate when SSL is used.
    :param gcp_cloudsql_conn_id: The connection ID of the *gcpcloudsqldb://* connection.
:param gcp_conn_id: The connection ID used to connect to Google Cloud for
cloud-sql-proxy authentication.
:param default_gcp_project_id: Default project id used if project_id not specified
in the connection URL
"""
conn_name_attr = "gcp_cloudsql_conn_id"
default_conn_name = "google_cloud_sqldb_default"
conn_type = "gcpcloudsqldb"
hook_name = "Google Cloud SQL Database"
def __init__(
self,
gcp_cloudsql_conn_id: str = "google_cloud_sql_default",
gcp_conn_id: str = "google_cloud_default",
default_gcp_project_id: str | None = None,
sql_proxy_binary_path: str | None = None,
) -> None:
super().__init__()
self.gcp_conn_id = gcp_conn_id
self.gcp_cloudsql_conn_id = gcp_cloudsql_conn_id
self.cloudsql_connection = self.get_connection(self.gcp_cloudsql_conn_id)
self.extras = self.cloudsql_connection.extra_dejson
self.project_id = self.extras.get("project_id", default_gcp_project_id)
self.instance = self.extras.get("instance")
self.database = self.cloudsql_connection.schema
self.location = self.extras.get("location")
self.database_type = self.extras.get("database_type")
self.use_proxy = self._get_bool(self.extras.get("use_proxy", "False"))
self.use_ssl = self._get_bool(self.extras.get("use_ssl", "False"))
self.sql_proxy_use_tcp = self._get_bool(self.extras.get("sql_proxy_use_tcp", "False"))
self.sql_proxy_version = self.extras.get("sql_proxy_version")
self.sql_proxy_binary_path = sql_proxy_binary_path
self.user = self.cloudsql_connection.login
self.password = self.cloudsql_connection.password
self.public_ip = self.cloudsql_connection.host
self.public_port = self.cloudsql_connection.port
self.sslcert = self.extras.get("sslcert")
self.sslkey = self.extras.get("sslkey")
self.sslrootcert = self.extras.get("sslrootcert")
# Port and socket path and db_hook are automatically generated
self.sql_proxy_tcp_port = None
self.sql_proxy_unique_path: str | None = None
self.db_hook: PostgresHook | MySqlHook | None = None
self.reserved_tcp_socket: socket.socket | None = None
# Generated based on clock + clock sequence. Unique per host (!).
# This is important as different hosts share the database
self.db_conn_id = str(uuid.uuid1())
self._validate_inputs()
@staticmethod
def _get_bool(val: Any) -> bool:
if val == "False" or val is False:
return False
return True
@staticmethod
def _check_ssl_file(file_to_check, name) -> None:
if not file_to_check:
raise AirflowException(f"SSL connections requires {name} to be set")
if not os.path.isfile(file_to_check):
raise AirflowException(f"The {file_to_check} must be a readable file")
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required extra 'project_id' is empty")
if not self.location:
raise AirflowException("The required extra 'location' is empty or None")
if not self.instance:
raise AirflowException("The required extra 'instance' is empty or None")
if self.database_type not in CLOUD_SQL_VALID_DATABASE_TYPES:
raise AirflowException(
f"Invalid database type '{self.database_type}'. "
f"Must be one of {CLOUD_SQL_VALID_DATABASE_TYPES}"
)
if self.use_proxy and self.use_ssl:
raise AirflowException(
"Cloud SQL Proxy does not support SSL connections."
" SSL is not needed as Cloud SQL Proxy "
"provides encryption on its own"
)
def validate_ssl_certs(self) -> None:
"""
SSL certificates validator.
:return: None
"""
if self.use_ssl:
self._check_ssl_file(self.sslcert, "sslcert")
self._check_ssl_file(self.sslkey, "sslkey")
self._check_ssl_file(self.sslrootcert, "sslrootcert")
def validate_socket_path_length(self) -> None:
"""
        Validates socket path length.
        :return: None or raises AirflowException
"""
if self.use_proxy and not self.sql_proxy_use_tcp:
if self.database_type == "postgres":
suffix = "/.s.PGSQL.5432"
else:
suffix = ""
expected_path = (
f"{self._generate_unique_path()}/{self.project_id}:{self.instance}:{self.database}{suffix}"
)
if len(expected_path) > UNIX_PATH_MAX:
self.log.info("Too long (%s) path: %s", len(expected_path), expected_path)
raise AirflowException(
f"The UNIX socket path length cannot exceed {UNIX_PATH_MAX} characters on Linux system. "
"Either use shorter instance/database name or switch to TCP connection. "
f"The socket path for Cloud SQL proxy is now:{expected_path}"
)
@staticmethod
def _generate_unique_path() -> str:
"""Generate a unique path.
        We don't use mkdtemp here since it can generate paths close to 60
        characters. We append project/location/instance to the path, and Postgres
        will then append its own prefix, making the resulting path exceed the
        100 character length limitation of a socket path. This generates a
        shorter path: ``${tempdir()}[8 random characters]``.
"""
random.seed()
while True:
candidate = os.path.join(
gettempdir(), "".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
)
if not os.path.exists(candidate):
return candidate
@staticmethod
def _quote(value) -> str | None:
return quote_plus(value) if value else None
def _generate_connection_uri(self) -> str:
if self.use_proxy:
if self.sql_proxy_use_tcp:
if not self.sql_proxy_tcp_port:
self.reserve_free_tcp_port()
if not self.sql_proxy_unique_path:
self.sql_proxy_unique_path = self._generate_unique_path()
if not self.database_type:
raise ValueError("The database_type should be set")
database_uris = CONNECTION_URIS[self.database_type]
ssl_spec = None
socket_path = None
if self.use_proxy:
proxy_uris = database_uris["proxy"]
if self.sql_proxy_use_tcp:
format_string = proxy_uris["tcp"]
else:
format_string = proxy_uris["socket"]
socket_path = f"{self.sql_proxy_unique_path}/{self._get_instance_socket_name()}"
else:
public_uris = database_uris["public"]
if self.use_ssl:
format_string = public_uris["ssl"]
ssl_spec = {"cert": self.sslcert, "key": self.sslkey, "ca": self.sslrootcert}
else:
format_string = public_uris["non-ssl"]
if not self.user:
raise AirflowException("The login parameter needs to be set in connection")
if not self.public_ip:
raise AirflowException("The location parameter needs to be set in connection")
if not self.password:
raise AirflowException("The password parameter needs to be set in connection")
if not self.database:
raise AirflowException("The database parameter needs to be set in connection")
connection_uri = format_string.format(
user=quote_plus(self.user) if self.user else "",
password=quote_plus(self.password) if self.password else "",
database=quote_plus(self.database) if self.database else "",
public_ip=self.public_ip,
public_port=self.public_port,
proxy_port=self.sql_proxy_tcp_port,
socket_path=self._quote(socket_path),
ssl_spec=self._quote(json.dumps(ssl_spec)) if ssl_spec else "",
client_cert_file=self._quote(self.sslcert) if self.sslcert else "",
            client_key_file=self._quote(self.sslkey) if self.sslkey else "",
            server_ca_file=self._quote(self.sslrootcert) if self.sslrootcert else "",
)
self.log.info(
"DB connection URI %s",
connection_uri.replace(
quote_plus(self.password) if self.password else "PASSWORD", "XXXXXXXXXXXX"
),
)
return connection_uri
def _get_instance_socket_name(self) -> str:
return self.project_id + ":" + self.location + ":" + self.instance
def _get_sqlproxy_instance_specification(self) -> str:
instance_specification = self._get_instance_socket_name()
if self.sql_proxy_use_tcp:
instance_specification += "=tcp:" + str(self.sql_proxy_tcp_port)
return instance_specification
def create_connection(self) -> Connection:
"""Create a connection.
        The connection ID is randomly generated, and the connection URI is built
        according to whether it uses proxy, TCP, UNIX sockets, or SSL.
"""
uri = self._generate_connection_uri()
connection = Connection(conn_id=self.db_conn_id, uri=uri)
self.log.info("Creating connection %s", self.db_conn_id)
return connection
def get_sqlproxy_runner(self) -> CloudSqlProxyRunner:
"""Retrieve Cloud SQL Proxy runner.
It is used to manage the proxy lifecycle per task.
:return: The Cloud SQL Proxy runner.
"""
if not self.use_proxy:
raise ValueError("Proxy runner can only be retrieved in case of use_proxy = True")
if not self.sql_proxy_unique_path:
raise ValueError("The sql_proxy_unique_path should be set")
return CloudSqlProxyRunner(
path_prefix=self.sql_proxy_unique_path,
instance_specification=self._get_sqlproxy_instance_specification(),
project_id=self.project_id,
sql_proxy_version=self.sql_proxy_version,
sql_proxy_binary_path=self.sql_proxy_binary_path,
gcp_conn_id=self.gcp_conn_id,
)
def get_database_hook(self, connection: Connection) -> PostgresHook | MySqlHook:
"""Retrieve database hook.
This is the actual Postgres or MySQL database hook that uses proxy or
connects directly to the Google Cloud SQL database.
"""
if self.database_type == "postgres":
db_hook: PostgresHook | MySqlHook = PostgresHook(connection=connection, schema=self.database)
else:
db_hook = MySqlHook(connection=connection, schema=self.database)
self.db_hook = db_hook
return db_hook
def cleanup_database_hook(self) -> None:
"""Clean up database hook after it was used."""
if self.database_type == "postgres":
if not self.db_hook:
raise ValueError("The db_hook should be set")
if not isinstance(self.db_hook, PostgresHook):
raise ValueError(f"The db_hook should be PostgresHook and is {type(self.db_hook)}")
conn = getattr(self.db_hook, "conn")
if conn and conn.notices:
for output in self.db_hook.conn.notices:
self.log.info(output)
def reserve_free_tcp_port(self) -> None:
"""Reserve free TCP port to be used by Cloud SQL Proxy."""
self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.reserved_tcp_socket.bind(("127.0.0.1", 0))
self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]
def free_reserved_port(self) -> None:
"""Free TCP port.
Makes it immediately ready to be used by Cloud SQL Proxy.
"""
if self.reserved_tcp_socket:
self.reserved_tcp_socket.close()
self.reserved_tcp_socket = None
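# Example: a minimal end-to-end sketch of how this "meta" hook is typically used (it mirrors
# the flow of CloudSQLExecuteQueryOperator): build the dynamic Connection, start the proxy
# if configured, run a query through the real database hook, then clean up. The connection
# IDs and project below are placeholders.
def _example_execute_query(sql: str) -> None:
db_hook = CloudSQLDatabaseHook(
gcp_cloudsql_conn_id="my_gcpcloudsql_conn", # placeholder gcpcloudsqldb:// connection
gcp_conn_id="google_cloud_default",
default_gcp_project_id="my-project",
)
db_hook.validate_ssl_certs()
connection = db_hook.create_connection()
db_hook.validate_socket_path_length()
sqlproxy_runner = None
try:
if db_hook.use_proxy:
sqlproxy_runner = db_hook.get_sqlproxy_runner()
sqlproxy_runner.start_proxy()
database_hook = db_hook.get_database_hook(connection=connection)
database_hook.run(sql)
db_hook.cleanup_database_hook()
finally:
if sqlproxy_runner:
sqlproxy_runner.stop_proxy()
db_hook.free_reserved_port()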
| 46,163 | 43.602899 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/speech_to_text.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Speech Hook."""
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.speech_v1 import SpeechClient
from google.cloud.speech_v1.types import RecognitionAudio, RecognitionConfig
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudSpeechToTextHook(GoogleBaseHook):
"""
Hook for Google Cloud Speech API.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: SpeechClient | None = None
def get_conn(self) -> SpeechClient:
"""
Retrieves connection to Cloud Speech.
:return: Google Cloud Speech client object.
"""
if not self._client:
self._client = SpeechClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.quota_retry()
def recognize_speech(
self,
config: dict | RecognitionConfig,
audio: dict | RecognitionAudio,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
):
"""
Recognizes audio input.
:param config: information to the recognizer that specifies how to process the request.
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig
:param audio: audio data to be recognized
https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio
:param retry: (Optional) A retry object used to retry requests. If None is specified,
requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete.
Note that if retry is specified, the timeout applies to each individual attempt.
"""
client = self.get_conn()
if isinstance(config, dict):
config = RecognitionConfig(config)
if isinstance(audio, dict):
audio = RecognitionAudio(audio)
response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
self.log.info("Recognised speech: %s", response)
return response
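# Example: a minimal sketch of calling recognize_speech() with plain dicts; the GCS URI and
# language code are placeholder values. The dicts are converted to RecognitionConfig /
# RecognitionAudio by the hook before the API call.
def _example_recognize_from_gcs() -> None:
hook = CloudSpeechToTextHook(gcp_conn_id="google_cloud_default")
config = {"language_code": "en-US"} # add encoding/sample rate for raw audio formats
audio = {"uri": "gs://my-bucket/audio/sample.flac"} # placeholder object
response = hook.recognize_speech(config=config, audio=audio)
for result in response.results:
print(result.alternatives[0].transcript)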
| 4,494 | 42.640777 | 141 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/tasks.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a CloudTasksHook which allows you to connect to Google Cloud Tasks service."""
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.tasks_v2 import CloudTasksClient
from google.cloud.tasks_v2.types import Queue, Task
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
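# Example: a minimal sketch of creating and then deleting a queue with the CloudTasksHook
# defined below in this module (the name resolves at call time). Location, queue name and
# project are placeholders; passing a plain dict for the queue works because the hook fills
# in the full resource name when queue_name is given.
def _example_create_and_delete_queue() -> None:
hook = CloudTasksHook(gcp_conn_id="google_cloud_default")
hook.create_queue(
location="europe-west1",
task_queue={}, # an empty dict is enough; the name is derived from queue_name
queue_name="example-queue",
project_id="my-project",
)
hook.delete_queue(location="europe-west1", queue_name="example-queue", project_id="my-project")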
class CloudTasksHook(GoogleBaseHook):
"""
Hook for Google Cloud Tasks APIs.
Cloud Tasks allows developers to manage the execution of background work in their applications.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: CloudTasksClient | None = None
def get_conn(self) -> CloudTasksClient:
"""
Provides a client for interacting with the Google Cloud Tasks API.
:return: Google Cloud Tasks API Client
"""
if self._client is None:
self._client = CloudTasksClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def create_queue(
self,
location: str,
task_queue: dict | Queue,
project_id: str = PROVIDE_PROJECT_ID,
queue_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Creates a queue in Cloud Tasks.
:param location: The location name in which the queue will be created.
:param task_queue: The task queue to create.
Queue's name cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if queue_name:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue["name"] = full_queue_name
else:
raise AirflowException("Unable to set queue_name.")
full_location_path = f"projects/{project_id}/locations/{location}"
return client.create_queue(
request={"parent": full_location_path, "queue": task_queue},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_queue(
self,
task_queue: Queue,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
queue_name: str | None = None,
update_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Updates a queue in Cloud Tasks.
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
        :param update_mask: A mask used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if queue_name and location:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue["name"] = full_queue_name
else:
raise AirflowException("Unable to set queue_name.")
return client.update_queue(
request={"queue": task_queue, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Gets a queue from Cloud Tasks.
:param location: The location name in which the queue was created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.get_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_queues(
self,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
results_filter: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[Queue]:
"""
Lists queues from Cloud Tasks.
:param location: The location name in which the queues were created.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param results_filter: (Optional) Filter used to specify a subset of queues.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_location_path = f"projects/{project_id}/locations/{location}"
queues = client.list_queues(
request={"parent": full_location_path, "filter": results_filter, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(queues)
@GoogleBaseHook.fallback_to_default_project_id
def delete_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
:param location: The location name in which the queue will be deleted.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
client.delete_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def purge_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
:param location: The location name in which the queue will be purged.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.purge_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def pause_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Pauses a queue in Cloud Tasks.
:param location: The location name in which the queue will be paused.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.pause_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def resume_queue(
self,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Queue:
"""
Resumes a queue in Cloud Tasks.
:param location: The location name in which the queue will be resumed.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.resume_queue(
request={"name": full_queue_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
self,
location: str,
queue_name: str,
task: dict | Task,
project_id: str = PROVIDE_PROJECT_ID,
task_name: str | None = None,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Task:
"""
Creates a task in Cloud Tasks.
:param location: The location name in which the task will be created.
:param queue_name: The queue's name.
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
if task_name:
full_task_name = (
f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
)
if isinstance(task, Task):
task.name = full_task_name
elif isinstance(task, dict):
task["name"] = full_task_name
else:
raise AirflowException("Unable to set task_name.")
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.create_task(
request={"parent": full_queue_name, "task": task, "response_view": response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
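    # A minimal sketch of creating a task with an HTTP target, assuming the same
    # hook instance as in the sketch above; the queue name, task name and URL are
    # placeholders and the dict mirrors the Cloud Tasks ``Task`` protobuf:
    #
    #     task_payload = {
    #         "http_request": {
    #             "http_method": "POST",
    #             "url": "https://example.com/task-handler",
    #             "body": b"hello",
    #         }
    #     }
    #     task = hook.create_task(
    #         location="us-central1",
    #         queue_name="my-queue",
    #         task=task_payload,
    #         task_name="my-task",  # optional; expanded to the full task path
    #     )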
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str = PROVIDE_PROJECT_ID,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Task:
"""
Gets a task from Cloud Tasks.
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.get_task(
request={"name": full_task_name, "response_view": response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
self,
location: str,
queue_name: str,
project_id: str,
response_view: Task.View | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> list[Task]:
"""
Lists the tasks in Cloud Tasks.
:param location: The location name in which the tasks were created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
tasks = client.list_tasks(
request={"parent": full_queue_name, "response_view": response_view, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(tasks)
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes a task from Cloud Tasks.
:param location: The location name in which the task will be deleted.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
client.delete_task(
request={"name": full_task_name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def run_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Task.View | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Task:
"""
        Forces a task to run in Cloud Tasks.
:param location: The location name in which the task was created.
:param queue_name: The queue's name.
:param task_name: The task's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.run_task(
request={"name": full_task_name, "response_view": response_view},
retry=retry,
timeout=timeout,
metadata=metadata,
)
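    # A minimal sketch of forcing a task to run and then deleting it, assuming the
    # same hook and placeholder names as above; note that ``run_task`` and
    # ``delete_task`` take ``project_id`` explicitly in these signatures:
    #
    #     hook.run_task(
    #         location="us-central1",
    #         queue_name="my-queue",
    #         task_name="my-task",
    #         project_id="my-project",
    #     )
    #     hook.delete_task(
    #         location="us-central1",
    #         queue_name="my-queue",
    #         task_name="my-task",
    #         project_id="my-project",
    #     )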
| 26,550 | 43.698653 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/automl.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google AutoML hook.
.. spelling:word-list::
PredictResponse
"""
from __future__ import annotations
from functools import cached_property
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.automl_v1beta1 import (
AutoMlClient,
BatchPredictInputConfig,
BatchPredictOutputConfig,
Dataset,
ExamplePayload,
ImageObjectDetectionModelDeploymentMetadata,
InputConfig,
Model,
PredictionServiceClient,
PredictResponse,
)
from google.cloud.automl_v1beta1.services.auto_ml.pagers import (
ListColumnSpecsPager,
ListDatasetsPager,
ListTableSpecsPager,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class CloudAutoMLHook(GoogleBaseHook):
"""
Google Cloud AutoML hook.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
            raise RuntimeError(
                "The `delegate_to` parameter was deprecated and has now been removed in this"
                " version of the Google provider. You MUST convert it to `impersonation_chain`."
            )
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: AutoMlClient | None = None
@staticmethod
def extract_object_id(obj: dict) -> str:
"""Returns unique id of the object."""
return obj["name"].rpartition("/")[-1]
def get_conn(self) -> AutoMlClient:
"""
Retrieves connection to AutoML.
:return: Google Cloud AutoML client object.
"""
if self._client is None:
self._client = AutoMlClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
    def wait_for_operation(self, operation: Operation, timeout: float | None = None):
        """Waits for a long-running operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@cached_property
def prediction_client(self) -> PredictionServiceClient:
"""
Creates PredictionServiceClient.
:return: Google Cloud AutoML PredictionServiceClient client object.
"""
return PredictionServiceClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
@GoogleBaseHook.fallback_to_default_project_id
def create_model(
self,
model: dict | Model,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
retry: Retry | _MethodDefault = DEFAULT,
) -> Operation:
"""
Creates a model_id and returns a Model in the `response` field when it completes.
When you create a model, several model evaluations are created for it:
a global evaluation, and one evaluation for each annotation spec.
:param model: The model_id to create. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.automl_v1beta1.types.Model`
:param project_id: ID of the Google Cloud project where model will be created if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types._OperationFuture` instance
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
return client.create_model(
request={"parent": parent, "model": model},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def batch_predict(
self,
model_id: str,
input_config: dict | BatchPredictInputConfig,
output_config: dict | BatchPredictOutputConfig,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
params: dict[str, str] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
        Performs a batch prediction and returns a long-running operation object.
        Unlike the online `Predict`, the batch prediction result won't be immediately
        available in the response. Instead, a long-running operation object is returned.
        :param model_id: Name of the model requested to serve the batch prediction.
:param input_config: Required. The input configuration for batch prediction.
If a dict is provided, it must be of the same form as the protobuf message
`google.cloud.automl_v1beta1.types.BatchPredictInputConfig`
:param output_config: Required. The Configuration specifying where output predictions should be
written. If a dict is provided, it must be of the same form as the protobuf message
`google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`
:param params: Additional domain-specific parameters for the predictions, any string must be up to
25000 characters long.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types._OperationFuture` instance
"""
client = self.prediction_client
name = f"projects/{project_id}/locations/{location}/models/{model_id}"
result = client.batch_predict(
request={
"name": name,
"input_config": input_config,
"output_config": output_config,
"params": params,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
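    # A minimal sketch of a GCS-to-GCS batch prediction with this hook; the model
    # id, bucket names and connection id are placeholders, and the nested dicts
    # mirror the ``BatchPredictInputConfig``/``BatchPredictOutputConfig`` messages:
    #
    #     hook = CloudAutoMLHook(gcp_conn_id="google_cloud_default")
    #     operation = hook.batch_predict(
    #         model_id="TBL1234567890",
    #         location="us-central1",
    #         input_config={"gcs_source": {"input_uris": ["gs://my-bucket/input.csv"]}},
    #         output_config={"gcs_destination": {"output_uri_prefix": "gs://my-bucket/output/"}},
    #     )
    #     result = hook.wait_for_operation(operation, timeout=3600)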
@GoogleBaseHook.fallback_to_default_project_id
def predict(
self,
model_id: str,
payload: dict | ExamplePayload,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
params: dict[str, str] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> PredictResponse:
"""
        Performs an online prediction and returns the prediction result in the response.
        :param model_id: Name of the model requested to serve the prediction.
:param payload: Required. Payload to perform a prediction on. The payload must match the problem type
that the model_id was trained to solve. If a dict is provided, it must be of
the same form as the protobuf message `google.cloud.automl_v1beta1.types.ExamplePayload`
:param params: Additional domain-specific parameters, any string must be up to 25000 characters long.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types.PredictResponse` instance
"""
client = self.prediction_client
name = f"projects/{project_id}/locations/{location}/models/{model_id}"
result = client.predict(
request={"name": name, "payload": payload, "params": params},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
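    # A minimal sketch of an online prediction for a text model; the model id is a
    # placeholder and the payload dict mirrors the ``ExamplePayload`` message. The
    # fields read from the response assume a classification model:
    #
    #     response = hook.predict(
    #         model_id="TCN1234567890",
    #         location="us-central1",
    #         payload={"text_snippet": {"content": "hello world", "mime_type": "text/plain"}},
    #     )
    #     for annotation in response.payload:
    #         print(annotation.display_name, annotation.classification.score)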
@GoogleBaseHook.fallback_to_default_project_id
def create_dataset(
self,
dataset: dict | Dataset,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Dataset:
"""
Creates a dataset.
:param dataset: The dataset to create. If a dict is provided, it must be of the
same form as the protobuf message Dataset.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types.Dataset` instance.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
result = client.create_dataset(
request={"parent": parent, "dataset": dataset},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def import_data(
self,
dataset_id: str,
location: str,
input_config: dict | InputConfig,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Imports data into a dataset. For Tables this method can only be called on an empty Dataset.
:param dataset_id: Name of the AutoML dataset.
:param input_config: The desired input location and its domain specific semantics, if any.
If a dict is provided, it must be of the same form as the protobuf message InputConfig.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types._OperationFuture` instance
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/datasets/{dataset_id}"
result = client.import_data(
request={"name": name, "input_config": input_config},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
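    # A minimal sketch of creating a Tables dataset and importing a CSV into it;
    # the display name and GCS URI are placeholders:
    #
    #     dataset = hook.create_dataset(
    #         dataset={"display_name": "my_dataset", "tables_dataset_metadata": {}},
    #         location="us-central1",
    #     )
    #     dataset_id = hook.extract_object_id({"name": dataset.name})
    #     operation = hook.import_data(
    #         dataset_id=dataset_id,
    #         location="us-central1",
    #         input_config={"gcs_source": {"input_uris": ["gs://my-bucket/data.csv"]}},
    #     )
    #     hook.wait_for_operation(operation, timeout=3600)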
@GoogleBaseHook.fallback_to_default_project_id
def list_column_specs(
self,
dataset_id: str,
table_spec_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
field_mask: dict | FieldMask | None = None,
filter_: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListColumnSpecsPager:
"""
Lists column specs in a table spec.
:param dataset_id: Name of the AutoML dataset.
:param table_spec_id: table_spec_id for path builder.
:param field_mask: Mask specifying which fields to read. If a dict is provided, it must be of the same
form as the protobuf message `google.cloud.automl_v1beta1.types.FieldMask`
:param filter_: Filter expression, see go/filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types.ColumnSpec` instance.
"""
client = self.get_conn()
parent = client.table_spec_path(
project=project_id,
location=location,
dataset=dataset_id,
table_spec=table_spec_id,
)
result = client.list_column_specs(
request={"parent": parent, "field_mask": field_mask, "filter": filter_, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_model(
self,
model_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Model:
"""
        Gets an AutoML model.
:param model_id: Name of the model.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types.Model` instance.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/models/{model_id}"
result = client.get_model(
request={"name": name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_model(
self,
model_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
        Deletes an AutoML model.
:param model_id: Name of the model.
:param project_id: ID of the Google Cloud project where model is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types._OperationFuture` instance.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/models/{model_id}"
result = client.delete_model(
request={"name": name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
def update_dataset(
self,
dataset: dict | Dataset,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Dataset:
"""
Updates a dataset.
:param dataset: The dataset which replaces the resource on the server.
If a dict is provided, it must be of the same form as the protobuf message Dataset.
:param update_mask: The update mask applies to the resource. If a dict is provided, it must
be of the same form as the protobuf message FieldMask.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
        :return: `google.cloud.automl_v1beta1.types.Dataset` instance.
"""
client = self.get_conn()
result = client.update_dataset(
request={"dataset": dataset, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def deploy_model(
self,
model_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
image_detection_metadata: ImageObjectDetectionModelDeploymentMetadata | dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deploys a model.
        If a model is already deployed, deploying it with the same parameters
        has no effect. Deploying with different parameters (e.g. changing node_number) will
        reset the deployment state without pausing the model's availability.
Only applicable for Text Classification, Image Object Detection and Tables; all other
domains manage deployment automatically.
        :param model_id: Name of the model to deploy.
:param image_detection_metadata: Model deployment metadata specific to Image Object Detection.
If a dict is provided, it must be of the same form as the protobuf message
ImageObjectDetectionModelDeploymentMetadata
:param project_id: ID of the Google Cloud project where model will be created if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types._OperationFuture` instance.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/models/{model_id}"
result = client.deploy_model(
request={
"name": name,
"image_object_detection_model_deployment_metadata": image_detection_metadata,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
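    # A minimal sketch of deploying an Image Object Detection model with an
    # explicit node count and waiting for completion; the model id and node count
    # are placeholders:
    #
    #     operation = hook.deploy_model(
    #         model_id="IOD1234567890",
    #         location="us-central1",
    #         image_detection_metadata={"node_count": 2},
    #     )
    #     hook.wait_for_operation(operation, timeout=3600)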
def list_table_specs(
self,
dataset_id: str,
location: str,
project_id: str | None = None,
filter_: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListTableSpecsPager:
"""
        Lists table specs in a dataset.
:param dataset_id: Name of the dataset.
:param filter_: Filter expression, see go/filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: A `google.gax.PageIterator` instance. By default, this
is an iterable of `google.cloud.automl_v1beta1.types.TableSpec` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}/datasets/{dataset_id}"
result = client.list_table_specs(
request={"parent": parent, "filter": filter_, "page_size": page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_datasets(
self,
location: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListDatasetsPager:
"""
Lists datasets in a project.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: A `google.gax.PageIterator` instance. By default, this
is an iterable of `google.cloud.automl_v1beta1.types.Dataset` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
result = client.list_datasets(
request={"parent": parent},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_dataset(
self,
dataset_id: str,
location: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a dataset and all of its contents.
:param dataset_id: ID of dataset to be deleted.
:param project_id: ID of the Google Cloud project where dataset is located if None then
default project_id is used.
:param location: The location of the project.
:param retry: A retry object used to retry requests. If `None` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
`retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: `google.cloud.automl_v1beta1.types._OperationFuture` instance
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/datasets/{dataset_id}"
result = client.delete_dataset(
request={"name": name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
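    # A minimal cleanup sketch that lists datasets and deletes them; the project
    # and location are placeholders. ``list_datasets`` and ``delete_dataset`` take
    # ``project_id`` explicitly in these signatures:
    #
    #     for dataset in hook.list_datasets(location="us-central1", project_id="my-project"):
    #         dataset_id = hook.extract_object_id({"name": dataset.name})
    #         operation = hook.delete_dataset(
    #             dataset_id=dataset_id,
    #             location="us-central1",
    #             project_id="my-project",
    #         )
    #         hook.wait_for_operation(operation, timeout=3600)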
| 27,490 | 41.954688 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/dataproc_metastore.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Dataproc Metastore hook."""
from __future__ import annotations
from typing import Any, Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.metastore_v1 import DataprocMetastoreClient
from google.cloud.metastore_v1.types import Backup, MetadataImport, Service
from google.cloud.metastore_v1.types.metastore import DatabaseDumpSpec, Restore
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class DataprocMetastoreHook(GoogleBaseHook):
"""Hook for Google Cloud Dataproc Metastore APIs."""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
            raise RuntimeError(
                "The `delegate_to` parameter was deprecated and has now been removed in this"
                " version of the Google provider. You MUST convert it to `impersonation_chain`."
            )
super().__init__(**kwargs)
def get_dataproc_metastore_client(self) -> DataprocMetastoreClient:
"""Returns DataprocMetastoreClient."""
client_options = ClientOptions(api_endpoint="metastore.googleapis.com:443")
return DataprocMetastoreClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_dataproc_metastore_client_v1beta(self):
"""Returns DataprocMetastoreClient (from v1 beta)."""
from google.cloud.metastore_v1beta import DataprocMetastoreClient
client_options = ClientOptions(api_endpoint="metastore.googleapis.com:443")
return DataprocMetastoreClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
    def wait_for_operation(self, timeout: float | None, operation: Operation):
        """Waits for a long-running operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@GoogleBaseHook.fallback_to_default_project_id
def create_backup(
self,
project_id: str,
region: str,
service_id: str,
backup: dict[Any, Any] | Backup,
backup_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Creates a new backup in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup: Required. The backup to create. The ``name`` field is ignored. The ID of the created
backup must be provided in the request's ``backup_id`` field.
This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param backup_id: Required. The ID of the backup, which is used as the final component of the
backup's name. This value must be between 1 and 64 characters long, begin with a letter, end with
a letter or number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
parent = f"projects/{project_id}/locations/{region}/services/{service_id}"
client = self.get_dataproc_metastore_client()
result = client.create_backup(
request={
"parent": parent,
"backup": backup,
"backup_id": backup_id,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
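    # A minimal sketch of backing up an existing metastore service; all ids are
    # placeholders. Note that this hook's ``wait_for_operation`` takes the timeout
    # first, so keyword arguments are used:
    #
    #     hook = DataprocMetastoreHook(gcp_conn_id="google_cloud_default")
    #     operation = hook.create_backup(
    #         project_id="my-project",
    #         region="us-central1",
    #         service_id="my-metastore",
    #         backup={},
    #         backup_id="my-backup",
    #     )
    #     backup = hook.wait_for_operation(timeout=3600, operation=operation)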
@GoogleBaseHook.fallback_to_default_project_id
def create_metadata_import(
self,
project_id: str,
region: str,
service_id: str,
metadata_import: dict | MetadataImport,
metadata_import_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Creates a new MetadataImport in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param metadata_import: Required. The metadata import to create. The ``name`` field is ignored. The
ID of the created metadata import must be provided in the request's ``metadata_import_id`` field.
This corresponds to the ``metadata_import`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param metadata_import_id: Required. The ID of the metadata import, which is used as the final
component of the metadata import's name. This value must be between 1 and 64 characters long,
begin with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``metadata_import_id`` field on the ``request`` instance; if ``request``
is provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
parent = f"projects/{project_id}/locations/{region}/services/{service_id}"
client = self.get_dataproc_metastore_client()
result = client.create_metadata_import(
request={
"parent": parent,
"metadata_import": metadata_import,
"metadata_import_id": metadata_import_id,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def create_service(
self,
region: str,
project_id: str,
service: dict | Service,
service_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Creates a metastore service in a project and location.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param service: Required. The Metastore service to create. The ``name`` field is ignored. The ID of
the created metastore service must be provided in the request's ``service_id`` field.
This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
parent = f"projects/{project_id}/locations/{region}"
client = self.get_dataproc_metastore_client()
result = client.create_service(
request={
"parent": parent,
"service_id": service_id,
"service": service if service else {},
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
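    # A minimal sketch of creating a metastore service; the ids are placeholders
    # and the service spec, which only pins the Hive metastore version, is an
    # assumption rather than a required shape:
    #
    #     operation = hook.create_service(
    #         region="us-central1",
    #         project_id="my-project",
    #         service={"hive_metastore_config": {"version": "3.1.2"}},
    #         service_id="my-metastore",
    #     )
    #     service = hook.wait_for_operation(timeout=3600, operation=operation)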
@GoogleBaseHook.fallback_to_default_project_id
def delete_backup(
self,
project_id: str,
region: str,
service_id: str,
backup_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Deletes a single backup.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup_id: Required. The ID of the backup, which is used as the final component of the
backup's name. This value must be between 1 and 64 characters long, begin with a letter, end with
a letter or number, and consist of alphanumeric ASCII characters or hyphens.
This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
name = f"projects/{project_id}/locations/{region}/services/{service_id}/backups/{backup_id}"
client = self.get_dataproc_metastore_client()
result = client.delete_backup(
request={
"name": name,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_service(
self,
project_id: str,
region: str,
service_id: str,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Deletes a single service.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
name = f"projects/{project_id}/locations/{region}/services/{service_id}"
client = self.get_dataproc_metastore_client()
result = client.delete_service(
request={
"name": name,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def export_metadata(
self,
destination_gcs_folder: str,
project_id: str,
region: str,
service_id: str,
request_id: str | None = None,
database_dump_type: DatabaseDumpSpec | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Exports metadata from a service.
:param destination_gcs_folder: A Cloud Storage URI of a folder, in the format
``gs://<bucket_name>/<path_inside_bucket>``. A sub-folder
``<export_folder>`` containing exported files will be
created below it.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param database_dump_type: Optional. The type of the database dump. If unspecified,
defaults to ``MYSQL``.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
service = f"projects/{project_id}/locations/{region}/services/{service_id}"
client = self.get_dataproc_metastore_client()
result = client.export_metadata(
request={
"destination_gcs_folder": destination_gcs_folder,
"service": service,
"request_id": request_id,
"database_dump_type": database_dump_type,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_service(
self,
project_id: str,
region: str,
service_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Gets the details of a single service.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
name = f"projects/{project_id}/locations/{region}/services/{service_id}"
client = self.get_dataproc_metastore_client()
result = client.get_service(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_backup(
self,
project_id: str,
region: str,
service_id: str,
backup_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Backup:
"""
        Gets a backup from a service.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
        :param backup_id: Required. The ID of the metastore service backup to retrieve.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
backup = f"projects/{project_id}/locations/{region}/services/{service_id}/backups/{backup_id}"
client = self.get_dataproc_metastore_client()
result = client.get_backup(
request={
"name": backup,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_backups(
self,
project_id: str,
region: str,
service_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Lists backups in a service.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param page_size: Optional. The maximum number of backups to
return. The response may contain less than the
maximum number. If unspecified, no more than 500
backups are returned. The maximum value is 1000;
values above 1000 are changed to 1000.
:param page_token: Optional. A page token, received from a previous
[DataprocMetastore.ListBackups][google.cloud.metastore.v1.DataprocMetastore.ListBackups]
call. Provide this token to retrieve the subsequent page.
To retrieve the first page, supply an empty page token.
When paginating, other parameters provided to
[DataprocMetastore.ListBackups][google.cloud.metastore.v1.DataprocMetastore.ListBackups]
must match the call that provided the page token.
:param filter: Optional. The filter to apply to list
results.
:param order_by: Optional. Specify the ordering of results as described in
`Sorting
Order <https://cloud.google.com/apis/design/design_patterns#sorting_order>`__.
If not specified, the results will be sorted in the default
order.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
parent = f"projects/{project_id}/locations/{region}/services/{service_id}/backups"
client = self.get_dataproc_metastore_client()
result = client.list_backups(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def restore_service(
self,
project_id: str,
region: str,
service_id: str,
backup_project_id: str,
backup_region: str,
backup_service_id: str,
backup_id: str,
restore_type: Restore | None = None,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Restores a service from a backup.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param backup_project_id: Required. The ID of the Google Cloud project that the metastore service
backup to restore from.
:param backup_region: Required. The ID of the Google Cloud region that the metastore
service backup to restore from.
:param backup_service_id: Required. The ID of the metastore service backup to restore from,
which is used as the final component of the metastore service's name. This value must be
between 2 and 63 characters long inclusive, begin with a letter, end with a letter or number,
and consist of alphanumeric ASCII characters or hyphens.
:param backup_id: Required. The ID of the metastore service backup to restore from.
:param restore_type: Optional. The type of restore. If unspecified, defaults to
``METADATA_ONLY``.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
service = f"projects/{project_id}/locations/{region}/services/{service_id}"
backup = (
f"projects/{backup_project_id}/locations/{backup_region}/services/"
f"{backup_service_id}/backups/{backup_id}"
)
client = self.get_dataproc_metastore_client()
result = client.restore_service(
request={
"service": service,
"backup": backup,
"restore_type": restore_type,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_service(
self,
project_id: str,
region: str,
service_id: str,
service: dict | Service,
update_mask: FieldMask,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Updates the parameters of a single service.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param service: Required. The metastore service to update. The server only merges fields in the
service if they are specified in ``update_mask``.
The metastore service's ``name`` field is used to identify the metastore service to be updated.
This corresponds to the ``service`` field on the ``request`` instance; if ``request`` is provided,
this should not be set.
:param update_mask: Required. A field mask used to specify the fields to be overwritten in the
metastore service resource by the update. Fields specified in the ``update_mask`` are relative to
the resource (not to the full request). A field is overwritten if it is in the mask.
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param request_id: Optional. A unique id used to identify the request.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataproc_metastore_client()
service_name = f"projects/{project_id}/locations/{region}/services/{service_id}"
service["name"] = service_name
result = client.update_service(
request={
"service": service,
"update_mask": update_mask,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
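# Illustrative usage sketch (comments only, not part of the original hook; project,
# region and service names are placeholders). The server merges only the fields
# listed in the FieldMask into the existing service:
#
#     from google.protobuf.field_mask_pb2 import FieldMask
#
#     hook.update_service(
#         project_id="my-project",
#         region="us-central1",
#         service_id="my-metastore-service",
#         service={"labels": {"env": "dev"}},
#         update_mask=FieldMask(paths=["labels"]),
#     )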
@GoogleBaseHook.fallback_to_default_project_id
def list_hive_partitions(
self,
project_id: str,
service_id: str,
region: str,
table: str,
partition_names: list[str] | None = None,
) -> Operation:
"""
Lists Hive partitions.
:param project_id: Optional. The ID of the Google Cloud project that the service belongs to.
:param service_id: Required. Dataproc Metastore service id.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param table: Required. Name of the partitioned table
:param partition_names: Optional. List of table partitions to wait for.
A name of a partition should look like "ds=1", or "a=1/b=2" in case of multiple partitions.
Note that you cannot use logical or comparison operators as in HivePartitionSensor.
If not specified then the sensor will wait for at least one partition regardless of its name.
"""
# Remove duplicates from the `partition_names` and preserve elements order
# because dictionaries are ordered since Python 3.7+
_partitions = list(dict.fromkeys(partition_names)) if partition_names else []
query = f"""
SELECT *
FROM PARTITIONS
INNER JOIN TBLS
ON PARTITIONS.TBL_ID = TBLS.TBL_ID
WHERE
TBLS.TBL_NAME = '{table}'"""
if _partitions:
query += f"""
AND PARTITIONS.PART_NAME IN ({', '.join(f"'{p}'" for p in _partitions)})"""
query += ";"
client = self.get_dataproc_metastore_client_v1beta()
result = client.query_metadata(
request={
"service": f"projects/{project_id}/locations/{region}/services/{service_id}",
"query": query,
}
)
return result
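# Illustrative usage sketch (not part of the original module). It assumes the
# enclosing DataprocMetastoreHook class defined above and a configured
# `google_cloud_default` connection; project, region, service and table names
# are placeholders.
def _example_wait_for_hive_partitions() -> None:
    """Minimal sketch: query the metastore for two specific partitions of a table."""
    hook = DataprocMetastoreHook(gcp_conn_id="google_cloud_default")
    operation = hook.list_hive_partitions(
        project_id="my-project",
        service_id="my-metastore-service",
        region="us-central1",
        table="sales",
        partition_names=["ds=2023-01-01", "ds=2023-01-02"],
    )
    # The call returns a long-running Operation; block until the metadata query finishes.
    result = operation.result()
    print(result)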
airflow-main/airflow/providers/google/cloud/hooks/kubernetes_engine.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Kubernetes Engine Hook.
.. spelling:word-list::
gapic
enums
"""
from __future__ import annotations
import contextlib
import json
import time
import warnings
from functools import cached_property
from typing import Sequence
import google.auth.credentials
from gcloud.aio.auth import Token
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.auth.transport import requests as google_requests
# not sure why but mypy complains on missing `container_v1` but it is clearly there and is importable
from google.cloud import container_v1, exceptions # type: ignore[attr-defined]
from google.cloud.container_v1 import ClusterManagerAsyncClient, ClusterManagerClient
from google.cloud.container_v1.types import Cluster, Operation
from kubernetes import client
from kubernetes_asyncio import client as async_client
from kubernetes_asyncio.client.models import V1Pod
from kubernetes_asyncio.config.kube_config import FileOrData
from urllib3.exceptions import HTTPError
from airflow import version
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.cncf.kubernetes.utils.pod_manager import PodOperatorHookProtocol
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import (
PROVIDE_PROJECT_ID,
GoogleBaseAsyncHook,
GoogleBaseHook,
)
OPERATIONAL_POLL_INTERVAL = 15
class GKEHook(GoogleBaseHook):
"""Google Kubernetes Engine cluster APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: ClusterManagerClient | None = None
self.location = location
def get_cluster_manager_client(self) -> ClusterManagerClient:
"""Create or get a ClusterManagerClient."""
if self._client is None:
self._client = ClusterManagerClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._client
# To preserve backward compatibility
# TODO: remove one day
def get_conn(self) -> container_v1.ClusterManagerClient:
warnings.warn(
"The get_conn method has been deprecated. You should use the get_cluster_manager_client method.",
AirflowProviderDeprecationWarning,
)
return self.get_cluster_manager_client()
# To preserve backward compatibility
# TODO: remove one day
def get_client(self) -> ClusterManagerClient:
warnings.warn(
"The get_client method has been deprecated. You should use the get_conn method.",
AirflowProviderDeprecationWarning,
)
return self.get_conn()
def wait_for_operation(self, operation: Operation, project_id: str | None = None) -> Operation:
"""Continuously fetch the status from Google Cloud.
This is done until the given operation completes, or raises an error.
:param operation: The Operation to wait for.
:param project_id: Google Cloud project ID.
:return: A new, updated operation fetched from Google Cloud.
"""
self.log.info("Waiting for OPERATION_NAME %s", operation.name)
time.sleep(OPERATIONAL_POLL_INTERVAL)
while operation.status != Operation.Status.DONE:
if operation.status == Operation.Status.RUNNING or operation.status == Operation.Status.PENDING:
time.sleep(OPERATIONAL_POLL_INTERVAL)
else:
raise exceptions.GoogleCloudError(f"Operation has failed with status: {operation.status}")
# To update status of operation
operation = self.get_operation(operation.name, project_id=project_id or self.project_id)
return operation
def get_operation(self, operation_name: str, project_id: str | None = None) -> Operation:
"""Get an operation from Google Cloud.
:param operation_name: Name of operation to fetch
:param project_id: Google Cloud project ID
:return: The new, updated operation from Google Cloud
"""
return self.get_cluster_manager_client().get_operation(
name=(
f"projects/{project_id or self.project_id}"
f"/locations/{self.location}/operations/{operation_name}"
)
)
@staticmethod
def _append_label(cluster_proto: Cluster, key: str, val: str) -> Cluster:
"""Append labels to provided Cluster Protobuf.
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param cluster_proto: The proto to append resource_label airflow
version to
:param key: The key label
:param val: The value of the label
:return: The cluster proto updated with new label
"""
val = val.replace(".", "-").replace("+", "-")
cluster_proto.resource_labels.update({key: val})
return cluster_proto
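# Illustrative note (not part of the original code): the sanitisation above turns an
# Airflow version such as "2.7.1+astro" into the label-safe value "2-7-1-astro", so
# _append_label(cluster_proto, "airflow-version", "v2.7.1+astro") yields the label
# {"airflow-version": "v2-7-1-astro"}.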
@GoogleBaseHook.fallback_to_default_project_id
def delete_cluster(
self,
name: str,
project_id: str = PROVIDE_PROJECT_ID,
wait_to_complete: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> Operation | None:
"""Deletes the cluster, the Kubernetes endpoint, and all worker nodes.
Firewalls and routes that were configured during cluster creation are
also deleted. Other Google Compute Engine resources that might be in use
by the cluster (e.g. load balancer resources) will not be deleted if
they were not present at the initial create time.
:param name: The name of the cluster to delete.
:param project_id: Google Cloud project ID.
:param wait_to_complete: If *True*, wait until the deletion is finished
before returning.
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to
each individual attempt.
:return: The full url to the delete operation if successful, else None.
"""
self.log.info("Deleting (project_id=%s, location=%s, cluster_id=%s)", project_id, self.location, name)
try:
operation = self.get_cluster_manager_client().delete_cluster(
name=f"projects/{project_id}/locations/{self.location}/clusters/{name}",
retry=retry,
timeout=timeout,
)
if wait_to_complete:
operation = self.wait_for_operation(operation, project_id)
# Returns server-defined url for the resource
return operation
except NotFound as error:
self.log.info("Assuming Success: %s", error.message)
return None
@GoogleBaseHook.fallback_to_default_project_id
def create_cluster(
self,
cluster: dict | Cluster,
project_id: str = PROVIDE_PROJECT_ID,
wait_to_complete: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> Operation | Cluster:
"""Create a cluster.
This should consist of the specified number, and the type of Google
Compute Engine instances.
:param cluster: A Cluster protobuf or dict. If dict is provided, it must
be of the same form as the protobuf message
:class:`google.cloud.container_v1.types.Cluster`.
:param project_id: Google Cloud project ID.
:param wait_to_complete: If *True*, sleep until the create operation is
finished before returning.
:param retry: A retry object (``google.api_core.retry.Retry``) used to
retry requests. If None is specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to
each individual attempt.
:return: The full url to the new, or existing, cluster.
:raises ParseError: On JSON parsing problems when trying to convert
dict.
:raises AirflowException: cluster is not dict type nor Cluster proto
type.
"""
if isinstance(cluster, dict):
cluster = Cluster.from_json(json.dumps(cluster))
elif not isinstance(cluster, Cluster):
raise AirflowException("cluster is not instance of Cluster proto or python dict")
self._append_label(cluster, "airflow-version", "v" + version.version) # type: ignore
self.log.info(
"Creating (project_id=%s, location=%s, cluster_name=%s)",
project_id,
self.location,
cluster.name, # type: ignore
)
operation = self.get_cluster_manager_client().create_cluster(
parent=f"projects/{project_id}/locations/{self.location}",
cluster=cluster, # type: ignore
retry=retry,
timeout=timeout,
)
if wait_to_complete:
operation = self.wait_for_operation(operation, project_id)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def get_cluster(
self,
name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
) -> Cluster:
"""Get details of specified cluster.
:param name: The name of the cluster to retrieve.
:param project_id: Google Cloud project ID.
:param retry: A retry object used to retry requests. If None is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to
each individual attempt.
"""
self.log.info(
"Fetching cluster (project_id=%s, location=%s, cluster_name=%s)",
project_id or self.project_id,
self.location,
name,
)
return self.get_cluster_manager_client().get_cluster(
name=f"projects/{project_id}/locations/{self.location}/clusters/{name}",
retry=retry,
timeout=timeout,
)
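# Illustrative usage sketch (not part of the original module). Cluster, project and
# location names are placeholders; the dict form mirrors the
# google.cloud.container_v1.types.Cluster message.
def _example_create_and_inspect_cluster() -> None:
    """Minimal sketch: create a one-node GKE cluster with GKEHook and fetch its endpoint."""
    hook = GKEHook(gcp_conn_id="google_cloud_default", location="europe-west1-b")
    cluster_body = {"name": "example-cluster", "initial_node_count": 1}
    # Blocks until the create operation finishes because wait_to_complete=True.
    hook.create_cluster(cluster=cluster_body, project_id="my-project", wait_to_complete=True)
    cluster = hook.get_cluster(name="example-cluster", project_id="my-project")
    print(cluster.endpoint)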
class GKEAsyncHook(GoogleBaseAsyncHook):
"""Asynchronous client of GKE."""
sync_hook_class = GKEHook
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._client: ClusterManagerAsyncClient | None = None
self.location = location
async def _get_client(self) -> ClusterManagerAsyncClient:
if self._client is None:
self._client = ClusterManagerAsyncClient(
credentials=(await self.get_sync_hook()).get_credentials(),
client_info=CLIENT_INFO,
)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
async def get_operation(
self,
operation_name: str,
project_id: str = PROVIDE_PROJECT_ID,
) -> Operation:
"""Fetch an operation from Google Cloud.
:param operation_name: Name of operation to fetch.
:param project_id: Google Cloud project ID.
:return: The new, updated operation from Google Cloud.
"""
project_id = project_id or (await self.get_sync_hook()).project_id
operation_path = f"projects/{project_id}/locations/{self.location}/operations/{operation_name}"
client = await self._get_client()
return await client.get_operation(
name=operation_path,
)
class GKEPodHook(GoogleBaseHook, PodOperatorHookProtocol):
"""Google Kubernetes Engine pod APIs."""
def __init__(
self,
cluster_url: str,
ssl_ca_cert: str,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self._cluster_url = cluster_url
self._ssl_ca_cert = ssl_ca_cert
@cached_property
def api_client(self) -> client.ApiClient:
return self.get_conn()
@cached_property
def core_v1_client(self) -> client.CoreV1Api:
return client.CoreV1Api(self.api_client)
@property
def is_in_cluster(self) -> bool:
return False
def get_namespace(self):
"""Get the namespace configured by the Airflow connection."""
def _get_namespace(self):
"""For compatibility with KubernetesHook. Deprecated; do not use."""
def get_xcom_sidecar_container_image(self):
"""Get the xcom sidecar image defined in the connection.
Implemented for compatibility with KubernetesHook.
"""
def get_xcom_sidecar_container_resources(self):
"""Get the xcom sidecar resources defined in the connection.
Implemented for compatibility with KubernetesHook.
"""
def get_conn(self) -> client.ApiClient:
configuration = self._get_config()
return client.ApiClient(configuration)
def _get_config(self) -> client.configuration.Configuration:
configuration = client.Configuration(
host=self._cluster_url,
api_key_prefix={"authorization": "Bearer"},
api_key={"authorization": self._get_token(self.get_credentials())},
)
configuration.ssl_ca_cert = FileOrData(
{
"certificate-authority-data": self._ssl_ca_cert,
},
file_key_name="certificate-authority",
).as_file()
return configuration
@staticmethod
def _get_token(creds: google.auth.credentials.Credentials) -> str:
if creds.token is None or creds.expired:
auth_req = google_requests.Request()
creds.refresh(auth_req)
return creds.token
def get_pod(self, name: str, namespace: str) -> V1Pod:
"""Get a pod object.
:param name: Name of the pod.
:param namespace: Name of the pod's namespace.
"""
return self.core_v1_client.read_namespaced_pod(
name=name,
namespace=namespace,
)
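# Illustrative usage sketch (not part of the original module). The endpoint and CA
# certificate are placeholders; in practice they usually come from
# GKEHook.get_cluster() (`endpoint` and `master_auth.cluster_ca_certificate`).
def _example_read_pod() -> None:
    """Minimal sketch: point GKEPodHook at an existing cluster and read one pod."""
    pod_hook = GKEPodHook(
        cluster_url="https://35.190.0.1",
        ssl_ca_cert="<base64-encoded CA certificate>",
        gcp_conn_id="google_cloud_default",
    )
    pod = pod_hook.get_pod(name="example-pod", namespace="default")
    print(pod.status.phase)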
class GKEPodAsyncHook(GoogleBaseAsyncHook):
"""Google Kubernetes Engine pods APIs asynchronously.
:param cluster_url: The URL pointed to the cluster.
:param ssl_ca_cert: SSL certificate used for authentication to the pod.
"""
sync_hook_class = GKEPodHook
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
def __init__(self, cluster_url: str, ssl_ca_cert: str, **kwargs) -> None:
self._cluster_url = cluster_url
self._ssl_ca_cert = ssl_ca_cert
super().__init__(cluster_url=cluster_url, ssl_ca_cert=ssl_ca_cert, **kwargs)
@contextlib.asynccontextmanager
async def get_conn(self, token: Token) -> async_client.ApiClient: # type: ignore[override]
kube_client = None
try:
kube_client = await self._load_config(token)
yield kube_client
finally:
if kube_client is not None:
await kube_client.close()
async def _load_config(self, token: Token) -> async_client.ApiClient:
configuration = self._get_config()
access_token = await token.get()
return async_client.ApiClient(
configuration,
header_name="Authorization",
header_value=f"Bearer {access_token}",
)
def _get_config(self) -> async_client.configuration.Configuration:
configuration = async_client.Configuration(
host=self._cluster_url,
ssl_ca_cert=FileOrData(
{
"certificate-authority-data": self._ssl_ca_cert,
},
file_key_name="certificate-authority",
).as_file(),
)
return configuration
async def get_pod(self, name: str, namespace: str) -> V1Pod:
"""Get a pod object.
:param name: Name of the pod.
:param namespace: Name of the pod's namespace.
"""
async with Token(scopes=self.scopes) as token:
async with self.get_conn(token) as connection:
v1_api = async_client.CoreV1Api(connection)
pod: V1Pod = await v1_api.read_namespaced_pod(
name=name,
namespace=namespace,
)
return pod
async def delete_pod(self, name: str, namespace: str):
"""Delete a pod.
:param name: Name of the pod.
:param namespace: Name of the pod's namespace.
"""
async with Token(scopes=self.scopes) as token:
async with self.get_conn(token) as connection:
try:
v1_api = async_client.CoreV1Api(connection)
await v1_api.delete_namespaced_pod(
name=name,
namespace=namespace,
body=client.V1DeleteOptions(),
)
except async_client.ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
async def read_logs(self, name: str, namespace: str):
"""Read logs inside the pod while starting containers inside.
All the logs will be outputted with its timestamp to track the logs
after the execution of the pod is completed. The method is used for
async output of the logs only in the pod failed it execution or the task
was cancelled by the user.
:param name: Name of the pod.
:param namespace: Name of the pod's namespace.
"""
async with Token(scopes=self.scopes) as token:
async with self.get_conn(token) as connection:
try:
v1_api = async_client.CoreV1Api(connection)
logs = await v1_api.read_namespaced_pod_log(
name=name,
namespace=namespace,
follow=False,
timestamps=True,
)
logs = logs.splitlines()
for line in logs:
self.log.info("Container logs from %s", line)
return logs
except HTTPError:
self.log.exception("There was an error reading the kubernetes API.")
raise
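# Illustrative usage sketch (not part of the original module). Endpoint and
# certificate values are placeholders; the async hook is normally driven from a
# trigger's event loop.
async def _example_read_finished_pod_logs() -> None:
    """Minimal sketch: fetch a pod with GKEPodAsyncHook and dump its logs once it has finished."""
    async_hook = GKEPodAsyncHook(
        cluster_url="https://35.190.0.1",
        ssl_ca_cert="<base64-encoded CA certificate>",
        gcp_conn_id="google_cloud_default",
    )
    pod = await async_hook.get_pod(name="example-pod", namespace="default")
    if pod.status.phase in ("Succeeded", "Failed"):
        await async_hook.read_logs(name="example-pod", namespace="default")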
airflow-main/airflow/providers/google/cloud/hooks/secret_manager.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Secrets Manager service."""
from __future__ import annotations
from typing import Sequence
from airflow.providers.google.cloud._internal_client.secret_manager_client import _SecretManagerClient
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class SecretsManagerHook(GoogleBaseHook):
"""
Hook for the Google Secret Manager API.
See https://cloud.google.com/secret-manager
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.client = _SecretManagerClient(credentials=self.get_credentials())
def get_conn(self) -> _SecretManagerClient:
"""
Retrieves the connection to Secret Manager.
:return: Secret Manager client.
"""
return self.client
@GoogleBaseHook.fallback_to_default_project_id
def get_secret(
self, secret_id: str, secret_version: str = "latest", project_id: str | None = None
) -> str | None:
"""
Get secret value from the Secret Manager.
:param secret_id: Secret Key
:param secret_version: version of the secret (default is 'latest')
:param project_id: Project id (if you want to override the project_id from credentials)
"""
return self.get_conn().get_secret(
secret_id=secret_id, secret_version=secret_version, project_id=project_id # type: ignore
)
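# Illustrative usage sketch (not part of the original module). Secret and project
# names are placeholders; a configured `google_cloud_default` connection is assumed.
def _example_fetch_secret() -> None:
    """Minimal sketch: read the latest version of a secret with SecretsManagerHook."""
    hook = SecretsManagerHook(gcp_conn_id="google_cloud_default")
    # Returns the decoded payload string, or None if the secret or version is missing.
    value = hook.get_secret(secret_id="my-secret", secret_version="latest", project_id="my-project")
    print(value)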
airflow-main/airflow/providers/google/cloud/hooks/natural_language.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Natural Language Hook."""
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.language_v1 import EncodingType, LanguageServiceClient
from google.cloud.language_v1.types import (
AnalyzeEntitiesResponse,
AnalyzeEntitySentimentResponse,
AnalyzeSentimentResponse,
AnalyzeSyntaxResponse,
AnnotateTextRequest,
AnnotateTextResponse,
ClassifyTextResponse,
Document,
)
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudNaturalLanguageHook(GoogleBaseHook):
"""
Hook for Google Cloud Natural Language Service.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._conn: LanguageServiceClient | None = None
def get_conn(self) -> LanguageServiceClient:
"""
Retrieves connection to Cloud Natural Language service.
:return: Cloud Natural Language service object
"""
if not self._conn:
self._conn = LanguageServiceClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
return self._conn
@GoogleBaseHook.quota_retry()
def analyze_entities(
self,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AnalyzeEntitiesResponse:
"""
Finds named entities in the text along with various properties.
Examples properties: entity types, salience, mentions for each entity, and others.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(document, dict):
document = Document(document)
return client.analyze_entities(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.quota_retry()
def analyze_entity_sentiment(
self,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AnalyzeEntitySentimentResponse:
"""
Similar to AnalyzeEntities, also analyzes sentiment associated with each entity and its mentions.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(document, dict):
document = Document(document)
return client.analyze_entity_sentiment(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.quota_retry()
def analyze_sentiment(
self,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AnalyzeSentimentResponse:
"""
Analyzes the sentiment of the provided text.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(document, dict):
document = Document(document)
return client.analyze_sentiment(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.quota_retry()
def analyze_syntax(
self,
document: dict | Document,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AnalyzeSyntaxResponse:
"""
Analyzes the syntax of the text.
Provides sentence boundaries and tokenization along with part
of speech tags, dependency trees, and other properties.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(document, dict):
document = Document(document)
return client.analyze_syntax(
document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.quota_retry()
def annotate_text(
self,
document: dict | Document,
features: dict | AnnotateTextRequest.Features,
encoding_type: EncodingType | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AnnotateTextResponse:
"""
Provide all features that analyzeSentiment, analyzeEntities, and analyzeSyntax provide in one call.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param features: The enabled features.
If a dict is provided, it must be of the same form as the protobuf message Features
:param encoding_type: The encoding type used by the API to calculate offsets.
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(document, dict):
document = Document(document)
if isinstance(features, dict):
features = AnnotateTextRequest.Features(features)
return client.annotate_text(
document=document,
features=features,
encoding_type=encoding_type,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.quota_retry()
def classify_text(
self,
document: dict | Document,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ClassifyTextResponse:
"""
Classifies a document into categories.
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(document, dict):
document = Document(document)
return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata)
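# Illustrative usage sketch (not part of the original module). The document dict
# mirrors the protobuf Document message; connection and text are placeholders.
def _example_analyze_sentiment() -> None:
    """Minimal sketch: run sentiment analysis on a plain-text document."""
    hook = CloudNaturalLanguageHook(gcp_conn_id="google_cloud_default")
    document = {"content": "Airflow makes workflow orchestration pleasant.", "type_": "PLAIN_TEXT"}
    response = hook.analyze_sentiment(document=document)
    print(response.document_sentiment.score, response.document_sentiment.magnitude)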
airflow-main/airflow/providers/google/cloud/hooks/compute.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Compute Engine Hook."""
from __future__ import annotations
import time
from typing import Any, Sequence
from google.api_core.retry import Retry
from google.cloud.compute_v1.services.instance_group_managers import InstanceGroupManagersClient
from google.cloud.compute_v1.services.instance_templates import InstanceTemplatesClient
from google.cloud.compute_v1.services.instances import InstancesClient
from google.cloud.compute_v1.types import Instance, InstanceGroupManager, InstanceTemplate
from googleapiclient.discovery import build
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 1
class GceOperationStatus:
"""Class with GCE operations statuses."""
PENDING = "PENDING"
RUNNING = "RUNNING"
DONE = "DONE"
class ComputeEngineHook(GoogleBaseHook):
"""
Hook for Google Compute Engine APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
_conn: Any | None = None
def get_conn(self):
"""
Retrieves connection to Google Compute Engine.
:return: Google Compute Engine services object (googleapiclient.discovery.Resource)
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("compute", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
def get_compute_instance_template_client(self):
"""Returns Compute Engine Instance Template Client."""
return InstanceTemplatesClient(credentials=self.get_credentials(), client_info=self.client_info)
def get_compute_instance_client(self):
"""Returns Compute Engine Instance Client."""
return InstancesClient(credentials=self.get_credentials(), client_info=self.client_info)
def get_compute_instance_group_managers_client(self):
"""Returns Compute Engine Instance Group Managers Client."""
return InstanceGroupManagersClient(credentials=self.get_credentials(), client_info=self.client_info)
@GoogleBaseHook.fallback_to_default_project_id
def insert_instance_template(
self,
body: dict,
request_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Creates Instance Template using body specified.
Must be called with keyword arguments rather than positional.
:param body: Instance Template representation as an object.
:param request_id: Unique request_id that you might add to achieve
full idempotence (for example, if a client call times out, repeating the request
with the same request_id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122.
:param project_id: Google Cloud project ID where the Compute Engine Instance Template exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_compute_instance_template_client()
operation = client.insert(
# Calling method insert() on client to create Instance Template.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.InsertInstanceTemplateRequest, dict] to construct a request
# message.
# The request object should be represented using arguments:
# instance_template_resource (google.cloud.compute_v1.types.InstanceTemplate):
# The body resource for this request.
# request_id (str):
# An optional request ID to identify requests.
# project (str):
# Project ID for this request.
request={
"instance_template_resource": body,
"request_id": request_id,
"project": project_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self._wait_for_operation_to_complete(operation_name=operation.name, project_id=project_id)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance_template(
self,
resource_id: str,
request_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Deletes Instance Template.
Deleting an Instance Template is permanent and cannot be undone. It is not
possible to delete templates that are already in use by a managed instance
group. Must be called with keyword arguments rather than positional.
:param resource_id: Name of the Compute Engine Instance Template resource.
:param request_id: Unique request_id that you might add to achieve
full idempotence (for example, if a client call times out, repeating the request
with the same request_id will not delete the instance template again).
It should be in UUID format as defined in RFC 4122.
:param project_id: Google Cloud project ID where the Compute Engine Instance Template exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_compute_instance_template_client()
operation = client.delete(
# Calling method delete() on client to delete Instance Template.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.DeleteInstanceTemplateRequest, dict] to
# construct a request message.
# The request object should be represented using arguments:
# instance_template (str):
# The name of the Instance Template to delete.
# project (str):
# Project ID for this request.
# request_id (str):
# An optional request ID to identify requests.
request={
"instance_template": resource_id,
"project": project_id,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self._wait_for_operation_to_complete(operation_name=operation.name, project_id=project_id)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance_template(
self,
resource_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InstanceTemplate:
"""
Retrieves Instance Template by project_id and resource_id.
Must be called with keyword arguments rather than positional.
:param resource_id: Name of the Instance Template.
:param project_id: Google Cloud project ID where the Compute Engine Instance Template exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: Instance Template representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
:rtype: object
"""
client = self.get_compute_instance_template_client()
instance_template = client.get(
# Calling method get() on client to get the specified Instance Template.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.GetInstanceTemplateRequest, dict] to construct a request
# message.
# The request object should be represented using arguments:
# instance_template (str):
# The name of the Instance Template.
# project (str):
# Project ID for this request.
request={
"instance_template": resource_id,
"project": project_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return instance_template
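# Illustrative usage sketch (comments only, not part of the original hook; project
# and template names are placeholders):
#
#     hook = ComputeEngineHook(gcp_conn_id="google_cloud_default")
#     hook.insert_instance_template(
#         project_id="my-project",
#         body={
#             "name": "example-template",
#             "properties": {
#                 "machine_type": "e2-small",
#                 "disks": [...],
#                 "network_interfaces": [...],
#             },
#         },
#     )
#     template = hook.get_instance_template(resource_id="example-template", project_id="my-project")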
@GoogleBaseHook.fallback_to_default_project_id
def insert_instance(
self,
body: dict,
zone: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
source_instance_template: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Creates Instance using body specified.
Must be called with keyword arguments rather than positional.
:param body: Instance representation as an object. Should at least include 'name', 'machine_type',
'disks' and 'network_interfaces' fields, but must not include the 'zone' field, as that is
specified via the 'zone' parameter.
Resource URLs can be full or partial, for example:
1. "machine_type": "projects/your-project-name/zones/your-zone/machineTypes/your-machine-type"
2. "source_image": "projects/your-project-name/global/images/your-image"
3. "subnetwork": "projects/your-project-name/regions/your-region/subnetworks/your-subnetwork"
:param zone: Google Cloud zone where the Instance exists
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param source_instance_template: Existing Instance Template that will be used as a base while
creating the new Instance.
When specified, only the name of the new Instance needs to be provided in the 'body'
parameter. All other parameters are taken from the Instance Template as specified there.
Full or partial URL, for example:
1. "https://www.googleapis.com/compute/v1/projects/your-project/global/instanceTemplates/temp"
2. "projects/your-project/global/instanceTemplates/temp"
3. "global/instanceTemplates/temp"
:param request_id: Unique request_id that you might add to achieve
full idempotence (for example, if a client call times out, repeating the request
with the same request_id will not create a new instance again).
It should be in UUID format as defined in RFC 4122.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_compute_instance_client()
operation = client.insert(
# Calling method insert() on client to create Instance.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.InsertInstanceRequest, dict] to construct a request
# message.
# The request object should be represented using arguments:
# instance_resource (google.cloud.compute_v1.types.Instance):
# The body resource for this request.
# request_id (str):
# Optional, request ID to identify requests.
# project (str):
# Project ID for this request.
# zone (str):
# The name of the zone for this request.
# source_instance_template (str):
# Optional, link to Instance Template, that can be used to create new Instance.
request={
"instance_resource": body,
"request_id": request_id,
"project": project_id,
"zone": zone,
"source_instance_template": source_instance_template,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation.name, zone=zone)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
resource_id: str,
zone: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Instance:
"""
Retrieves Instance by project_id and resource_id.
Must be called with keyword arguments rather than positional.
:param resource_id: Name of the Instance
:param zone: Google Cloud zone where the Instance exists
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: Instance representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instances
:rtype: object
"""
client = self.get_compute_instance_client()
instance = client.get(
# Calling method get() on client to get the specified Instance.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.GetInstanceRequest, dict] to construct a request
# message.
# The request object should be represented using arguments:
# instance (str):
# The name of the Instance.
# project (str):
# Project ID for this request.
# zone (str):
# The name of the zone for this request.
request={
"instance": resource_id,
"project": project_id,
"zone": zone,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return instance
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
resource_id: str,
zone: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Permanently and irrevocably deletes an Instance.
It is not possible to delete Instances that are already in use by a managed instance group.
Must be called with keyword arguments rather than positional.
:param resource_id: Name of the Compute Engine Instance resource.
:param request_id: Unique request_id that you might add to achieve
full idempotence (for example, if a client call times out, repeating the request
with the same request_id will not delete the instance again).
It should be in UUID format as defined in RFC 4122.
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param zone: Google Cloud zone where the Instance exists
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_compute_instance_client()
operation = client.delete(
# Calling method delete() on client to delete Instance.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.DeleteInstanceRequest, dict] to construct a request
# message.
# The request object should be represented using arguments:
# instance (str):
# Name of the Instance resource to delete.
# project (str):
# Project ID for this request.
# request_id (str):
# An optional request ID to identify requests.
# zone (str):
# The name of the zone for this request.
request={
"instance": resource_id,
"project": project_id,
"request_id": request_id,
"zone": zone,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation.name, zone=zone)
@GoogleBaseHook.fallback_to_default_project_id
def start_instance(self, zone: str, resource_id: str, project_id: str) -> None:
"""
Starts an existing instance defined by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud zone where the instance exists
:param resource_id: Name of the Compute Engine instance resource
:param project_id: Optional, Google Cloud project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.instances()
.start(project=project_id, zone=zone, instance=resource_id)
.execute(num_retries=self.num_retries)
)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(f"Wrong response '{response}' returned - it should contain 'name' field")
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
@GoogleBaseHook.fallback_to_default_project_id
def stop_instance(self, zone: str, resource_id: str, project_id: str) -> None:
"""
Stops an instance defined by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud zone where the instance exists
:param resource_id: Name of the Compute Engine instance resource
:param project_id: Optional, Google Cloud project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.instances()
.stop(project=project_id, zone=zone, instance=resource_id)
.execute(num_retries=self.num_retries)
)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(f"Wrong response '{response}' returned - it should contain 'name' field")
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
@GoogleBaseHook.fallback_to_default_project_id
def set_machine_type(self, zone: str, resource_id: str, body: dict, project_id: str) -> None:
"""
Sets machine type of an instance defined by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud zone where the instance exists.
:param resource_id: Name of the Compute Engine instance resource
:param body: Body required by the Compute Engine setMachineType API,
as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType
:param project_id: Optional, Google Cloud project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:return: None
"""
response = self._execute_set_machine_type(zone, resource_id, body, project_id)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(f"Wrong response '{response}' returned - it should contain 'name' field")
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
def _execute_set_machine_type(self, zone: str, resource_id: str, body: dict, project_id: str) -> dict:
return (
self.get_conn()
.instances()
.setMachineType(project=project_id, zone=zone, instance=resource_id, body=body)
.execute(num_retries=self.num_retries)
)
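# Illustrative usage sketch (comments only, not part of the original hook; zone,
# instance and machine type are placeholders). Changing the machine type requires
# the instance to be stopped first:
#
#     hook = ComputeEngineHook(gcp_conn_id="google_cloud_default")
#     hook.stop_instance(zone="us-central1-a", resource_id="example-vm", project_id="my-project")
#     hook.set_machine_type(
#         zone="us-central1-a",
#         resource_id="example-vm",
#         body={"machineType": "zones/us-central1-a/machineTypes/e2-standard-4"},
#         project_id="my-project",
#     )
#     hook.start_instance(zone="us-central1-a", resource_id="example-vm", project_id="my-project")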
@GoogleBaseHook.fallback_to_default_project_id
def insert_instance_group_manager(
self,
body: dict,
zone: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Creates an Instance Group Manager using the body specified.
After the group is created, instances in the group are created using the specified Instance Template.
Must be called with keyword arguments rather than positional.
:param body: Instance Group Manager representation as an object.
:param request_id: Unique request_id that you might add to achieve
full idempotence (for example, if a client call times out, repeating the request
with the same request_id will not create a new Instance Group Manager again).
It should be in UUID format as defined in RFC 4122.
:param project_id: Google Cloud project ID where the Compute Engine Instance Group Managers exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param zone: Google Cloud zone where the Instance exists
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_compute_instance_group_managers_client()
operation = client.insert(
# Calling method insert() on client to create the specified Instance Group Managers.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.InsertInstanceGroupManagerRequest, dict] to construct
# a request message.
# The request object should be represented using arguments:
# instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager):
# The body resource for this request.
# project (str):
# Project ID for this request.
# zone (str):
# The name of the zone where you want to create the managed instance group.
# request_id (str):
# An optional request ID to identify requests.
request={
"instance_group_manager_resource": body,
"project": project_id,
"zone": zone,
"request_id": request_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation.name, zone=zone)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance_group_manager(
self,
resource_id: str,
zone: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InstanceGroupManager:
"""
Retrieves Instance Group Manager by project_id, zone and resource_id.
Must be called with keyword arguments rather than positional.
:param resource_id: The name of the Managed Instance Group
:param zone: Google Cloud zone where the Instance Group Managers exists
:param project_id: Google Cloud project ID where the Compute Engine Instance Group Managers exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:return: Instance Group Managers representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers
:rtype: object
"""
client = self.get_compute_instance_group_managers_client()
instance_group_manager = client.get(
# Calling method get() on client to get the specified Instance Group Manager.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.GetInstanceGroupManagerRequest, dict] to construct a
# request message.
# The request object should be represented using arguments:
# instance_group_manager (str):
# The name of the Managed Instance Group.
# project (str):
# Project ID for this request.
# zone (str):
# The name of the zone for this request.
request={
"instance_group_manager": resource_id,
"project": project_id,
"zone": zone,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return instance_group_manager
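# Standalone usage sketch (assumption: ``hook`` is an instance of the enclosing
# Compute Engine hook with valid credentials; resource names are placeholders).
igm = hook.get_instance_group_manager(
    resource_id="example-igm",
    zone="europe-west1-b",
    project_id="example-project",
)
# The result is a google.cloud.compute_v1.types.InstanceGroupManager, so fields
# such as the target size are plain attributes.
print(igm.name, igm.target_size)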
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance_group_manager(
self,
resource_id: str,
zone: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Permanently and irrevocably deletes Instance Group Managers.
Must be called with keyword arguments rather than positional.
:param resource_id: Name of the Compute Engine Instance Group Managers resource.
        :param request_id: Optional, unique request_id that you might add to achieve
            full idempotence (for example, when a client call times out, repeating the request
            with the same request_id will not delete the Instance Group Manager again).
            It should be in UUID format as defined in RFC 4122.
:param project_id: Google Cloud project ID where the Compute Engine Instance Group Managers exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param zone: Google Cloud zone where the Instance Group Managers exists
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_compute_instance_group_managers_client()
operation = client.delete(
# Calling method delete() on client to delete Instance Group Managers.
# This method accepts request object as an argument and should be of type
# Union[google.cloud.compute_v1.types.DeleteInstanceGroupManagerRequest, dict] to construct a
# request message.
# The request object should be represented using arguments:
# instance_group_manager (str):
# Name of the Instance resource to delete.
# project (str):
# Project ID for this request.
# request_id (str):
# An optional request ID to identify requests.
# zone (str):
# The name of the zone for this request.
request={
"instance_group_manager": resource_id,
"project": project_id,
"request_id": request_id,
"zone": zone,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation.name, zone=zone)
@GoogleBaseHook.fallback_to_default_project_id
def patch_instance_group_manager(
self,
zone: str,
resource_id: str,
body: dict,
project_id: str,
request_id: str | None = None,
) -> None:
"""
Patches Instance Group Manager with the specified body.
Must be called with keyword arguments rather than positional.
:param zone: Google Cloud zone where the Instance Group Manager exists
:param resource_id: Name of the Instance Group Manager
:param body: Instance Group Manager representation as json-merge-patch object
according to
            https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers/patch
        :param request_id: Optional, unique request_id that you might add to achieve
            full idempotence (for example, when a client call times out, repeating the request
            with the same request_id will not apply the patch again).
            It should be in UUID format as defined in RFC 4122.
:param project_id: Optional, Google Cloud project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:return: None
"""
response = (
self.get_conn()
.instanceGroupManagers()
.patch(
project=project_id,
zone=zone,
instanceGroupManager=resource_id,
body=body,
requestId=request_id,
)
.execute(num_retries=self.num_retries)
)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(f"Wrong response '{response}' returned - it should contain 'name' field")
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
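# Standalone sketch of an idempotent patch call (assumption: ``hook`` is an
# instance of the enclosing Compute Engine hook; names are placeholders).
# Supplying a UUID request_id lets a timed-out call be retried safely.
import uuid

hook.patch_instance_group_manager(
    zone="europe-west1-b",
    resource_id="example-igm",
    # json-merge-patch body: point the group at a new instance template.
    body={"instanceTemplate": "global/instanceTemplates/example-template-v2"},
    project_id="example-project",
    request_id=str(uuid.uuid4()),
)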
def _wait_for_operation_to_complete(
self, project_id: str, operation_name: str, zone: str | None = None
) -> None:
"""
Waits for the named operation to complete - checks status of the async call.
:param operation_name: name of the operation
        :param zone: optional zone of the request (might be None for global operations)
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
:return: None
"""
service = self.get_conn()
while True:
self.log.info("Waiting for Operation to complete...")
if zone is None:
operation_response = self._check_global_operation_status(
service=service,
operation_name=operation_name,
project_id=project_id,
num_retries=self.num_retries,
)
else:
operation_response = self._check_zone_operation_status(
service, operation_name, project_id, zone, self.num_retries
)
if operation_response.get("status") == GceOperationStatus.DONE:
error = operation_response.get("error")
if error:
code = operation_response.get("httpErrorStatusCode")
msg = operation_response.get("httpErrorMessage")
# Extracting the errors list as string and trimming square braces
error_msg = str(error.get("errors"))[1:-1]
raise AirflowException(f"{code} {msg}: " + error_msg)
break
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
@staticmethod
def _check_zone_operation_status(
service: Any, operation_name: str, project_id: str, zone: str, num_retries: int
) -> dict:
return (
service.zoneOperations()
.get(project=project_id, zone=zone, operation=operation_name)
.execute(num_retries=num_retries)
)
@staticmethod
def _check_global_operation_status(
service: Any, operation_name: str, project_id: str, num_retries: int
) -> dict:
return (
service.globalOperations()
.get(project=project_id, operation=operation_name)
.execute(num_retries=num_retries)
)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance_info(self, zone: str, resource_id: str, project_id: str) -> dict[str, Any]:
"""
Gets instance information.
        :param zone: Google Cloud zone where the instance exists.
        :param resource_id: Name of the Compute Engine instance resource.
:param project_id: Optional, Google Cloud project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
"""
instance_info = (
self.get_conn()
.instances()
.get(project=project_id, instance=resource_id, zone=zone)
.execute(num_retries=self.num_retries)
)
return instance_info
@GoogleBaseHook.fallback_to_default_project_id
def get_instance_address(
self, zone: str, resource_id: str, project_id: str = PROVIDE_PROJECT_ID, use_internal_ip: bool = False
) -> str:
"""
        Return a network address associated with the instance.
        :param zone: Google Cloud zone where the instance exists.
        :param resource_id: Name of the Compute Engine instance resource.
:param project_id: Optional, Google Cloud project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:param use_internal_ip: If true, return private IP address.
"""
instance_info = self.get_instance_info(project_id=project_id, resource_id=resource_id, zone=zone)
if use_internal_ip:
return instance_info["networkInterfaces"][0].get("networkIP")
access_config = instance_info["networkInterfaces"][0].get("accessConfigs")
if access_config:
return access_config[0].get("natIP")
raise AirflowException("The target instance does not have external IP")
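# Standalone sketch: fetch the externally reachable address of a VM, falling
# back to the private address when no access config exists (assumption:
# ``hook`` is an instance of the enclosing Compute Engine hook; names are
# placeholders).
from airflow.exceptions import AirflowException

try:
    address = hook.get_instance_address(
        zone="europe-west1-b", resource_id="example-instance", project_id="example-project"
    )
except AirflowException:
    address = hook.get_instance_address(
        zone="europe-west1-b",
        resource_id="example-instance",
        project_id="example-project",
        use_internal_ip=True,
    )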
@GoogleBaseHook.fallback_to_default_project_id
def set_instance_metadata(
self, zone: str, resource_id: str, metadata: dict[str, str], project_id: str
) -> None:
"""
Set instance metadata.
        :param zone: Google Cloud zone where the instance exists.
        :param resource_id: Name of the Compute Engine instance resource.
:param metadata: The new instance metadata.
:param project_id: Optional, Google Cloud project ID where the
Compute Engine Instance exists. If set to None or missing,
the default project_id from the Google Cloud connection is used.
"""
response = (
self.get_conn()
.instances()
.setMetadata(project=project_id, zone=zone, instance=resource_id, body=metadata)
.execute(num_retries=self.num_retries)
)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name, zone=zone)
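# Standalone sketch: the setMetadata API replaces the whole metadata resource
# and requires the current fingerprint, so read-modify-write is the usual
# pattern (assumption: ``hook`` is an instance of the enclosing Compute Engine
# hook; names and the metadata key/value are placeholders).
info = hook.get_instance_info(
    zone="europe-west1-b", resource_id="example-instance", project_id="example-project"
)
current = info.get("metadata", {})
items = current.get("items", [])
items.append({"key": "example-key", "value": "example-value"})
hook.set_instance_metadata(
    zone="europe-west1-b",
    resource_id="example-instance",
    metadata={"fingerprint": current.get("fingerprint"), "items": items},
    project_id="example-project",
)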
| 40,647 | 46.485981 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/workflows.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.workflows.executions_v1beta import Execution, ExecutionsClient
from google.cloud.workflows.executions_v1beta.services.executions.pagers import ListExecutionsPager
from google.cloud.workflows_v1beta import Workflow, WorkflowsClient
from google.cloud.workflows_v1beta.services.workflows.pagers import ListWorkflowsPager
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class WorkflowsHook(GoogleBaseHook):
"""
    Hook for the Google Cloud Workflows APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
def get_workflows_client(self) -> WorkflowsClient:
"""Returns WorkflowsClient."""
return WorkflowsClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
def get_executions_client(self) -> ExecutionsClient:
"""Returns ExecutionsClient."""
return ExecutionsClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
@GoogleBaseHook.fallback_to_default_project_id
def create_workflow(
self,
workflow: dict,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Creates a new workflow.
If a workflow with the specified name already exists in the
specified project and location, the long running operation will
return [ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS] error.
:param workflow: Required. Workflow to be created.
:param workflow_id: Required. The ID of the workflow to be created.
        :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
parent = f"projects/{project_id}/locations/{location}"
return client.create_workflow(
request={"parent": parent, "workflow": workflow, "workflow_id": workflow_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
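# Standalone usage sketch (assumptions: a configured "google_cloud_default"
# connection; the project, location and workflow ids as well as the workflow
# source below are placeholders).
from airflow.providers.google.cloud.hooks.workflows import WorkflowsHook

hook = WorkflowsHook(gcp_conn_id="google_cloud_default")
operation = hook.create_workflow(
    workflow={"source_contents": 'main:\n  steps:\n  - init:\n      return: "ok"'},
    workflow_id="example-workflow",
    location="europe-west1",
    project_id="example-project",
)
workflow = operation.result()  # block until the long-running operation finishes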
@GoogleBaseHook.fallback_to_default_project_id
def get_workflow(
self,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Workflow:
"""
Gets details of a single Workflow.
        :param workflow_id: Required. The ID of the workflow to be retrieved.
        :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.get_workflow(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
def update_workflow(
self,
workflow: dict | Workflow,
update_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Updates an existing workflow.
Running this method has no impact on already running
executions of the workflow. A new revision of the
workflow may be created as a result of a successful
update operation. In that case, such revision will be
used in new workflow executions.
        :param workflow: Required. Workflow to be updated.
:param update_mask: List of fields to be updated. If not present,
the entire workflow will be updated.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
return client.update_workflow(
request={"workflow": workflow, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
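# Standalone sketch: update only the workflow source by passing a FieldMask
# (assumption: ``hook`` is a WorkflowsHook as above; names are placeholders).
from google.protobuf.field_mask_pb2 import FieldMask

workflow = hook.get_workflow(
    workflow_id="example-workflow", location="europe-west1", project_id="example-project"
)
workflow.source_contents = 'main:\n  steps:\n  - init:\n      return: "updated"'
operation = hook.update_workflow(
    workflow=workflow,
    update_mask=FieldMask(paths=["source_contents"]),
)
updated = operation.result()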
@GoogleBaseHook.fallback_to_default_project_id
def delete_workflow(
self,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete a workflow with the specified name and all running executions of the workflow.
        :param workflow_id: Required. The ID of the workflow to be deleted.
        :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.delete_workflow(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def list_workflows(
self,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListWorkflowsPager:
"""
Lists Workflows in a given project and location; the default order is not specified.
:param filter_: Filter to restrict results to specific workflows.
:param order_by: Comma-separated list of fields that
specifies the order of the results. Default sorting order for a field is ascending.
To specify descending order for a field, append a "desc" suffix.
If not specified, the results will be returned in an unspecified order.
        :param project_id: Required. The ID of the Google Cloud project the workflows belong to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
parent = f"projects/{project_id}/locations/{location}"
return client.list_workflows(
request={"parent": parent, "filter": filter_, "order_by": order_by},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_execution(
self,
workflow_id: str,
location: str,
execution: dict,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Execution:
"""
Creates a new execution using the latest revision of the given workflow.
:param execution: Required. Input parameters of the execution represented as a dictionary.
:param workflow_id: Required. The ID of the workflow.
        :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
parent = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
execution = {k: str(v) if isinstance(v, dict) else v for k, v in execution.items()}
return client.create_execution(
request={"parent": parent, "execution": execution},
retry=retry,
timeout=timeout,
metadata=metadata,
)
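# Standalone sketch: start an execution and read it back (assumption: ``hook``
# is a WorkflowsHook as above; ids are placeholders). Note that dict values in
# ``execution`` are stringified by create_execution() before the API call.
execution = hook.create_execution(
    workflow_id="example-workflow",
    location="europe-west1",
    execution={"argument": '{"name": "example"}'},
    project_id="example-project",
)
execution_id = execution.name.rpartition("/")[-1]
result = hook.get_execution(
    workflow_id="example-workflow",
    execution_id=execution_id,
    location="europe-west1",
    project_id="example-project",
)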
@GoogleBaseHook.fallback_to_default_project_id
def get_execution(
self,
workflow_id: str,
execution_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Execution:
"""
Returns an execution for the given ``workflow_id`` and ``execution_id``.
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
        :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}/executions/{execution_id}"
return client.get_execution(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def cancel_execution(
self,
workflow_id: str,
execution_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Execution:
"""
Cancels an execution using the given ``workflow_id`` and ``execution_id``.
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
        :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}/executions/{execution_id}"
return client.cancel_execution(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def list_executions(
self,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListExecutionsPager:
"""
Returns a list of executions which belong to the workflow with the given name.
The method returns executions of all workflow revisions. Returned
executions are ordered by their start time (newest first).
        :param workflow_id: Required. The ID of the workflow whose executions should be listed.
        :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
parent = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.list_executions(
request={"parent": parent}, retry=retry, timeout=timeout, metadata=metadata
)
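# Standalone sketch: the returned pager can be iterated directly; each item is
# an Execution proto (assumption: ``hook`` is a WorkflowsHook; ids are
# placeholders).
for execution in hook.list_executions(
    workflow_id="example-workflow", location="europe-west1", project_id="example-project"
):
    print(execution.name, execution.state)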
| 16,460 | 46.166189 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/hyperparameter_tuning_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vertex AI hook.
.. spelling:word-list::
irreproducible
codepoints
Tensorboard
aiplatform
myVPC
"""
from __future__ import annotations
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform import CustomJob, HyperparameterTuningJob, gapic, hyperparameter_tuning
from google.cloud.aiplatform_v1 import JobServiceClient, types
from google.cloud.aiplatform_v1.services.job_service.pagers import ListHyperparameterTuningJobsPager
from airflow import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class HyperparameterTuningJobHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Hyperparameter Tuning Job APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._hyperparameter_tuning_job: HyperparameterTuningJob | None = None
def get_job_service_client(self, region: str | None = None) -> JobServiceClient:
"""Returns JobServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return JobServiceClient(
credentials=self.get_credentials(), client_info=self.client_info, client_options=client_options
)
def get_hyperparameter_tuning_job_object(
self,
display_name: str,
custom_job: CustomJob,
metric_spec: dict[str, str],
parameter_spec: dict[str, hyperparameter_tuning._ParameterSpec],
max_trial_count: int,
parallel_trial_count: int,
max_failed_trial_count: int = 0,
search_algorithm: str | None = None,
measurement_selection: str | None = "best",
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
encryption_spec_key_name: str | None = None,
) -> HyperparameterTuningJob:
"""Returns HyperparameterTuningJob object."""
return HyperparameterTuningJob(
display_name=display_name,
custom_job=custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
max_failed_trial_count=max_failed_trial_count,
search_algorithm=search_algorithm,
measurement_selection=measurement_selection,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
)
def get_custom_job_object(
self,
display_name: str,
worker_pool_specs: list[dict] | list[gapic.WorkerPoolSpec],
base_output_dir: str | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
) -> CustomJob:
"""Returns CustomJob object."""
return CustomJob(
display_name=display_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
project=project,
location=location,
            credentials=self.get_credentials(),
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
staging_bucket=staging_bucket,
)
@staticmethod
def extract_hyperparameter_tuning_job_id(obj: dict) -> str:
"""Returns unique id of the hyperparameter_tuning_job."""
return obj["name"].rpartition("/")[-1]
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def cancel_hyperparameter_tuning_job(self) -> None:
"""Cancel HyperparameterTuningJob."""
if self._hyperparameter_tuning_job:
self._hyperparameter_tuning_job.cancel()
@GoogleBaseHook.fallback_to_default_project_id
def create_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
display_name: str,
metric_spec: dict[str, str],
parameter_spec: dict[str, hyperparameter_tuning._ParameterSpec],
max_trial_count: int,
parallel_trial_count: int,
# START: CustomJob param
worker_pool_specs: list[dict] | list[gapic.WorkerPoolSpec],
base_output_dir: str | None = None,
custom_job_labels: dict[str, str] | None = None,
custom_job_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# END: CustomJob param
max_failed_trial_count: int = 0,
search_algorithm: str | None = None,
measurement_selection: str | None = "best",
hyperparameter_tuning_job_labels: dict[str, str] | None = None,
hyperparameter_tuning_job_encryption_spec_key_name: str | None = None,
# START: run param
service_account: str | None = None,
network: str | None = None,
timeout: int | None = None, # seconds
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: str | None = None,
sync: bool = True,
# END: run param
) -> HyperparameterTuningJob:
"""
Create a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of the HyperparameterTuningJob. The name can be
            up to 128 characters long and can consist of any UTF-8 characters.
:param metric_spec: Required. Dictionary representing metrics to optimize. The dictionary key is the
metric_id, which is reported by your training job, and the dictionary value is the optimization
            goal of the metric ('minimize' or 'maximize').
            For example: ``metric_spec = {'loss': 'minimize', 'accuracy': 'maximize'}``
:param parameter_spec: Required. Dictionary representing parameters to optimize. The dictionary key
is the metric_id, which is passed into your training job as a command line key word argument, and
the dictionary value is the parameter specification of the metric.
:param max_trial_count: Required. The desired total number of Trials.
:param parallel_trial_count: Required. The desired number of Trials to run in parallel.
:param worker_pool_specs: Required. The spec of the worker pools including machine type and Docker
            image. Can be provided as a list of dictionaries or a list of WorkerPoolSpec proto messages.
:param base_output_dir: Optional. GCS output directory of job. If not provided a timestamped
directory in the staging directory will be used.
:param custom_job_labels: Optional. The labels with user-defined metadata to organize CustomJobs.
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
        :param custom_job_encryption_spec_key_name: Optional. Customer-managed encryption key name for a
CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the
provided encryption key.
:param staging_bucket: Optional. Bucket for produced custom job artifacts. Overrides staging_bucket
set in aiplatform.init.
:param max_failed_trial_count: Optional. The number of failed Trials that need to be seen before
failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail
before the whole job fails.
:param search_algorithm: The search algorithm specified for the Study. Accepts one of the following:
`None` - If you do not specify an algorithm, your job uses the default Vertex AI algorithm. The
default algorithm applies Bayesian optimization to arrive at the optimal solution with a more
effective search over the parameter space.
'grid' - A simple grid search within the feasible space. This option is particularly useful if
you want to specify a quantity of trials that is greater than the number of points in the
feasible space. In such cases, if you do not specify a grid search, the Vertex AI default
algorithm may generate duplicate suggestions. To use grid search, all parameter specs must be of
            type `IntegerParameterSpec`, `CategoricalParameterSpec`, or `DiscreteParameterSpec`.
'random' - A simple random search within the feasible space.
:param measurement_selection: This indicates which measurement to use if/when the service
automatically selects the final measurement from previously reported intermediate measurements.
Accepts: 'best', 'last'
Choose this based on two considerations:
A) Do you expect your measurements to monotonically improve? If so, choose 'last'. On the other
hand, if you're in a situation where your system can "over-train" and you expect the performance
to get better for a while but then start declining, choose 'best'.
B) Are your measurements significantly noisy and/or irreproducible? If so, 'best' will tend to be
over-optimistic, and it may be better to choose 'last'.
If both or neither of (A) and (B) apply, it doesn't matter which selection type is chosen.
:param hyperparameter_tuning_job_labels: Optional. The labels with user-defined metadata to organize
HyperparameterTuningJobs. Label keys and values can be no longer than 64 characters (Unicode
codepoints), can only contain lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for more information and examples
of labels.
:param hyperparameter_tuning_job_encryption_spec_key_name: Optional. Customer-managed encryption key
options for a HyperparameterTuningJob. If this is set, then all resources created by the
HyperparameterTuningJob will be encrypted with the provided encryption key.
:param service_account: Optional. Specifies the service account for workload run-as account. Users
submitting jobs must have act-as permission on this run-as account.
:param network: Optional. The full name of the Compute Engine network to which the job should be
peered. For example, projects/12345/global/networks/myVPC. Private services access must already
be configured for the network. If left unspecified, the job is not peered with any network.
:param timeout: The maximum job running time in seconds. The default is 7 days.
:param restart_job_on_worker_restart: Restarts the entire CustomJob if a worker gets restarted. This
feature can be used by distributed training jobs that are not resilient to workers leaving and
joining a job.
:param enable_web_access: Whether you want Vertex AI to enable interactive shell access to training
containers. https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
:param tensorboard: Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob will
upload Tensorboard logs. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. The training script should
            write Tensorboard logs to the following Vertex AI environment variable: AIP_TENSORBOARD_LOG_DIR.
            `service_account` is required when `tensorboard` is provided. For more information on configuring
your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute this method synchronously. If False, this method will unblock and it
will be executed in a concurrent Future.
"""
custom_job = self.get_custom_job_object(
project=project_id,
location=region,
display_name=display_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
labels=custom_job_labels,
encryption_spec_key_name=custom_job_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
self._hyperparameter_tuning_job = self.get_hyperparameter_tuning_job_object(
project=project_id,
location=region,
display_name=display_name,
custom_job=custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
max_failed_trial_count=max_failed_trial_count,
search_algorithm=search_algorithm,
measurement_selection=measurement_selection,
labels=hyperparameter_tuning_job_labels,
encryption_spec_key_name=hyperparameter_tuning_job_encryption_spec_key_name,
)
self._hyperparameter_tuning_job.run(
service_account=service_account,
network=network,
timeout=timeout, # seconds
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
sync=sync,
)
self._hyperparameter_tuning_job.wait()
return self._hyperparameter_tuning_job
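# Standalone usage sketch (the parameter-spec helpers come from
# google.cloud.aiplatform.hyperparameter_tuning, imported at the top of this
# module; project, region, bucket and image names are placeholders).
from google.cloud.aiplatform import hyperparameter_tuning as hpt

from airflow.providers.google.cloud.hooks.vertex_ai.hyperparameter_tuning_job import (
    HyperparameterTuningJobHook,
)

hook = HyperparameterTuningJobHook(gcp_conn_id="google_cloud_default")
job = hook.create_hyperparameter_tuning_job(
    project_id="example-project",
    region="europe-west4",
    display_name="example-hp-tuning",
    metric_spec={"loss": "minimize"},
    parameter_spec={"learning_rate": hpt.DoubleParameterSpec(min=1e-5, max=1e-1, scale="log")},
    max_trial_count=8,
    parallel_trial_count=2,
    worker_pool_specs=[
        {
            "machine_spec": {"machine_type": "n1-standard-4"},
            "replica_count": 1,
            "container_spec": {"image_uri": "gcr.io/example-project/trainer:latest"},
        }
    ],
    staging_bucket="gs://example-bucket",
)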
@GoogleBaseHook.fallback_to_default_project_id
def get_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
hyperparameter_tuning_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.HyperparameterTuningJob:
"""
Gets a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job: Required. The name of the HyperparameterTuningJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.hyperparameter_tuning_job_path(project_id, region, hyperparameter_tuning_job)
result = client.get_hyperparameter_tuning_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_hyperparameter_tuning_jobs(
self,
project_id: str,
region: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListHyperparameterTuningJobsPager:
"""
Lists HyperparameterTuningJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_hyperparameter_tuning_jobs(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
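# Standalone sketch: list only failed jobs and print their display names
# (assumption: ``hook`` is a HyperparameterTuningJobHook as above).
pager = hook.list_hyperparameter_tuning_jobs(
    project_id="example-project",
    region="europe-west4",
    filter='state="JOB_STATE_FAILED"',
)
for tuning_job in pager:
    print(tuning_job.display_name)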
@GoogleBaseHook.fallback_to_default_project_id
def delete_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
hyperparameter_tuning_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job: Required. The name of the HyperparameterTuningJob resource to be
deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.hyperparameter_tuning_job_path(project_id, region, hyperparameter_tuning_job)
result = client.delete_hyperparameter_tuning_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
| 20,841 | 48.271868 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/auto_ml.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Vertex AI hook.
.. spelling:word-list::
aiplatform
au
codepoints
milli
mae
quantile
quantiles
Quantiles
rmse
rmsle
rmspe
wape
prc
roc
Jetson
forecasted
Struct
sentimentMax
TrainingPipeline
targetColumn
optimizationObjective
"""
from __future__ import annotations
import warnings
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform import (
AutoMLForecastingTrainingJob,
AutoMLImageTrainingJob,
AutoMLTabularTrainingJob,
AutoMLTextTrainingJob,
AutoMLVideoTrainingJob,
datasets,
models,
)
from google.cloud.aiplatform_v1 import JobServiceClient, PipelineServiceClient
from google.cloud.aiplatform_v1.services.pipeline_service.pagers import ListTrainingPipelinesPager
from google.cloud.aiplatform_v1.types import TrainingPipeline
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class AutoMLHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Auto ML APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._job: None | (
AutoMLForecastingTrainingJob
| AutoMLImageTrainingJob
| AutoMLTabularTrainingJob
| AutoMLTextTrainingJob
| AutoMLVideoTrainingJob
) = None
def get_pipeline_service_client(
self,
region: str | None = None,
) -> PipelineServiceClient:
"""Returns PipelineServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return PipelineServiceClient(
credentials=self.get_credentials(), client_info=self.client_info, client_options=client_options
)
def get_job_service_client(
self,
region: str | None = None,
) -> JobServiceClient:
"""Returns JobServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return JobServiceClient(
credentials=self.get_credentials(), client_info=self.client_info, client_options=client_options
)
def get_auto_ml_tabular_training_job(
self,
display_name: str,
optimization_prediction_type: str,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
optimization_objective_recall_value: float | None = None,
optimization_objective_precision_value: float | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLTabularTrainingJob:
"""Returns AutoMLTabularTrainingJob object."""
return AutoMLTabularTrainingJob(
display_name=display_name,
optimization_prediction_type=optimization_prediction_type,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_forecasting_training_job(
self,
display_name: str,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLForecastingTrainingJob:
"""Returns AutoMLForecastingTrainingJob object."""
return AutoMLForecastingTrainingJob(
display_name=display_name,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_image_training_job(
self,
display_name: str,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: models.Model | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLImageTrainingJob:
"""Returns AutoMLImageTrainingJob object."""
return AutoMLImageTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
model_type=model_type,
base_model=base_model,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_text_training_job(
self,
display_name: str,
prediction_type: str,
multi_label: bool = False,
sentiment_max: int = 10,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLTextTrainingJob:
"""Returns AutoMLTextTrainingJob object."""
return AutoMLTextTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
sentiment_max=sentiment_max,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
def get_auto_ml_video_training_job(
self,
display_name: str,
prediction_type: str = "classification",
model_type: str = "CLOUD",
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
) -> AutoMLVideoTrainingJob:
"""Returns AutoMLVideoTrainingJob object."""
return AutoMLVideoTrainingJob(
display_name=display_name,
prediction_type=prediction_type,
model_type=model_type,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
@staticmethod
def extract_model_id(obj: dict) -> str:
"""Returns unique id of the Model."""
return obj["name"].rpartition("/")[-1]
@staticmethod
def extract_training_id(resource_name: str) -> str:
"""Returns unique id of the Training pipeline."""
return resource_name.rpartition("/")[-1]
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def cancel_auto_ml_job(self) -> None:
"""Cancel Auto ML Job for training pipeline."""
if self._job:
self._job.cancel()
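# Standalone sketch of the id-extraction helpers above: they simply take the
# last path segment of a Vertex AI resource name (names below are made up).
pipeline_name = "projects/example-project/locations/europe-west4/trainingPipelines/1234567890"
print(AutoMLHook.extract_training_id(pipeline_name))  # -> "1234567890"
print(AutoMLHook.extract_model_id({"name": "projects/p/locations/l/models/42"}))  # -> "42"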
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_tabular_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.TabularDataset,
target_column: str,
optimization_prediction_type: str,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
optimization_objective_recall_value: float | None = None,
optimization_objective_precision_value: float | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
weight_column: str | None = None,
budget_milli_node_hours: int = 1000,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
disable_early_stopping: bool = False,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: str | None = None,
export_evaluated_data_items_override_destination: bool = False,
sync: bool = True,
) -> tuple[models.Model | None, str]:
"""
Create an AutoML Tabular Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular
Datasets, all their data is exported to training, to pick and choose from.
:param target_column: Required. The name of the column values of which the Model is to predict.
:param optimization_prediction_type: The type of prediction the Model is to produce.
"classification" - Predict one out of multiple target values is picked for each row.
"regression" - Predict a value based on its relation to other values. This type is available only
            to columns that contain semantically numeric values, i.e. integers or floating point numbers, even
if stored as e.g. strings.
:param optimization_objective: Optional. Objective function the Model is to be optimized towards.
The training task creates a Model that maximizes/minimizes the value of the objective function
over the validation set.
The supported optimization objectives depend on the prediction type, and in the case of
classification also the number of distinct values in the target column (two distinct values
-> binary, 3 or more distinct values -> multi class). If the field is not set, the default
objective function is used.
Classification (binary):
"maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC)
curve.
"minimize-log-loss" - Minimize log loss.
"maximize-au-prc" - Maximize the area under the precision-recall curve.
"maximize-precision-at-recall" - Maximize precision for a specified recall value.
"maximize-recall-at-precision" - Maximize recall for a specified precision value.
Classification (multi class):
"minimize-log-loss" (default) - Minimize log loss.
Regression:
"minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
"minimize-mae" - Minimize mean-absolute error (MAE).
"minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
:param column_specs: Optional. Alternative to column_transformations where the keys of the dict are
column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types.
When creating transformation for BigQuery Struct column, the column should be flattened using "."
as the delimiter. Only columns with no child should have a transformation. If an input column has
no transformations on it, such a column is ignored by the training, except for the targetColumn,
            which should have no transformations defined on it. Only one of column_transformations or
column_specs should be passed.
:param column_transformations: Optional. Transformations to apply to the input columns (i.e. columns
other than the targetColumn). Each transformation may produce multiple result values from the
column's value, and all are used for training. When creating transformation for BigQuery Struct
column, the column should be flattened using "." as the delimiter. Only columns with no child
should have a transformation. If an input column has no transformations on it, such a column is
ignored by the training, except for the targetColumn, which should have no transformations
            defined on it. Only one of column_transformations or column_specs should be passed. Consider using
column_specs as column_transformations will be deprecated eventually.
:param optimization_objective_recall_value: Optional. Required when maximize-precision-at-recall
optimizationObjective was picked, represents the recall value at which the optimization is done.
The minimum value is 0 and the maximum is 1.0.
:param optimization_objective_precision_value: Optional. Required when maximize-recall-at-precision
optimizationObjective was picked, represents the precision value at which the optimization is
done.
The minimum value is 0 and the maximum is 1.0.
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created. If set, this TrainingPipeline will
be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created. If set, the trained Model will be
secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or value in the column) must be one of
{``training``, ``validation``, ``test``}, and it defines to which set the given piece of data is
assigned. If for a piece of data the key is not present or has an invalid value, that piece is
ignored by the pipeline. Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data columns.
            The values of the key (the values in the column) must be in RFC 3339 `date-time`
format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the
key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only
for tabular and time series Datasets. This parameter must be used with training_fraction_split,
validation_fraction_split and test_fraction_split.
:param weight_column: Optional. Name of the column that should be used as the weight column. Higher
values in this column give more importance to the row during Model training. The column must have
numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the
weight column field is not set, then all rows are assumed to have equal weight of 1.
        :param budget_milli_node_hours: Optional. The train budget of creating this Model, expressed in
milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model
will not exceed this budget. The final cost will be attempted to be close to the budget, though
may end up being (even) noticeably smaller - at the backend's discretion. This especially may
happen when further model training ceases to provide any improvements. If the budget is set to a
value known to be insufficient to train a Model for the given training set, the training won't be
attempted and will error. The minimum value is 1000 and the maximum is 72000.
        :param model_display_name: Optional. The display name of the managed Vertex AI Model produced by the
            training, if any. The name can be up to 128 characters long and can consist of any UTF-8
            characters. If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param disable_early_stopping: Required. If true, the entire budget is used. This disables the early
stopping feature. By default, the early stopping feature is enabled, which means that training
            might stop before the entire training budget has been used, if further training no longer
            brings significant improvement to the model.
:param export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table.
If False, then the export is not performed.
:param export_evaluated_data_items_bigquery_destination_uri: Optional. URI of desired destination
BigQuery table for exported test set predictions.
Expected format: ``bq://<project_id>:<dataset_id>:<table>``
If not specified, then results are exported to the following auto-created BigQuery table:
``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>
.evaluated_examples``
Applies only if [export_evaluated_data_items] is True.
:param export_evaluated_data_items_override_destination: Whether to override the contents of
[export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test
set predictions. If False, and the table exists, then the training job will fail. Applies only if
[export_evaluated_data_items] is True and [export_evaluated_data_items_bigquery_destination_uri]
is specified.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
if column_transformations:
warnings.warn(
"Consider using column_specs as column_transformations will be deprecated eventually.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self._job = self.get_auto_ml_tabular_training_job(
project=project_id,
location=region,
display_name=display_name,
optimization_prediction_type=optimization_prediction_type,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLTabularTrainingJob was not created")
model = self._job.run(
dataset=dataset,
target_column=target_column,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
weight_column=weight_column,
budget_milli_node_hours=budget_milli_node_hours,
model_display_name=model_display_name,
model_labels=model_labels,
disable_early_stopping=disable_early_stopping,
export_evaluated_data_items=export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=(
export_evaluated_data_items_bigquery_destination_uri
),
export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
sync=sync,
)
training_id = self.extract_training_id(self._job.resource_name)
if model:
model.wait()
else:
self.log.warning(
"Training did not produce a Managed Model returning None. Training Pipeline is not "
"configured to upload a Model."
)
return model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_forecasting_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.TimeSeriesDataset,
target_column: str,
time_column: str,
time_series_identifier_column: str,
unavailable_at_forecast_columns: list[str],
available_at_forecast_columns: list[str],
forecast_horizon: int,
data_granularity_unit: str,
data_granularity_count: int,
optimization_objective: str | None = None,
column_specs: dict[str, str] | None = None,
column_transformations: list[dict[str, dict[str, str]]] | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
predefined_split_column_name: str | None = None,
weight_column: str | None = None,
time_series_attribute_columns: list[str] | None = None,
context_window: int | None = None,
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: str | None = None,
export_evaluated_data_items_override_destination: bool = False,
quantiles: list[float] | None = None,
validation_options: str | None = None,
budget_milli_node_hours: int = 1000,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
sync: bool = True,
) -> tuple[models.Model | None, str]:
"""
Create an AutoML Forecasting Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For time series
Datasets, all their data is exported to training, to pick and choose from.
:param target_column: Required. Name of the column that the Model is to predict values for.
:param time_column: Required. Name of the column that identifies time order in the time series.
:param time_series_identifier_column: Required. Name of the column that identifies the time series.
:param unavailable_at_forecast_columns: Required. Column names of columns that are unavailable at
forecast. Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is unknown before the forecast (e.g. population of a city
in a given year, or weather on a given day).
:param available_at_forecast_columns: Required. Column names of columns that are available at
forecast. Each column contains information for the given entity (identified by the
[time_series_identifier_column]) that is known at forecast.
:param forecast_horizon: Required. The amount of time into the future for which forecasted values for
the target are returned. Expressed in number of units defined by the [data_granularity_unit] and
[data_granularity_count] field. Inclusive.
:param data_granularity_unit: Required. The data granularity unit. Accepted values are ``minute``,
``hour``, ``day``, ``week``, ``month``, ``year``.
:param data_granularity_count: Required. The number of data granularity units between data points in
the training data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all
other values of [data_granularity_unit], must be 1.
:param optimization_objective: Optional. Objective function the model is to be optimized towards. The
training process creates a Model that optimizes the value of the objective function over the
validation set. The supported optimization objectives:
"minimize-rmse" (default) - Minimize root-mean-squared error (RMSE).
"minimize-mae" - Minimize mean-absolute error (MAE).
"minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
"minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE).
"minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and
mean-absolute-error (MAE).
"minimize-quantile-loss" - Minimize the quantile loss at the defined quantiles. (Set this
objective to build quantile forecasts.)
:param column_specs: Optional. Alternative to column_transformations where the keys of the dict are
column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types.
When creating transformation for BigQuery Struct column, the column should be flattened using "."
as the delimiter. Only columns with no child should have a transformation. If an input column has
no transformations on it, such a column is ignored by the training, except for the targetColumn,
            which should have no transformations defined on it. Only one of column_transformations or
column_specs should be passed.
:param column_transformations: Optional. Transformations to apply to the input columns (i.e. columns
other than the targetColumn). Each transformation may produce multiple result values from the
column's value, and all are used for training. When creating transformation for BigQuery Struct
column, the column should be flattened using "." as the delimiter. Only columns with no child
should have a transformation. If an input column has no transformations on it, such a column is
ignored by the training, except for the targetColumn, which should have no transformations
            defined on it. Only one of column_transformations or column_specs should be passed. Consider using
column_specs as column_transformations will be deprecated eventually.
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created. If set, this TrainingPipeline will
be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or value in the column) must be one of
{``TRAIN``, ``VALIDATE``, ``TEST``}, and it defines to which set the given piece of data is
assigned. If for a piece of data the key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param weight_column: Optional. Name of the column that should be used as the weight column. Higher
values in this column give more importance to the row during Model training. The column must have
numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the
weight column field is not set, then all rows are assumed to have equal weight of 1.
:param time_series_attribute_columns: Optional. Column names that should be used as attribute
columns. Each column is constant within a time series.
:param context_window: Optional. The amount of time into the past training and prediction data is
used for model training and prediction respectively. Expressed in number of units defined by the
[data_granularity_unit] and [data_granularity_count] fields. When not provided uses the default
value of 0 which means the model sets each series context window to be 0 (also known as "cold
start"). Inclusive.
:param export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table.
If False, then the export is not performed.
:param export_evaluated_data_items_bigquery_destination_uri: Optional. URI of desired destination
BigQuery table for exported test set predictions. Expected format:
``bq://<project_id>:<dataset_id>:<table>``
If not specified, then results are exported to the following auto-created BigQuery table:
``<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>
.evaluated_examples``
Applies only if [export_evaluated_data_items] is True.
:param export_evaluated_data_items_override_destination: Whether to override the contents of
[export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test
set predictions. If False, and the table exists, then the training job will fail.
Applies only if [export_evaluated_data_items] is True and
[export_evaluated_data_items_bigquery_destination_uri] is specified.
        :param quantiles: Quantiles to use for the `minimize-quantile-loss`
[AutoMLForecastingTrainingJob.optimization_objective]. This argument is required in this case.
Accepts up to 5 quantiles in the form of a double from 0 to 1, exclusive. Each quantile must be
unique.
        :param validation_options: Validation options for the data validation component. The available
            options are: "fail-pipeline" (default) - fail the pipeline if the data validation fails;
            "ignore-validation" - ignore the results of the validation and continue the pipeline.
:param budget_milli_node_hours: Optional. The train budget of creating this Model, expressed in milli
node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will
not exceed this budget. The final cost will be attempted to be close to the budget, though may
end up being (even) noticeably smaller - at the backend's discretion. This especially may happen
when further model training ceases to provide any improvements. If the budget is set to a value
known to be insufficient to train a Model for the given training set, the training won't be
attempted and will error. The minimum value is 1000 and the maximum is 72000.
        :param model_display_name: Optional. The display name of the managed Vertex AI Model produced by the
            training, if any. The name can be up to 128 characters long and can consist of any UTF-8
            characters. If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
if column_transformations:
warnings.warn(
"Consider using column_specs as column_transformations will be deprecated eventually.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self._job = self.get_auto_ml_forecasting_training_job(
project=project_id,
location=region,
display_name=display_name,
optimization_objective=optimization_objective,
column_specs=column_specs,
column_transformations=column_transformations,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLForecastingTrainingJob was not created")
model = self._job.run(
dataset=dataset,
target_column=target_column,
time_column=time_column,
time_series_identifier_column=time_series_identifier_column,
unavailable_at_forecast_columns=unavailable_at_forecast_columns,
available_at_forecast_columns=available_at_forecast_columns,
forecast_horizon=forecast_horizon,
data_granularity_unit=data_granularity_unit,
data_granularity_count=data_granularity_count,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
predefined_split_column_name=predefined_split_column_name,
weight_column=weight_column,
time_series_attribute_columns=time_series_attribute_columns,
context_window=context_window,
export_evaluated_data_items=export_evaluated_data_items,
export_evaluated_data_items_bigquery_destination_uri=(
export_evaluated_data_items_bigquery_destination_uri
),
export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
quantiles=quantiles,
validation_options=validation_options,
budget_milli_node_hours=budget_milli_node_hours,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
)
training_id = self.extract_training_id(self._job.resource_name)
if model:
model.wait()
else:
self.log.warning(
"Training did not produce a Managed Model returning None. Training Pipeline is not "
"configured to upload a Model."
)
return model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_image_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.ImageDataset,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: models.Model | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
budget_milli_node_hours: int | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
disable_early_stopping: bool = False,
sync: bool = True,
) -> tuple[models.Model | None, str]:
"""
Create an AutoML Image Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular
Datasets, all their data is exported to training, to pick and choose from.
:param prediction_type: The type of prediction the Model is to produce, one of:
"classification" - Predict one out of multiple target values is picked for each row.
"object_detection" - Predict a value based on its relation to other values. This type is
available only to columns that contain semantically numeric values, i.e. integers or floating
point number, even if stored as e.g. strings.
:param multi_label: Required. Default is False. If false, a single-label (multi-class) Model will be
trained (i.e. assuming that for each image just up to one annotation may be applicable). If true,
a multi-label Model will be trained (i.e. assuming that for each image multiple annotations may
be applicable).
This is only applicable for the "classification" prediction_type and will be ignored otherwise.
:param model_type: Required. One of the following:
"CLOUD" - Default for Image Classification. A Model best tailored to be used within Google Cloud,
and which cannot be exported.
"CLOUD_HIGH_ACCURACY_1" - Default for Image Object Detection. A model best tailored to be used
within Google Cloud, and which cannot be exported. Expected to have a higher latency, but should
also have a higher prediction quality than other cloud models.
"CLOUD_LOW_LATENCY_1" - A model best tailored to be used within Google Cloud, and which cannot be
exported. Expected to have a low latency, but may have lower prediction quality than other cloud
models.
"MOBILE_TF_LOW_LATENCY_1" - A model that, in addition to being available within Google Cloud, can
also be exported as TensorFlow or Core ML model and used on a mobile or edge device afterwards.
Expected to have low latency, but may have lower prediction quality than other mobile models.
"MOBILE_TF_VERSATILE_1" - A model that, in addition to being available within Google Cloud, can
also be exported as TensorFlow or Core ML model and used on a mobile or edge device with
afterwards.
"MOBILE_TF_HIGH_ACCURACY_1" - A model that, in addition to being available within Google Cloud,
can also be exported as TensorFlow or Core ML model and used on a mobile or edge device
afterwards. Expected to have a higher latency, but should also have a higher prediction quality
than other mobile models.
:param base_model: Optional. Only permitted for Image Classification models. If it is specified, the
new model will be trained based on the `base` model. Otherwise, the new model will be trained
from scratch. The `base` model must be in the same Project and Location as the new Model to
train, and have the same model_type.
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created. If set, this TrainingPipeline will
be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this
filter are used to test the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param budget_milli_node_hours: Optional. The train budget of creating this Model, expressed in milli
node hours i.e. 1,000 value in this field means 1 node hour.
Defaults by `prediction_type`:
`classification` - For Cloud models the budget must be: 8,000 - 800,000 milli node hours
(inclusive). The default value is 192,000 which represents one day in wall time, assuming 8 nodes
are used.
`object_detection` - For Cloud models the budget must be: 20,000 - 900,000 milli node hours
(inclusive). The default value is 216,000 which represents one day in wall time, assuming 9 nodes
are used.
The training cost of the model will not exceed this budget. The final cost will be attempted to
be close to the budget, though may end up being (even) noticeably smaller - at the backend's
discretion. This especially may happen when further model training ceases to provide any
improvements. If the budget is set to a value known to be insufficient to train a Model for the
given training set, the training won't be attempted and will error.
:param model_display_name: Optional. The display name of the managed Vertex AI Model. The name can be
            up to 128 characters long and can consist of any UTF-8 characters. If not provided upon
creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param disable_early_stopping: Required. If true, the entire budget is used. This disables the early
stopping feature. By default, the early stopping feature is enabled, which means that training
            might stop before the entire training budget has been used, if further training no longer
            brings significant improvement to the model.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
self._job = self.get_auto_ml_image_training_job(
project=project_id,
location=region,
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
model_type=model_type,
base_model=base_model,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLImageTrainingJob was not created")
model = self._job.run(
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
budget_milli_node_hours=budget_milli_node_hours,
model_display_name=model_display_name,
model_labels=model_labels,
disable_early_stopping=disable_early_stopping,
sync=sync,
)
training_id = self.extract_training_id(self._job.resource_name)
if model:
model.wait()
else:
self.log.warning(
"Training did not produce a Managed Model returning None. AutoML Image Training "
"Pipeline is not configured to upload a Model."
)
return model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_text_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.TextDataset,
prediction_type: str,
multi_label: bool = False,
sentiment_max: int = 10,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
sync: bool = True,
) -> tuple[models.Model | None, str]:
"""
Create an AutoML Text Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
:param prediction_type: The type of prediction the Model is to produce, one of:
"classification" - A classification model analyzes text data and returns a list of categories
that apply to the text found in the data. Vertex AI offers both single-label and multi-label text
classification models.
"extraction" - An entity extraction model inspects text data for known entities referenced in the
data and labels those entities in the text.
"sentiment" - A sentiment analysis model inspects text data and identifies the prevailing
emotional opinion within it, especially to determine a writer's attitude as positive, negative,
or neutral.
:param multi_label: Required and only applicable for text classification task. If false, a
single-label (multi-class) Model will be trained (i.e. assuming that for each text snippet just
up to one annotation may be applicable). If true, a multi-label Model will be trained (i.e.
assuming that for each text snippet multiple annotations may be applicable).
:param sentiment_max: Required and only applicable for sentiment task. A sentiment is expressed as an
integer ordinal, where higher value means a more positive sentiment. The range of sentiments that
will be used is between 0 and sentimentMax (inclusive on both ends), and all the values in the
range must be represented in the dataset before a model can be created. Only the Annotations with
this sentimentMax will be used for training. sentimentMax value must be between 1 and 10
(inclusive).
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this
filter are used to test the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param model_display_name: Optional. The display name of the managed Vertex AI Model. The name can be
up to 128 characters long and can consist of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
self._job = self.get_auto_ml_text_training_job(
project=project_id,
location=region,
display_name=display_name,
prediction_type=prediction_type,
multi_label=multi_label,
sentiment_max=sentiment_max,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLTextTrainingJob was not created")
model = self._job.run(
dataset=dataset,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
)
training_id = self.extract_training_id(self._job.resource_name)
if model:
model.wait()
else:
self.log.warning(
"Training did not produce a Managed Model returning None. AutoML Text Training "
"Pipeline is not configured to upload a Model."
)
return model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def create_auto_ml_video_training_job(
self,
project_id: str,
region: str,
display_name: str,
dataset: datasets.VideoDataset,
prediction_type: str = "classification",
model_type: str = "CLOUD",
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
training_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
test_filter_split: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
sync: bool = True,
) -> tuple[models.Model | None, str]:
"""
Create an AutoML Video Training Job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param dataset: Required. The dataset within the same Project from which data will be used to train
the Model. The Dataset must use schema compatible with Model being trained, and what is
compatible should be described in the used TrainingPipeline's [training_task_definition]
[google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular
Datasets, all their data is exported to training, to pick and choose from.
:param prediction_type: The type of prediction the Model is to produce, one of:
"classification" - A video classification model classifies shots and segments in your videos
according to your own defined labels.
"object_tracking" - A video object tracking model detects and tracks multiple objects in shots
and segments. You can use these models to track objects in your videos according to your own
pre-defined, custom labels.
"action_recognition" - A video action recognition model pinpoints the location of actions with
short temporal durations (~1 second).
:param model_type: Required. One of the following:
"CLOUD" - available for "classification", "object_tracking" and "action_recognition" A Model best
tailored to be used within Google Cloud, and which cannot be exported.
"MOBILE_VERSATILE_1" - available for "classification", "object_tracking" and "action_recognition"
A model that, in addition to being available within Google Cloud, can also be exported (see
ModelService.ExportModel) as a TensorFlow or TensorFlow Lite model and used on a mobile or edge
            device afterwards.
"MOBILE_CORAL_VERSATILE_1" - available only for "object_tracking" A versatile model that is meant
to be exported (see ModelService.ExportModel) and used on a Google Coral device.
"MOBILE_CORAL_LOW_LATENCY_1" - available only for "object_tracking" A model that trades off
quality for low latency, to be exported (see ModelService.ExportModel) and used on a Google Coral
device.
"MOBILE_JETSON_VERSATILE_1" - available only for "object_tracking" A versatile model that is
meant to be exported (see ModelService.ExportModel) and used on an NVIDIA Jetson device.
"MOBILE_JETSON_LOW_LATENCY_1" - available only for "object_tracking" A model that trades off
quality for low latency, to be exported (see ModelService.ExportModel) and used on an NVIDIA
Jetson device.
:param labels: Optional. The labels with user-defined metadata to organize TrainingPipelines. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured by this key if ``model_to_upload``
is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created.
If set, the trained Model will be secured by this key.
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match this
filter are used to test the Model. A filter with same syntax as the one used in
DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the
FilterSplit filters, then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param model_display_name: Optional. The display name of the managed Vertex AI Model. The name can be
            up to 128 characters long and can consist of any UTF-8 characters. If not provided upon
creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to organize your Models. Label
keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
"""
self._job = self.get_auto_ml_video_training_job(
project=project_id,
location=region,
display_name=display_name,
prediction_type=prediction_type,
model_type=model_type,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
)
if not self._job:
raise AirflowException("AutoMLVideoTrainingJob was not created")
model = self._job.run(
dataset=dataset,
training_fraction_split=training_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
test_filter_split=test_filter_split,
model_display_name=model_display_name,
model_labels=model_labels,
sync=sync,
)
training_id = self.extract_training_id(self._job.resource_name)
if model:
model.wait()
else:
self.log.warning(
"Training did not produce a Managed Model returning None. AutoML Video Training "
"Pipeline is not configured to upload a Model."
)
return model, training_id
@GoogleBaseHook.fallback_to_default_project_id
def delete_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.delete_training_pipeline(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TrainingPipeline:
"""
Gets a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.get_training_pipeline(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_training_pipelines(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListTrainingPipelinesPager:
"""
Lists TrainingPipelines in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. The standard list filter. Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
Some examples of using the filter are:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token]
of the previous
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]
call.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_training_pipelines(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
| 78,205 | 58.562833 | 133 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/endpoint_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vertex AI hook.
.. spelling:word-list::
undeployed
undeploy
Undeploys
aiplatform
FieldMask
unassigns
"""
from __future__ import annotations
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform_v1 import EndpointServiceClient
from google.cloud.aiplatform_v1.services.endpoint_service.pagers import ListEndpointsPager
from google.cloud.aiplatform_v1.types import DeployedModel, Endpoint
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class EndpointServiceHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Endpoint Service APIs."""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
def get_endpoint_service_client(self, region: str | None = None) -> EndpointServiceClient:
"""Returns EndpointServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return EndpointServiceClient(
credentials=self.get_credentials(), client_info=self.client_info, client_options=client_options
)
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@staticmethod
def extract_endpoint_id(obj: dict) -> str:
"""Returns unique id of the endpoint."""
return obj["name"].rpartition("/")[-1]
@staticmethod
def extract_deployed_model_id(obj: dict) -> str:
"""Returns unique id of the deploy model."""
return obj["deployed_model"]["id"]
@GoogleBaseHook.fallback_to_default_project_id
def create_endpoint(
self,
project_id: str,
region: str,
endpoint: Endpoint | dict,
endpoint_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Creates an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint: Required. The Endpoint to create.
:param endpoint_id: The ID of Endpoint. This value should be 1-10 characters, and valid characters
are /[0-9]/. If not provided, Vertex AI will generate a value for this ID.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_endpoint_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_endpoint(
request={
"parent": parent,
"endpoint": endpoint,
"endpoint_id": endpoint_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_endpoint(
self,
project_id: str,
region: str,
endpoint: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint: Required. The Endpoint to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_endpoint_service_client(region)
name = client.endpoint_path(project_id, region, endpoint)
result = client.delete_endpoint(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def deploy_model(
self,
project_id: str,
region: str,
endpoint: str,
deployed_model: DeployedModel | dict,
traffic_split: Sequence | dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deploys a Model into this Endpoint, creating a DeployedModel within it.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint: Required. The name of the Endpoint resource into which to deploy a Model. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
:param deployed_model: Required. The DeployedModel to be created within the Endpoint. Note that
[Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] must be updated for
the DeployedModel to start receiving traffic, either as part of this call, or via
[EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
:param traffic_split: A map from a DeployedModel's ID to the percentage of this Endpoint's traffic
that should be forwarded to that DeployedModel.
If this field is non-empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. To
            refer to the ID of the Model being deployed by this call, use "0"; the actual ID of the
            new DeployedModel will be filled in its place by this method. The traffic percentage values must
add up to 100.
If this field is empty, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] is not updated.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_endpoint_service_client(region)
endpoint_path = client.endpoint_path(project_id, region, endpoint)
result = client.deploy_model(
request={
"endpoint": endpoint_path,
"deployed_model": deployed_model,
"traffic_split": traffic_split,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
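    # Illustrative sketch of the traffic_split convention documented above (all values are
    # assumed placeholders; a real DeployedModel payload also needs a resources spec such as
    # ``dedicated_resources`` or ``automatic_resources``): key the model being deployed with
    # "0" to send it all traffic.
    #
    #   operation = hook.deploy_model(
    #       project_id="example-project",
    #       region="us-central1",
    #       endpoint="1234567890",
    #       deployed_model={
    #           "model": "projects/example-project/locations/us-central1/models/987",
    #           "display_name": "example-deployed-model",
    #       },
    #       traffic_split={"0": 100},
    #   )
    #   hook.wait_for_operation(operation)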
@GoogleBaseHook.fallback_to_default_project_id
def get_endpoint(
self,
project_id: str,
region: str,
endpoint: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Endpoint:
"""
Gets an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint: Required. The Endpoint to get.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_endpoint_service_client(region)
name = client.endpoint_path(project_id, region, endpoint)
result = client.get_endpoint(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_endpoints(
self,
project_id: str,
region: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListEndpointsPager:
"""
Lists Endpoints in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for
descending. Supported fields:
- ``display_name``
- ``create_time``
- ``update_time``
Example: ``display_name, create_time desc``.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_endpoint_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_endpoints(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
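    # Illustrative sketch (placeholders assumed): iterate over Endpoints using the filter and
    # order_by grammar described above; the returned pager handles page_token continuation.
    #
    #   pager = hook.list_endpoints(
    #       project_id="example-project",
    #       region="us-central1",
    #       filter='display_name="example-endpoint"',
    #       order_by="create_time desc",
    #   )
    #   for endpoint in pager:
    #       print(endpoint.name)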
@GoogleBaseHook.fallback_to_default_project_id
def undeploy_model(
self,
project_id: str,
region: str,
endpoint: str,
deployed_model_id: str,
traffic_split: Sequence | dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Undeploys a Model from an Endpoint, removing a DeployedModel from it, and freeing all used resources.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint: Required. The name of the Endpoint resource from which to undeploy a Model.
:param deployed_model_id: Required. The ID of the DeployedModel to be undeployed from the Endpoint.
:param traffic_split: If this field is provided, then the Endpoint's
[traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. If
last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always
end up empty when this call returns. A DeployedModel will be successfully undeployed only if it
doesn't have any traffic assigned to it when this method executes, or if this field unassigns any
traffic to it.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_endpoint_service_client(region)
endpoint_path = client.endpoint_path(project_id, region, endpoint)
result = client.undeploy_model(
request={
"endpoint": endpoint_path,
"deployed_model_id": deployed_model_id,
"traffic_split": traffic_split,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_endpoint(
self,
project_id: str,
region: str,
endpoint_id: str,
endpoint: Endpoint | dict,
update_mask: FieldMask | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Endpoint:
"""
Updates an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
        :param endpoint_id: Required. The ID of the Endpoint to update.
        :param endpoint: Required. The Endpoint which replaces the resource on the server.
:param update_mask: Required. The update mask applies to the resource. See
[google.protobuf.FieldMask][google.protobuf.FieldMask].
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_endpoint_service_client(region)
endpoint["name"] = client.endpoint_path(project_id, region, endpoint_id)
result = client.update_endpoint(
request={
"endpoint": endpoint,
"update_mask": update_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
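    # Illustrative sketch (placeholders assumed): rename an Endpoint by passing a partial
    # Endpoint dict together with an update mask listing the fields to change.
    #
    #   updated = hook.update_endpoint(
    #       project_id="example-project",
    #       region="us-central1",
    #       endpoint_id="1234567890",
    #       endpoint={"display_name": "renamed-endpoint"},
    #       update_mask={"paths": ["display_name"]},
    #   )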
| 16,164 | 40.770026 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/batch_prediction_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vertex AI hook.
.. spelling:word-list::
jsonl
codepoints
aiplatform
gapic
"""
from __future__ import annotations
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform import BatchPredictionJob, Model, explain
from google.cloud.aiplatform_v1 import JobServiceClient
from google.cloud.aiplatform_v1.services.job_service.pagers import ListBatchPredictionJobsPager
from airflow import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class BatchPredictionJobHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Batch Prediction Job APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._batch_prediction_job: BatchPredictionJob | None = None
def get_job_service_client(self, region: str | None = None) -> JobServiceClient:
"""Returns JobServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return JobServiceClient(
credentials=self.get_credentials(), client_info=self.client_info, client_options=client_options
)
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@staticmethod
def extract_batch_prediction_job_id(obj: dict) -> str:
"""Returns unique id of the batch_prediction_job."""
return obj["name"].rpartition("/")[-1]
def cancel_batch_prediction_job(self) -> None:
"""Cancel BatchPredictionJob."""
if self._batch_prediction_job:
self._batch_prediction_job.cancel()
@GoogleBaseHook.fallback_to_default_project_id
def create_batch_prediction_job(
self,
project_id: str,
region: str,
job_display_name: str,
model_name: str | Model,
instances_format: str = "jsonl",
predictions_format: str = "jsonl",
gcs_source: str | Sequence[str] | None = None,
bigquery_source: str | None = None,
gcs_destination_prefix: str | None = None,
bigquery_destination_prefix: str | None = None,
model_parameters: dict | None = None,
machine_type: str | None = None,
accelerator_type: str | None = None,
accelerator_count: int | None = None,
starting_replica_count: int | None = None,
max_replica_count: int | None = None,
generate_explanation: bool | None = False,
explanation_metadata: explain.ExplanationMetadata | None = None,
explanation_parameters: explain.ExplanationParameters | None = None,
labels: dict[str, str] | None = None,
encryption_spec_key_name: str | None = None,
sync: bool = True,
create_request_timeout: float | None = None,
batch_size: int | None = None,
) -> BatchPredictionJob:
"""
Create a batch prediction job.
:param project_id: Required. Project to run training in.
:param region: Required. Location to run training in.
:param job_display_name: Required. The user-defined name of the BatchPredictionJob. The name can be
            up to 128 characters long and can consist of any UTF-8 characters.
:param model_name: Required. A fully-qualified model resource name or model ID.
:param instances_format: Required. The format in which instances are provided. Must be one of the
formats listed in `Model.supported_input_storage_formats`. Default is "jsonl" when using
`gcs_source`. If a `bigquery_source` is provided, this is overridden to "bigquery".
:param predictions_format: Required. The format in which Vertex AI outputs the predictions, must be
one of the formats specified in `Model.supported_output_storage_formats`. Default is "jsonl" when
using `gcs_destination_prefix`. If a `bigquery_destination_prefix` is provided, this is
overridden to "bigquery".
:param gcs_source: Google Cloud Storage URI(-s) to your instances to run batch prediction on. They
must match `instances_format`. May contain wildcards. For more information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
:param bigquery_source: BigQuery URI to a table, up to 2000 characters long.
For example: `bq://projectId.bqDatasetId.bqTableId`
:param gcs_destination_prefix: The Google Cloud Storage location of the directory where the output is
to be written to. In the given directory a new directory is created. Its name is
``prediction-<model-display-name>-<job-create-time>``, where timestamp is in
YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files ``predictions_0001.<extension>``,
``predictions_0002.<extension>``, ..., ``predictions_N.<extension>`` are created where
``<extension>`` depends on chosen ``predictions_format``, and N may equal 0001 and depends on the
total number of successfully predicted instances. If the Model has both ``instance`` and
``prediction`` schemata defined then each such file contains predictions as per the
``predictions_format``. If prediction for any instance failed (partially or completely), then an
additional ``errors_0001.<extension>``, ``errors_0002.<extension>``,..., ``errors_N.<extension>``
files are created (N depends on total number of failed predictions). These files contain the
failed instances, as per their schema, followed by an additional ``error`` field which as value
            has ``google.rpc.Status`` containing only ``code`` and ``message`` fields.
:param bigquery_destination_prefix: The BigQuery project location where the output is to be written
to. In the given project a new dataset is created with name
            ``prediction_<model-display-name>_<job-create-time>``, where the model display name is made
            BigQuery-dataset-name compatible (for example, most special characters become underscores), and
            timestamp is in
YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created,
``predictions``, and ``errors``. If the Model has both ``instance`` and ``prediction`` schemata
defined then the tables have columns as follows: The ``predictions`` table contains instances for
which the prediction succeeded, it has columns as per a concatenation of the Model's instance and
prediction schemata. The ``errors`` table contains rows for which the prediction has failed, it
has instance columns, as per the instance schema, followed by a single "errors" column, which as
            values has ``google.rpc.Status`` represented as a STRUCT, and containing only
``code`` and ``message``.
:param model_parameters: The parameters that govern the predictions. The schema of the parameters may
be specified via the Model's `parameters_schema_uri`.
:param machine_type: The type of machine for running batch prediction on dedicated resources. Not
specifying machine type will result in batch prediction job being run with automatic resources.
:param accelerator_type: The type of accelerator(s) that may be attached to the machine as per
`accelerator_count`. Only used if `machine_type` is set.
:param accelerator_count: The number of accelerators to attach to the `machine_type`. Only used if
`machine_type` is set.
:param starting_replica_count: The number of machine replicas used at the start of the batch
operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`.
Only used if `machine_type` is set.
:param max_replica_count: The maximum number of machine replicas the batch operation may be scaled
to. Only used if `machine_type` is set. Default is 10.
:param generate_explanation: Optional. Generate explanation along with the batch prediction results.
This will cause the batch prediction output to include explanations based on the
`prediction_format`:
- `bigquery`: output includes a column named `explanation`. The value is a struct that conforms
to the [aiplatform.gapic.Explanation] object.
- `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The
value of the entry is a JSON object that conforms to the [aiplatform.gapic.Explanation] object.
- `csv`: Generating explanations for CSV format is not supported.
:param explanation_metadata: Optional. Explanation metadata configuration for this
BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`.
This value overrides the value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the `explanation_metadata`
object is not populated, the corresponding field of the `Model.explanation_metadata` object is
inherited. For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
:param explanation_parameters: Optional. Parameters to configure explaining for Model's predictions.
Can be specified only if `generate_explanation` is set to `True`.
This value overrides the value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of the `explanation_parameters`
object is not populated, the corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
:param labels: Optional. The labels with user-defined metadata to organize your BatchPredictionJobs.
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed
encryption key used to protect the job. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. The key needs to be
in the same region as where the compute resource is created.
If this is set, then all resources created by the BatchPredictionJob will be encrypted with the
provided encryption key.
Overrides encryption_spec_key_name set in aiplatform.init.
:param sync: Whether to execute this method synchronously. If False, this method will be executed in
concurrent Future and any downstream object will be immediately returned and synced when the
Future has completed.
:param create_request_timeout: Optional. The timeout for the create request in seconds.
        :param batch_size: Optional. The number of records (e.g. instances) sent in each batch
            to a machine replica. The machine type and the size of a single record should be considered
            when setting this parameter: a higher value speeds up the batch operation's execution,
            but a value that is too high will result in a whole batch not fitting in a machine's memory,
            and the whole operation will fail.
            The default value is the same as in the aiplatform's BatchPredictionJob.
"""
self._batch_prediction_job = BatchPredictionJob.create(
job_display_name=job_display_name,
model_name=model_name,
instances_format=instances_format,
predictions_format=predictions_format,
gcs_source=gcs_source,
bigquery_source=bigquery_source,
gcs_destination_prefix=gcs_destination_prefix,
bigquery_destination_prefix=bigquery_destination_prefix,
model_parameters=model_parameters,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
starting_replica_count=starting_replica_count,
max_replica_count=max_replica_count,
generate_explanation=generate_explanation,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
labels=labels,
project=project_id,
location=region,
credentials=self.get_credentials(),
encryption_spec_key_name=encryption_spec_key_name,
sync=sync,
create_request_timeout=create_request_timeout,
batch_size=batch_size,
)
return self._batch_prediction_job
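    # Illustrative usage sketch (bucket, project and model names are assumed placeholders):
    # run a JSONL batch prediction job that reads instances from GCS and writes predictions
    # back to GCS, blocking until completion because sync=True.
    #
    #   hook = BatchPredictionJobHook(gcp_conn_id="google_cloud_default")
    #   job = hook.create_batch_prediction_job(
    #       project_id="example-project",
    #       region="us-central1",
    #       job_display_name="example-batch-prediction",
    #       model_name="projects/example-project/locations/us-central1/models/987",
    #       gcs_source="gs://example-bucket/instances.jsonl",
    #       gcs_destination_prefix="gs://example-bucket/predictions",
    #       sync=True,
    #   )
    #   job_id = hook.extract_batch_prediction_job_id({"name": job.resource_name})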
@GoogleBaseHook.fallback_to_default_project_id
def delete_batch_prediction_job(
self,
project_id: str,
region: str,
batch_prediction_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a BatchPredictionJob. Can only be called on jobs that already finished.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param batch_prediction_job: The name of the BatchPredictionJob resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.batch_prediction_job_path(project_id, region, batch_prediction_job)
result = client.delete_batch_prediction_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_batch_prediction_job(
self,
project_id: str,
region: str,
batch_prediction_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> BatchPredictionJob:
"""
Gets a BatchPredictionJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param batch_prediction_job: Required. The name of the BatchPredictionJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.batch_prediction_job_path(project_id, region, batch_prediction_job)
result = client.get_batch_prediction_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_batch_prediction_jobs(
self,
project_id: str,
region: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListBatchPredictionJobsPager:
"""
Lists BatchPredictionJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_batch_prediction_jobs(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
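    # Illustrative sketch (placeholders assumed): list only failed jobs using the filter
    # grammar documented above.
    #
    #   pager = hook.list_batch_prediction_jobs(
    #       project_id="example-project",
    #       region="us-central1",
    #       filter='state="JOB_STATE_FAILED"',
    #   )
    #   for job in pager:
    #       print(job.name, job.state)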
| 19,770 | 52.871935 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/dataset.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vertex AI hook."""
from __future__ import annotations
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform_v1 import DatasetServiceClient
from google.cloud.aiplatform_v1.services.dataset_service.pagers import (
ListAnnotationsPager,
ListDataItemsPager,
ListDatasetsPager,
)
from google.cloud.aiplatform_v1.types import AnnotationSpec, Dataset, ExportDataConfig, ImportDataConfig
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class DatasetHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Dataset APIs."""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
def get_dataset_service_client(self, region: str | None = None) -> DatasetServiceClient:
"""Returns DatasetServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return DatasetServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@staticmethod
def extract_dataset_id(obj: dict) -> str:
"""Returns unique id of the dataset."""
return obj["name"].rpartition("/")[-1]
@GoogleBaseHook.fallback_to_default_project_id
def create_dataset(
self,
project_id: str,
region: str,
dataset: Dataset | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Creates a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The Dataset to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_dataset(
request={
"parent": parent,
"dataset": dataset,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
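    # Illustrative usage sketch (project, region and schema URI are assumed placeholders):
    # create a Dataset, resolve the long-running Operation, then derive its ID with the
    # helper above.
    #
    #   hook = DatasetHook(gcp_conn_id="google_cloud_default")
    #   operation = hook.create_dataset(
    #       project_id="example-project",
    #       region="us-central1",
    #       dataset={
    #           "display_name": "example-dataset",
    #           "metadata_schema_uri": "gs://example-bucket/schema/dataset_metadata.yaml",
    #       },
    #   )
    #   dataset = hook.wait_for_operation(operation)
    #   dataset_id = hook.extract_dataset_id(Dataset.to_dict(dataset))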
@GoogleBaseHook.fallback_to_default_project_id
def delete_dataset(
self,
project_id: str,
region: str,
dataset: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.delete_dataset(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def export_data(
self,
project_id: str,
region: str,
dataset: str,
export_config: ExportDataConfig | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Exports data from a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to export.
:param export_config: Required. The desired output location.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.export_data(
request={
"name": name,
"export_config": export_config,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_annotation_spec(
self,
project_id: str,
region: str,
dataset: str,
annotation_spec: str,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AnnotationSpec:
"""
Gets an AnnotationSpec.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset.
:param annotation_spec: The ID of the AnnotationSpec resource.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.annotation_spec_path(project_id, region, dataset, annotation_spec)
result = client.get_annotation_spec(
request={
"name": name,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset(
self,
project_id: str,
region: str,
dataset: str,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Dataset:
"""
Gets a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to export.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.get_dataset(
request={
"name": name,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def import_data(
self,
project_id: str,
region: str,
dataset: str,
import_configs: Sequence[ImportDataConfig],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Imports data into a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset to import.
:param import_configs: Required. The desired input locations. The contents of all input locations
will be imported in one batch.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
name = client.dataset_path(project_id, region, dataset)
result = client.import_data(
request={
"name": name,
"import_configs": import_configs,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
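    # Illustrative sketch (bucket and schema URIs are assumed placeholders): import JSONL
    # annotations from GCS into an existing Dataset and wait for the operation to finish.
    #
    #   operation = hook.import_data(
    #       project_id="example-project",
    #       region="us-central1",
    #       dataset="1234567890",
    #       import_configs=[
    #           {
    #               "gcs_source": {"uris": ["gs://example-bucket/import/data.jsonl"]},
    #               "import_schema_uri": "gs://example-bucket/schema/import_schema.yaml",
    #           }
    #       ],
    #   )
    #   hook.wait_for_operation(operation)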
@GoogleBaseHook.fallback_to_default_project_id
def list_annotations(
self,
project_id: str,
region: str,
dataset: str,
data_item: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListAnnotationsPager:
"""
        Lists Annotations belonging to a DataItem.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset.
:param data_item: Required. The ID of the DataItem to list Annotations from.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.data_item_path(project_id, region, dataset, data_item)
result = client.list_annotations(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_data_items(
self,
project_id: str,
region: str,
dataset: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListDataItemsPager:
"""
Lists DataItems in a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset: Required. The ID of the Dataset.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.dataset_path(project_id, region, dataset)
result = client.list_data_items(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_datasets(
self,
project_id: str,
region: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListDatasetsPager:
"""
Lists Datasets in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_datasets(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
def update_dataset(
self,
project_id: str,
region: str,
dataset_id: str,
dataset: Dataset | dict,
update_mask: FieldMask | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Dataset:
"""
Updates a Dataset.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param dataset_id: Required. The ID of the Dataset.
:param dataset: Required. The Dataset which replaces the resource on the server.
:param update_mask: Required. The update mask applies to the resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataset_service_client(region)
dataset["name"] = client.dataset_path(project_id, region, dataset_id)
result = client.update_dataset(
request={
"dataset": dataset,
"update_mask": update_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
| 18,624 | 38.376321 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/custom_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vertex AI hook."""
from __future__ import annotations
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform import (
CustomContainerTrainingJob,
CustomPythonPackageTrainingJob,
CustomTrainingJob,
datasets,
models,
)
from google.cloud.aiplatform_v1 import JobServiceClient, PipelineServiceClient
from google.cloud.aiplatform_v1.services.job_service.pagers import ListCustomJobsPager
from google.cloud.aiplatform_v1.services.pipeline_service.pagers import (
ListPipelineJobsPager,
ListTrainingPipelinesPager,
)
from google.cloud.aiplatform_v1.types import CustomJob, PipelineJob, TrainingPipeline
from airflow import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CustomJobHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Custom Job APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self._job: None | (
CustomContainerTrainingJob | CustomPythonPackageTrainingJob | CustomTrainingJob
) = None
def get_pipeline_service_client(
self,
region: str | None = None,
) -> PipelineServiceClient:
"""Returns PipelineServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return PipelineServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_job_service_client(
self,
region: str | None = None,
) -> JobServiceClient:
"""Returns JobServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return JobServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_custom_container_training_job(
self,
display_name: str,
container_uri: str,
command: Sequence[str] = [],
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
) -> CustomContainerTrainingJob:
"""Returns CustomContainerTrainingJob object."""
return CustomContainerTrainingJob(
display_name=display_name,
container_uri=container_uri,
command=command,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
def get_custom_python_package_training_job(
self,
display_name: str,
python_package_gcs_uri: str,
python_module_name: str,
container_uri: str,
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
):
"""Returns CustomPythonPackageTrainingJob object."""
return CustomPythonPackageTrainingJob(
display_name=display_name,
container_uri=container_uri,
python_package_gcs_uri=python_package_gcs_uri,
python_module_name=python_module_name,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
def get_custom_training_job(
self,
display_name: str,
script_path: str,
container_uri: str,
requirements: Sequence[str] | None = None,
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
):
"""Returns CustomTrainingJob object."""
return CustomTrainingJob(
display_name=display_name,
script_path=script_path,
container_uri=container_uri,
requirements=requirements,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
@staticmethod
def extract_model_id(obj: dict) -> str:
"""Returns unique id of the Model."""
return obj["name"].rpartition("/")[-1]
@staticmethod
def extract_training_id(resource_name: str) -> str:
"""Returns unique id of the Training pipeline."""
return resource_name.rpartition("/")[-1]
@staticmethod
def extract_custom_job_id(custom_job_name: str) -> str:
"""Returns unique id of the Custom Job pipeline."""
return custom_job_name.rpartition("/")[-1]
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def cancel_job(self) -> None:
"""Cancel Job for training pipeline."""
if self._job:
self._job.cancel()
def _run_job(
self,
job: (CustomTrainingJob | CustomContainerTrainingJob | CustomPythonPackageTrainingJob),
dataset: None
| (
datasets.ImageDataset | datasets.TabularDataset | datasets.TextDataset | datasets.VideoDataset
) = None,
annotation_schema_uri: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
base_output_dir: str | None = None,
service_account: str | None = None,
network: str | None = None,
bigquery_destination: str | None = None,
args: list[str | float | int] | None = None,
environment_variables: dict[str, str] | None = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
tensorboard: str | None = None,
sync=True,
) -> tuple[models.Model | None, str, str]:
"""Run Job for training pipeline."""
model = job.run(
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
training_id = self.extract_training_id(job.resource_name)
custom_job_id = self.extract_custom_job_id(
job.gca_resource.training_task_metadata.get("backingCustomJob")
)
if model:
model.wait()
else:
self.log.warning(
"Training did not produce a Managed Model returning None. Training Pipeline is not "
"configured to upload a Model. Create the Training Pipeline with "
"model_serving_container_image_uri and model_display_name passed in. "
"Ensure that your training script saves to model to os.environ['AIP_MODEL_DIR']."
)
return model, training_id, custom_job_id
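    # Illustrative sketch (image URI, bucket and parameter values are assumed placeholders):
    # build a container training job with the factory above and execute it via _run_job,
    # which returns the produced Model (or None), the training pipeline ID and the custom
    # job ID.
    #
    #   hook = CustomJobHook(gcp_conn_id="google_cloud_default")
    #   job = hook.get_custom_container_training_job(
    #       display_name="example-training",
    #       container_uri="gcr.io/example-project/trainer:latest",
    #       command=["python3", "-m", "trainer.task"],
    #       project="example-project",
    #       location="us-central1",
    #       staging_bucket="gs://example-staging-bucket",
    #   )
    #   model, training_id, custom_job_id = hook._run_job(job=job, replica_count=1)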
@GoogleBaseHook.fallback_to_default_project_id
def cancel_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Cancels a PipelineJob.
Starts asynchronous cancellation on the PipelineJob. The server makes a best
effort to cancel the pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] or other
methods to check whether the cancellation succeeded or whether the pipeline completed despite
cancellation. On successful cancellation, the PipelineJob is not deleted; instead it becomes a
pipeline with a [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] value with a
[google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and
[PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to ``CANCELLED``.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: The name of the PipelineJob to cancel.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job)
client.cancel_pipeline_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def cancel_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Cancels a TrainingPipeline.
Starts asynchronous cancellation on the TrainingPipeline. The server makes
a best effort to cancel the pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]
or other methods to check whether the cancellation succeeded or whether the pipeline completed despite
cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a
pipeline with a [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] value with
a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and
[TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline to cancel.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
client.cancel_training_pipeline(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
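# Illustrative sketch only: after a best-effort cancel, the pipeline's ``state``
# can be compared against the ``PipelineState`` enum from
# ``google.cloud.aiplatform_v1`` to confirm it reached CANCELLED. The hook
# variable and resource IDs are placeholders.
#
#   from google.cloud.aiplatform_v1 import PipelineState
#
#   hook.cancel_training_pipeline(
#       project_id="my-project", region="us-central1", training_pipeline="1234567890"
#   )
#   client = hook.get_pipeline_service_client("us-central1")
#   name = client.training_pipeline_path("my-project", "us-central1", "1234567890")
#   pipeline = client.get_training_pipeline(request={"name": name})
#   cancelled = pipeline.state == PipelineState.PIPELINE_STATE_CANCELLED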
@GoogleBaseHook.fallback_to_default_project_id
def cancel_custom_job(
self,
project_id: str,
region: str,
custom_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Cancels a CustomJob.
Starts asynchronous cancellation on the CustomJob. The server makes a best effort
to cancel the job, but success is not guaranteed. Clients can use
[JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or other methods to
check whether the cancellation succeeded or whether the job completed despite cancellation. On
successful cancellation, the CustomJob is not deleted; instead it becomes a job with a
[CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with a
[google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and
[CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The name of the CustomJob to cancel.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = JobServiceClient.custom_job_path(project_id, region, custom_job)
client.cancel_custom_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: PipelineJob,
pipeline_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> PipelineJob:
"""
Creates a PipelineJob. A PipelineJob will run immediately when created.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: Required. The PipelineJob to create.
:param pipeline_job_id: The ID to use for the PipelineJob, which will become the final component of
the PipelineJob name. If not provided, an ID will be automatically generated.
This value should be less than 128 characters, and valid characters are /[a-z][0-9]-/.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_pipeline_job(
request={
"parent": parent,
"pipeline_job": pipeline_job,
"pipeline_job_id": pipeline_job_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
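# Illustrative sketch, assuming the ``PipelineJob`` type used by this module is the
# ``google.cloud.aiplatform_v1`` proto and that a compiled pipeline spec is already
# staged in GCS; field names should be checked against the installed client version.
#
#   pipeline_job = PipelineJob(
#       display_name="my-pipeline",
#       template_uri="gs://my-bucket/pipeline.json",
#   )
#   result = hook.create_pipeline_job(
#       project_id="my-project",
#       region="us-central1",
#       pipeline_job=pipeline_job,
#       pipeline_job_id="my-pipeline-job-id",
#   )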
@GoogleBaseHook.fallback_to_default_project_id
def create_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: TrainingPipeline,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TrainingPipeline:
"""
Creates a TrainingPipeline. Vertex AI attempts to run the created TrainingPipeline right away.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The TrainingPipeline to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_training_pipeline(
request={
"parent": parent,
"training_pipeline": training_pipeline,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
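# Illustrative sketch only: ``TrainingPipeline`` is a large proto, but at a minimum
# it needs a display name, a ``training_task_definition`` schema URI and matching
# ``training_task_inputs``. The values below are placeholders, not a working
# training configuration.
#
#   training_pipeline = TrainingPipeline(
#       display_name="my-training-pipeline",
#       training_task_definition="gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml",
#       training_task_inputs=my_task_inputs,  # struct matching the schema above
#   )
#   result = hook.create_training_pipeline(
#       project_id="my-project", region="us-central1", training_pipeline=training_pipeline
#   )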
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_job(
self,
project_id: str,
region: str,
custom_job: CustomJob,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> CustomJob:
"""
Creates a CustomJob. Vertex AI attempts to run the created CustomJob right away.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The CustomJob to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
parent = JobServiceClient.common_location_path(project_id, region)
result = client.create_custom_job(
request={
"parent": parent,
"custom_job": custom_job,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
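# Illustrative sketch, assuming ``CustomJob`` here is the ``google.cloud.aiplatform_v1``
# proto: a single worker pool running a container image. All literals are placeholders.
#
#   custom_job = CustomJob(
#       display_name="my-custom-job",
#       job_spec={
#           "worker_pool_specs": [
#               {
#                   "machine_spec": {"machine_type": "n1-standard-4"},
#                   "replica_count": 1,
#                   "container_spec": {"image_uri": "gcr.io/my-project/trainer:latest"},
#               }
#           ]
#       },
#   )
#   result = hook.create_custom_job(
#       project_id="my-project", region="us-central1", custom_job=custom_job
#   )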
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_container_training_job(
self,
project_id: str,
region: str,
display_name: str,
container_uri: str,
command: Sequence[str] = [],
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# RUN
dataset: None
| (
datasets.ImageDataset | datasets.TabularDataset | datasets.TextDataset | datasets.VideoDataset
) = None,
annotation_schema_uri: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
base_output_dir: str | None = None,
service_account: str | None = None,
network: str | None = None,
bigquery_destination: str | None = None,
args: list[str | float | int] | None = None,
environment_variables: dict[str, str] | None = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
tensorboard: str | None = None,
sync=True,
) -> tuple[models.Model | None, str, str]:
"""
Create Custom Container Training Job.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param command: The command to be invoked when the container is started.
It overrides the entrypoint instruction in the Dockerfile when provided.
:param container_uri: Required. URI of the training container image in the GCR.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
:param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an
HTTP path to send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
:param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
HTTP path to send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
:param model_serving_container_ports: Declaration of ports that are exposed by the container. This
field is primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
:param dataset: Vertex AI Dataset to fit this training against.
:param annotation_schema_uri: Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
:param model_display_name: If the script produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
:param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
:param replica_count: The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute the AI Platform job synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
"""
self._job = self.get_custom_container_training_job(
project=project_id,
location=region,
display_name=display_name,
container_uri=container_uri,
command=command,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
if not self._job:
raise AirflowException("CustomJob was not created")
model, training_id, custom_job_id = self._run_job(
job=self._job,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
return model, training_id, custom_job_id
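# Illustrative call only (hook variable, bucket and image are placeholders): the
# method returns a ``(model, training_id, custom_job_id)`` tuple, where ``model``
# is ``None`` if the training script does not produce a managed Model.
#
#   model, training_id, custom_job_id = hook.create_custom_container_training_job(
#       project_id="my-project",
#       region="us-central1",
#       display_name="my-container-training",
#       container_uri="gcr.io/my-project/trainer:latest",
#       command=["python3", "-m", "trainer.task"],
#       staging_bucket="gs://my-staging-bucket",
#       replica_count=1,
#       machine_type="n1-standard-4",
#       sync=True,
#   )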
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_python_package_training_job(
self,
project_id: str,
region: str,
display_name: str,
python_package_gcs_uri: str,
python_module_name: str,
container_uri: str,
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# RUN
dataset: None
| (
datasets.ImageDataset | datasets.TabularDataset | datasets.TextDataset | datasets.VideoDataset
) = None,
annotation_schema_uri: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
base_output_dir: str | None = None,
service_account: str | None = None,
network: str | None = None,
bigquery_destination: str | None = None,
args: list[str | float | int] | None = None,
environment_variables: dict[str, str] | None = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
tensorboard: str | None = None,
sync=True,
) -> tuple[models.Model | None, str, str]:
"""
Create Custom Python Package Training Job.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param python_package_gcs_uri: Required. GCS location of the training python package.
:param python_module_name: Required. The module name of the training python package.
:param container_uri: Required. URI of the training container image in the GCR.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
:param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an
HTTP path to send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
:param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
HTTP path to send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
:param model_serving_container_ports: Declaration of ports that are exposed by the container. This
field is primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
:param dataset: Vertex AI Dataset to fit this training against.
:param annotation_schema_uri: Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
:param model_display_name: If the script produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
:param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
:param replica_count: The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute the AI Platform job synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
"""
self._job = self.get_custom_python_package_training_job(
project=project_id,
location=region,
display_name=display_name,
python_package_gcs_uri=python_package_gcs_uri,
python_module_name=python_module_name,
container_uri=container_uri,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
if not self._job:
raise AirflowException("CustomJob was not created")
model, training_id, custom_job_id = self._run_job(
job=self._job,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
return model, training_id, custom_job_id
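# Illustrative call only, with placeholder URIs: the packaged trainer is a standard
# sdist/wheel staged in GCS and ``python_module_name`` is its entry module.
#
#   model, training_id, custom_job_id = hook.create_custom_python_package_training_job(
#       project_id="my-project",
#       region="us-central1",
#       display_name="my-package-training",
#       python_package_gcs_uri="gs://my-bucket/trainer-0.1.tar.gz",
#       python_module_name="trainer.task",
#       container_uri="gcr.io/my-project/training:latest",
#       model_serving_container_image_uri="gcr.io/my-project/serving:latest",
#       model_display_name="my-model",
#       staging_bucket="gs://my-staging-bucket",
#   )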
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_training_job(
self,
project_id: str,
region: str,
display_name: str,
script_path: str,
container_uri: str,
requirements: Sequence[str] | None = None,
model_serving_container_image_uri: str | None = None,
model_serving_container_predict_route: str | None = None,
model_serving_container_health_route: str | None = None,
model_serving_container_command: Sequence[str] | None = None,
model_serving_container_args: Sequence[str] | None = None,
model_serving_container_environment_variables: dict[str, str] | None = None,
model_serving_container_ports: Sequence[int] | None = None,
model_description: str | None = None,
model_instance_schema_uri: str | None = None,
model_parameters_schema_uri: str | None = None,
model_prediction_schema_uri: str | None = None,
labels: dict[str, str] | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# RUN
dataset: None
| (
datasets.ImageDataset | datasets.TabularDataset | datasets.TextDataset | datasets.VideoDataset
) = None,
annotation_schema_uri: str | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
base_output_dir: str | None = None,
service_account: str | None = None,
network: str | None = None,
bigquery_destination: str | None = None,
args: list[str | float | int] | None = None,
environment_variables: dict[str, str] | None = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: float | None = None,
validation_fraction_split: float | None = None,
test_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
predefined_split_column_name: str | None = None,
timestamp_split_column_name: str | None = None,
tensorboard: str | None = None,
sync=True,
) -> tuple[models.Model | None, str, str]:
"""
Create Custom Training Job.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param script_path: Required. Local path to training script.
:param container_uri: Required. URI of the training container image in the GCR.
:param requirements: List of python packages dependencies of script.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
:param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an
HTTP path to send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
:param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
HTTP path to send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
:param model_serving_container_ports: Declaration of ports that are exposed by the container. This
field is primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
:param dataset: Vertex AI Dataset to fit this training against.
:param annotation_schema_uri: Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
:param model_display_name: If the script produces a managed Vertex AI Model, the display name of
the Model. The name can be up to 128 characters long and can consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
:param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
:param replica_count: The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute the AI Platform job synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
"""
self._job = self.get_custom_training_job(
project=project_id,
location=region,
display_name=display_name,
script_path=script_path,
container_uri=container_uri,
requirements=requirements,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
if not self._job:
raise AirflowException("CustomJob was not created")
model, training_id, custom_job_id = self._run_job(
job=self._job,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
return model, training_id, custom_job_id
@GoogleBaseHook.fallback_to_default_project_id
def delete_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a PipelineJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: Required. The name of the PipelineJob resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job)
result = client.delete_pipeline_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
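    # Usage sketch (illustrative only, not part of the original hook): deleting a finished
    # PipelineJob and blocking until the long-running operation completes. The hook class name
    # ``CustomJobHook`` and all resource IDs below are assumptions/placeholders.
    #
    #   hook = CustomJobHook(gcp_conn_id="google_cloud_default")
    #   operation = hook.delete_pipeline_job(
    #       project_id="my-project",
    #       region="us-central1",
    #       pipeline_job="1234567890",
    #   )
    #   operation.result(timeout=600)  # google.api_core.operation.Operation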
@GoogleBaseHook.fallback_to_default_project_id
def delete_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.delete_training_pipeline(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_custom_job(
self,
project_id: str,
region: str,
custom_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a CustomJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The name of the CustomJob to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.custom_job_path(project_id, region, custom_job)
result = client.delete_custom_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> PipelineJob:
"""
Gets a PipelineJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: Required. The name of the PipelineJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job)
result = client.get_pipeline_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TrainingPipeline:
"""
Gets a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.get_training_pipeline(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_custom_job(
self,
project_id: str,
region: str,
custom_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> CustomJob:
"""
Gets a CustomJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The name of the CustomJob to get.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = JobServiceClient.custom_job_path(project_id, region, custom_job)
result = client.get_custom_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_pipeline_jobs(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListPipelineJobsPager:
"""
Lists PipelineJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. Lists the PipelineJobs that match the filter expression. The
following fields are supported:
- ``pipeline_name``: Supports ``=`` and ``!=`` comparisons.
- ``display_name``: Supports ``=``, ``!=`` comparisons, and
``:`` wildcard.
- ``pipeline_job_user_id``: Supports ``=``, ``!=``
              comparisons, and ``:`` wildcard. For example, you can check
              whether a pipeline's display_name contains *step* with
              ``display_name:"*step*"``.
- ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``labels``: Supports key-value equality and key presence.
Filter expressions can be combined together using logical
operators (``AND`` & ``OR``). For example:
``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``.
The syntax to define filter expression is based on
https://google.aip.dev/160.
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token]
of the previous
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]
call.
        :param order_by: Optional. A comma-separated list of fields to order by. The default
            sort order is ascending. Use "desc" after a field name for descending.
            Multiple order_by fields may be provided, e.g. "create_time desc, end_time" or
            "end_time, start_time, update_time". For example, "create_time desc, end_time"
            orders results by create time in descending order and, for jobs with the same
            create time, by end time in ascending order. If order_by is not specified,
            results are ordered by create time in descending order. Supported
            fields:
- ``create_time``
- ``update_time``
- ``end_time``
- ``start_time``
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_pipeline_jobs(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
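    # Usage sketch (illustrative only): listing recent PipelineJobs with a filter and explicit
    # ordering, then iterating the returned pager (which fetches subsequent pages transparently).
    # The hook instance and project/region values are placeholders.
    #
    #   pager = hook.list_pipeline_jobs(
    #       project_id="my-project",
    #       region="us-central1",
    #       filter='create_time>"2023-01-01T00:00:00Z"',
    #       order_by="create_time desc",
    #   )
    #   for pipeline_job in pager:
    #       print(pipeline_job.name, pipeline_job.state)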
@GoogleBaseHook.fallback_to_default_project_id
def list_training_pipelines(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListTrainingPipelinesPager:
"""
Lists TrainingPipelines in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. The standard list filter. Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
Some examples of using the filter are:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token]
of the previous
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]
call.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_training_pipelines(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_custom_jobs(
self,
project_id: str,
region: str,
page_size: int | None,
page_token: str | None,
filter: str | None,
read_mask: str | None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListCustomJobsPager:
"""
Lists CustomJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. The standard list filter. Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
Some examples of using the filter are:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
:param page_size: Optional. The standard list page size.
        :param page_token: Optional. The standard list page token. Typically obtained via
            [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListCustomJobsResponse.next_page_token]
            of the previous
            [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]
            call.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
parent = JobServiceClient.common_location_path(project_id, region)
result = client.list_custom_jobs(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
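    # Usage sketch (illustrative only): paging through CustomJobs. Note that this method declares
    # ``page_size``, ``page_token``, ``filter`` and ``read_mask`` without defaults, so they are
    # passed explicitly here. The hook instance and project values are placeholders.
    #
    #   jobs = hook.list_custom_jobs(
    #       project_id="my-project",
    #       region="us-central1",
    #       page_size=50,
    #       page_token=None,
    #       filter=None,
    #       read_mask=None,
    #   )
    #   for job in jobs:
    #       print(job.display_name, job.state)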
| 109,612 | 52.184377 | 133 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/model_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Vertex AI hook.
.. spelling:word-list::
aiplatform
camelCase
"""
from __future__ import annotations
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform_v1 import ModelServiceClient
from google.cloud.aiplatform_v1.services.model_service.pagers import ListModelsPager
from google.cloud.aiplatform_v1.types import Model, model_service
from airflow import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class ModelServiceHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Endpoint Service APIs."""
def __init__(self, **kwargs):
if kwargs.get("delegate_to") is not None:
raise RuntimeError(
"The `delegate_to` parameter has been deprecated before and finally removed in this version"
" of Google Provider. You MUST convert it to `impersonate_chain`"
)
super().__init__(**kwargs)
def get_model_service_client(self, region: str | None = None) -> ModelServiceClient:
"""Returns ModelServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return ModelServiceClient(
credentials=self.get_credentials(), client_info=self.client_info, client_options=client_options
)
@staticmethod
def extract_model_id(obj: dict) -> str:
"""Returns unique id of the model."""
return obj["model"].rpartition("/")[-1]
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
@GoogleBaseHook.fallback_to_default_project_id
def delete_model(
self,
project_id: str,
region: str,
model: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Deletes a Model.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model: Required. The name of the Model resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_model_service_client(region)
name = client.model_path(project_id, region, model)
result = client.delete_model(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def export_model(
self,
project_id: str,
region: str,
model: str,
output_config: model_service.ExportModelRequest.OutputConfig | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Exports a trained, exportable Model to a location specified by the user.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model: Required. The resource name of the Model to export.
:param output_config: Required. The desired output location and configuration.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_model_service_client(region)
name = client.model_path(project_id, region, model)
result = client.export_model(
request={
"name": name,
"output_config": output_config,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
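    # Usage sketch (illustrative only): exporting a trained Model to a Cloud Storage bucket and
    # waiting for the operation. The export format and bucket are placeholders; valid formats
    # depend on the Model being exported.
    #
    #   hook = ModelServiceHook()
    #   operation = hook.export_model(
    #       project_id="my-project",
    #       region="us-central1",
    #       model="1234567890",
    #       output_config={
    #           "export_format_id": "custom-trained",
    #           "artifact_destination": {"output_uri_prefix": "gs://my-bucket/exports/"},
    #       },
    #   )
    #   hook.wait_for_operation(operation, timeout=1800)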
@GoogleBaseHook.fallback_to_default_project_id
def list_models(
self,
project_id: str,
region: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListModelsPager:
r"""
Lists Models in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: An expression for filtering the results of the request. For field names both
snake_case and camelCase are supported.
- ``model`` supports = and !=. ``model`` represents the Model ID, i.e. the last segment of the
Model's [resource name][google.cloud.aiplatform.v1.Model.name].
- ``display_name`` supports = and !=
            - ``labels`` supports general map functions, that is:
              -- ``labels.key=value`` - key:value equality
              -- ``labels.key:*`` or ``labels:key`` - key existence
-- A key including a space must be quoted. ``labels."a key"``.
:param page_size: The standard list page size.
:param page_token: The standard list page token. Typically obtained via
[ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token]
of the previous
[ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]
call.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_model_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_models(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def upload_model(
self,
project_id: str,
region: str,
model: Model | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Uploads a Model artifact into Vertex AI.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model: Required. The Model to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_model_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.upload_model(
request={
"parent": parent,
"model": model,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
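    # Usage sketch (illustrative only): uploading a model artifact and extracting the new model ID
    # from the finished operation. The artifact URI and serving container image are placeholders.
    #
    #   operation = hook.upload_model(
    #       project_id="my-project",
    #       region="us-central1",
    #       model={
    #           "display_name": "my-model",
    #           "artifact_uri": "gs://my-bucket/model/",
    #           "container_spec": {"image_uri": "us-docker.pkg.dev/my-project/serving/my-image:latest"},
    #       },
    #   )
    #   response = hook.wait_for_operation(operation)
    #   model_id = ModelServiceHook.extract_model_id({"model": response.model})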
| 9,602 | 39.518987 | 111 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/hooks/vertex_ai/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/credentials_provider.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a mechanism for providing temporary Google Cloud authentication."""
from __future__ import annotations
import json
import logging
import os.path
import tempfile
from contextlib import ExitStack, contextmanager
from typing import Collection, Generator, Sequence
from urllib.parse import urlencode
import google.auth
import google.auth.credentials
import google.oauth2.service_account
from google.auth import impersonated_credentials
from google.auth.environment_vars import CREDENTIALS, LEGACY_PROJECT, PROJECT
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud._internal_client.secret_manager_client import _SecretManagerClient
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.process_utils import patch_environ
log = logging.getLogger(__name__)
AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT = "AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT"
_DEFAULT_SCOPES: Sequence[str] = ("https://www.googleapis.com/auth/cloud-platform",)
def build_gcp_conn(
key_file_path: str | None = None,
scopes: Sequence[str] | None = None,
project_id: str | None = None,
) -> str:
"""
Build a uri that can be used as :envvar:`AIRFLOW_CONN_{CONN_ID}` with provided values.
:param key_file_path: Path to service key.
:param scopes: Required OAuth scopes.
:param project_id: The Google Cloud project id to be used for the connection.
:return: String representing Airflow connection.
"""
conn = "google-cloud-platform://?{}"
query_params = {}
if key_file_path:
query_params["key_path"] = key_file_path
if scopes:
scopes_string = ",".join(scopes)
query_params["scope"] = scopes_string
if project_id:
query_params["projects"] = project_id
query = urlencode(query_params)
return conn.format(query)
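# Usage sketch (illustrative only): the generated URI can be exported as an Airflow connection
# environment variable such as AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT. Paths and IDs are placeholders.
#
#   uri = build_gcp_conn(
#       key_file_path="/files/sa.json",
#       scopes=["https://www.googleapis.com/auth/cloud-platform"],
#       project_id="my-project",
#   )
#   # -> "google-cloud-platform://?key_path=%2Ffiles%2Fsa.json&scope=...&projects=my-project"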
@contextmanager
def provide_gcp_credentials(
key_file_path: str | None = None,
key_file_dict: dict | None = None,
) -> Generator[None, None, None]:
"""
Context manager that provides Google Cloud credentials for Application Default Credentials (ADC).
.. seealso::
`Application Default Credentials (ADC) strategy`__.
It can be used to provide credentials for external programs (e.g. gcloud) that expect authorization
file in ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable.
:param key_file_path: Path to file with Google Cloud Service Account .json file.
:param key_file_dict: Dictionary with credentials.
__ https://cloud.google.com/docs/authentication/production
"""
if not key_file_path and not key_file_dict:
raise ValueError("Please provide `key_file_path` or `key_file_dict`.")
if key_file_path and key_file_path.endswith(".p12"):
raise AirflowException("Legacy P12 key file are not supported, use a JSON key file.")
with tempfile.NamedTemporaryFile(mode="w+t") as conf_file:
if not key_file_path and key_file_dict:
conf_file.write(json.dumps(key_file_dict))
conf_file.flush()
key_file_path = conf_file.name
if key_file_path:
with patch_environ({CREDENTIALS: key_file_path}):
yield
else:
# We will use the default service account credentials.
yield
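# Usage sketch (illustrative only): temporarily exposing a service-account key through the
# GOOGLE_APPLICATION_CREDENTIALS environment variable. The key path is a placeholder.
#
#   with provide_gcp_credentials(key_file_path="/files/sa.json"):
#       # Within this block any ADC-based client or external program picks up that key.
#       credentials, project_id = google.auth.default()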
@contextmanager
def provide_gcp_connection(
key_file_path: str | None = None,
scopes: Sequence | None = None,
project_id: str | None = None,
) -> Generator[None, None, None]:
"""
Context manager that provides a temporary value of :envvar:`AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT` connection.
    It builds a new connection that includes the path to the provided service account JSON file, required scopes, and project id.
:param key_file_path: Path to file with Google Cloud Service Account .json file.
:param scopes: OAuth scopes for the connection
:param project_id: The id of Google Cloud project for the connection.
"""
if key_file_path and key_file_path.endswith(".p12"):
raise AirflowException("Legacy P12 key file are not supported, use a JSON key file.")
conn = build_gcp_conn(scopes=scopes, key_file_path=key_file_path, project_id=project_id)
with patch_environ({AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT: conn}):
yield
@contextmanager
def provide_gcp_conn_and_credentials(
key_file_path: str | None = None,
scopes: Sequence | None = None,
project_id: str | None = None,
) -> Generator[None, None, None]:
"""
    Context manager that provides GCP connection and credentials.
It provides both:
- Google Cloud credentials for application supporting `Application Default Credentials (ADC)
strategy`__.
- temporary value of :envvar:`AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT` connection
:param key_file_path: Path to file with Google Cloud Service Account .json file.
:param scopes: OAuth scopes for the connection
:param project_id: The id of Google Cloud project for the connection.
__ https://cloud.google.com/docs/authentication/production
"""
with ExitStack() as stack:
if key_file_path:
            stack.enter_context(provide_gcp_credentials(key_file_path))  # type: ignore
        if project_id:
            stack.enter_context(  # type: ignore
                patch_environ({PROJECT: project_id, LEGACY_PROJECT: project_id})
            )
        stack.enter_context(provide_gcp_connection(key_file_path, scopes, project_id))  # type: ignore
yield
class _CredentialProvider(LoggingMixin):
"""
Prepare the Credentials object for Google API and the associated project_id.
    Only either `key_path` or `keyfile_dict` should be provided, or an exception will
    occur. If neither of them is provided, default credentials for the current environment are returned.
:param key_path: Path to Google Cloud Service Account key file (JSON).
:param keyfile_dict: A dict representing Cloud Service Account as in the Credential JSON file
:param key_secret_name: Keyfile Secret Name in GCP Secret Manager.
:param key_secret_project_id: Project ID to read the secrets from. If not passed, the project ID from
default credentials will be used.
:param scopes: OAuth scopes for the connection
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param disable_logging: If true, disable all log messages, which allows you to use this
class to configure Logger.
:param target_principal: The service account to directly impersonate using short-term
credentials, if any. For this to work, the target_principal account must grant
the originating account the Service Account Token Creator IAM role.
:param delegates: optional chained list of accounts required to get the access_token of
target_principal. If set, the sequence of identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account and target_principal
granting the role to the last account from the list.
"""
def __init__(
self,
key_path: str | None = None,
keyfile_dict: dict[str, str] | None = None,
credential_config_file: dict[str, str] | str | None = None,
key_secret_name: str | None = None,
key_secret_project_id: str | None = None,
scopes: Collection[str] | None = None,
delegate_to: str | None = None,
disable_logging: bool = False,
target_principal: str | None = None,
delegates: Sequence[str] | None = None,
) -> None:
super().__init__()
key_options = [key_path, key_secret_name, keyfile_dict]
if len([x for x in key_options if x]) > 1:
raise AirflowException(
"The `keyfile_dict`, `key_path`, and `key_secret_name` fields "
"are all mutually exclusive. Please provide only one value."
)
self.key_path = key_path
self.keyfile_dict = keyfile_dict
self.credential_config_file = credential_config_file
self.key_secret_name = key_secret_name
self.key_secret_project_id = key_secret_project_id
self.scopes = scopes
self.delegate_to = delegate_to
self.disable_logging = disable_logging
self.target_principal = target_principal
self.delegates = delegates
def get_credentials_and_project(self) -> tuple[google.auth.credentials.Credentials, str]:
"""
Get current credentials and project ID.
:return: Google Auth Credentials
"""
if self.key_path:
credentials, project_id = self._get_credentials_using_key_path()
elif self.key_secret_name:
credentials, project_id = self._get_credentials_using_key_secret_name()
elif self.keyfile_dict:
credentials, project_id = self._get_credentials_using_keyfile_dict()
elif self.credential_config_file:
credentials, project_id = self._get_credentials_using_credential_config_file()
else:
credentials, project_id = self._get_credentials_using_adc()
if self.delegate_to:
if hasattr(credentials, "with_subject"):
credentials = credentials.with_subject(self.delegate_to)
else:
raise AirflowException(
"The `delegate_to` parameter cannot be used here as the current "
"authentication method does not support account impersonate. "
"Please use service-account for authorization."
)
if self.target_principal:
credentials = impersonated_credentials.Credentials(
source_credentials=credentials,
target_principal=self.target_principal,
delegates=self.delegates,
target_scopes=self.scopes,
)
project_id = _get_project_id_from_service_account_email(self.target_principal)
return credentials, project_id
def _get_credentials_using_keyfile_dict(self):
self._log_debug("Getting connection using JSON Dict")
# Depending on how the JSON was formatted, it may contain
# escaped newlines. Convert those to actual newlines.
self.keyfile_dict["private_key"] = self.keyfile_dict["private_key"].replace("\\n", "\n")
credentials = google.oauth2.service_account.Credentials.from_service_account_info(
self.keyfile_dict, scopes=self.scopes
)
project_id = credentials.project_id
return credentials, project_id
def _get_credentials_using_key_path(self):
if self.key_path.endswith(".p12"):
raise AirflowException("Legacy P12 key file are not supported, use a JSON key file.")
if not self.key_path.endswith(".json"):
raise AirflowException("Unrecognised extension for key file.")
self._log_debug("Getting connection using JSON key file %s", self.key_path)
credentials = google.oauth2.service_account.Credentials.from_service_account_file(
self.key_path, scopes=self.scopes
)
project_id = credentials.project_id
return credentials, project_id
def _get_credentials_using_key_secret_name(self):
self._log_debug("Getting connection using JSON key data from GCP secret: %s", self.key_secret_name)
# Use ADC to access GCP Secret Manager.
adc_credentials, adc_project_id = google.auth.default(scopes=self.scopes)
secret_manager_client = _SecretManagerClient(credentials=adc_credentials)
if not secret_manager_client.is_valid_secret_name(self.key_secret_name):
raise AirflowException("Invalid secret name specified for fetching JSON key data.")
secret_value = secret_manager_client.get_secret(
secret_id=self.key_secret_name,
project_id=self.key_secret_project_id if self.key_secret_project_id else adc_project_id,
)
if secret_value is None:
raise AirflowException(f"Failed getting value of secret {self.key_secret_name}.")
try:
keyfile_dict = json.loads(secret_value)
except json.decoder.JSONDecodeError:
raise AirflowException("Key data read from GCP Secret Manager is not valid JSON.")
credentials = google.oauth2.service_account.Credentials.from_service_account_info(
keyfile_dict, scopes=self.scopes
)
project_id = credentials.project_id
return credentials, project_id
def _get_credentials_using_credential_config_file(self):
if isinstance(self.credential_config_file, str) and os.path.exists(self.credential_config_file):
self._log_info(
f"Getting connection using credential configuration file: `{self.credential_config_file}`"
)
credentials, project_id = google.auth.load_credentials_from_file(
self.credential_config_file, scopes=self.scopes
)
else:
with tempfile.NamedTemporaryFile(mode="w+t") as temp_credentials_fd:
if isinstance(self.credential_config_file, dict):
self._log_info("Getting connection using credential configuration dict.")
temp_credentials_fd.write(json.dumps(self.credential_config_file))
elif isinstance(self.credential_config_file, str):
self._log_info("Getting connection using credential configuration string.")
temp_credentials_fd.write(self.credential_config_file)
temp_credentials_fd.flush()
credentials, project_id = google.auth.load_credentials_from_file(
temp_credentials_fd.name, scopes=self.scopes
)
return credentials, project_id
def _get_credentials_using_adc(self):
self._log_info(
"Getting connection using `google.auth.default()` since no explicit credentials are provided."
)
credentials, project_id = google.auth.default(scopes=self.scopes)
return credentials, project_id
def _log_info(self, *args, **kwargs) -> None:
if not self.disable_logging:
self.log.info(*args, **kwargs)
def _log_debug(self, *args, **kwargs) -> None:
if not self.disable_logging:
self.log.debug(*args, **kwargs)
def get_credentials_and_project_id(*args, **kwargs) -> tuple[google.auth.credentials.Credentials, str]:
"""Returns the Credentials object for Google API and the associated project_id."""
return _CredentialProvider(*args, **kwargs).get_credentials_and_project()
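# Usage sketch (illustrative only): resolving credentials from an explicit key file with custom
# scopes; called with no arguments it falls back to Application Default Credentials.
#
#   credentials, project_id = get_credentials_and_project_id(
#       key_path="/files/sa.json",
#       scopes=["https://www.googleapis.com/auth/cloud-platform"],
#   )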
def _get_scopes(scopes: str | None = None) -> Sequence[str]:
"""
Parse a comma-separated string containing OAuth2 scopes if `scopes` is provided; otherwise return default.
:param scopes: A comma-separated string containing OAuth2 scopes
:return: Returns the scope defined in the connection configuration, or the default scope
"""
return [s.strip() for s in scopes.split(",")] if scopes else _DEFAULT_SCOPES
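# Illustrative behaviour of the helper above (whitespace around commas is stripped; with no
# argument the default scope tuple is returned):
#
#   _get_scopes("https://www.googleapis.com/auth/bigquery, https://www.googleapis.com/auth/devstorage.read_only")
#   # -> ['https://www.googleapis.com/auth/bigquery', 'https://www.googleapis.com/auth/devstorage.read_only']
#   _get_scopes(None)
#   # -> ('https://www.googleapis.com/auth/cloud-platform',)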
def _get_target_principal_and_delegates(
impersonation_chain: str | Sequence[str] | None = None,
) -> tuple[str | None, Sequence[str] | None]:
"""
Get the target_principal and optional list of delegates from impersonation_chain.
Analyze contents of impersonation_chain and return target_principal (the service account
to directly impersonate using short-term credentials, if any) and optional list of delegates
required to get the access_token of target_principal.
:param impersonation_chain: the service account to impersonate or a chained list leading to this
account
:return: Returns the tuple of target_principal and delegates
"""
if not impersonation_chain:
return None, None
if isinstance(impersonation_chain, str):
return impersonation_chain, None
return impersonation_chain[-1], impersonation_chain[:-1]
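# Illustrative behaviour of the helper above (service account emails are placeholders):
#
#   _get_target_principal_and_delegates(None)
#   # -> (None, None)
#   _get_target_principal_and_delegates("sa-final@my-project.iam.gserviceaccount.com")
#   # -> ('sa-final@my-project.iam.gserviceaccount.com', None)
#   _get_target_principal_and_delegates(["sa-1@p.iam.gserviceaccount.com", "sa-final@p.iam.gserviceaccount.com"])
#   # -> ('sa-final@p.iam.gserviceaccount.com', ['sa-1@p.iam.gserviceaccount.com'])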
def _get_project_id_from_service_account_email(service_account_email: str) -> str:
"""
Extracts project_id from service account's email address.
:param service_account_email: email of the service account.
:return: Returns the project_id of the provided service account.
"""
try:
return service_account_email.split("@")[1].split(".")[0]
except IndexError:
raise AirflowException(
f"Could not extract project_id from service account's email: {service_account_email}."
)
| 17,554 | 41.506053 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/bigquery_get_data.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from collections.abc import Iterator
from logging import Logger
from google.cloud.bigquery.table import Row, RowIterator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
def bigquery_get_data(
logger: Logger,
dataset_id: str,
table_id: str,
big_query_hook: BigQueryHook,
batch_size: int,
selected_fields: list[str] | str | None,
) -> Iterator:
logger.info("Fetching Data from:")
logger.info("Dataset: %s ; Table: %s", dataset_id, table_id)
i = 0
while True:
rows: list[Row] | RowIterator = big_query_hook.list_rows(
dataset_id=dataset_id,
table_id=table_id,
max_results=batch_size,
selected_fields=selected_fields,
start_index=i * batch_size,
)
if isinstance(rows, RowIterator):
raise TypeError("BigQueryHook.list_rows() returns iterator when return_iterator=False (default)")
if len(rows) == 0:
logger.info("Job Finished")
return
logger.info("Total Extracted rows: %s", len(rows) + i * batch_size)
yield [row.values() for row in rows]
i += 1
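# Usage sketch (illustrative only): consuming the generator batch by batch. The hook, dataset and
# table names are placeholders, and ``import logging`` plus a ``process`` callable are assumed.
#
#   hook = BigQueryHook(gcp_conn_id="google_cloud_default")
#   for batch in bigquery_get_data(
#       logger=logging.getLogger(__name__),
#       dataset_id="my_dataset",
#       table_id="my_table",
#       big_query_hook=hook,
#       batch_size=1000,
#       selected_fields="name,value",
#   ):
#       process(batch)  # ``batch`` is a list of per-row value tuples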
| 1,988 | 32.15 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/field_sanitizer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sanitizer for body fields sent via Google Cloud API.
The sanitizer removes the specified fields from the body.
Context
-------
In some cases where a Google Cloud operation requires modification of existing resources (such
as instances or instance templates) we need to sanitize the body of the resources returned
via Google Cloud APIs. This is the case when we retrieve information from Google Cloud first,
modify the body and either update the existing resource or create a new one with the
modified body. Usually when you retrieve a resource from Google Cloud you get some extra fields which
are Output-only, and we need to delete those fields if we want to use
the body as input for a subsequent create/insert type operation.
Field specification
-------------------
Specification of fields is an array of strings which denote names of fields to be removed.
The field can be either a direct field name to remove from the body or the full
specification of the path to delete, with components separated by '.'
>>> FIELDS_TO_SANITIZE = [
>>> "kind",
>>> "properties.disks.kind",
>>> "properties.metadata.kind",
>>>]
>>> body = {
>>> "kind": "compute#instanceTemplate",
>>> "name": "instance",
>>> "properties": {
>>> "disks": [
>>> {
>>> "name": "a",
>>> "kind": "compute#attachedDisk",
>>> "type": "PERSISTENT",
>>> "mode": "READ_WRITE",
>>> },
>>> {
>>> "name": "b",
>>> "kind": "compute#attachedDisk",
>>> "type": "PERSISTENT",
>>> "mode": "READ_WRITE",
>>> }
>>> ],
>>> "metadata": {
>>> "kind": "compute#metadata",
>>> "fingerprint": "GDPUYxlwHe4="
>>> },
>>> }
>>> }
>>> sanitizer=GcpBodyFieldSanitizer(FIELDS_TO_SANITIZE)
>>> sanitizer.sanitize(body)
>>> json.dumps(body, indent=2)
{
"name": "instance",
"properties": {
"disks": [
{
"name": "a",
"type": "PERSISTENT",
"mode": "READ_WRITE",
},
{
"name": "b",
"type": "PERSISTENT",
"mode": "READ_WRITE",
}
],
"metadata": {
"fingerprint": "GDPUYxlwHe4="
},
}
}
Note that the components of the path can be either dictionaries or arrays of dictionaries.
In case they are dictionaries, subsequent components name the key of the field; in case of
arrays, the sanitizer iterates through all dictionaries in the array and searches for the
components in all elements of the array.
"""
from __future__ import annotations
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
class GcpFieldSanitizerException(AirflowException):
"""Thrown when sanitizer finds unexpected field type in the path (other than dict or array)."""
class GcpBodyFieldSanitizer(LoggingMixin):
"""Sanitizes the body according to specification.
:param sanitize_specs: array of strings that specifies which fields to remove
"""
def __init__(self, sanitize_specs: list[str]) -> None:
super().__init__()
self._sanitize_specs = sanitize_specs
def _sanitize(self, dictionary, remaining_field_spec, current_path):
field_split = remaining_field_spec.split(".", 1)
if len(field_split) == 1:
field_name = field_split[0]
if field_name in dictionary:
self.log.info("Deleted %s [%s]", field_name, current_path)
del dictionary[field_name]
else:
self.log.debug(
"The field %s is missing in %s at the path %s.", field_name, dictionary, current_path
)
else:
field_name = field_split[0]
remaining_path = field_split[1]
child = dictionary.get(field_name)
if child is None:
self.log.debug(
"The field %s is missing in %s at the path %s. ", field_name, dictionary, current_path
)
elif isinstance(child, dict):
self._sanitize(child, remaining_path, f"{current_path}.{field_name}")
elif isinstance(child, list):
for index, elem in enumerate(child):
if not isinstance(elem, dict):
self.log.warning(
"The field %s element at index %s is of wrong type. "
"It should be dict and is %s. Skipping it.",
current_path,
index,
elem,
                        )
                        continue
                    self._sanitize(elem, remaining_path, f"{current_path}.{field_name}[{index}]")
else:
self.log.warning(
"The field %s is of wrong type. It should be dict or list and it is %s. Skipping it.",
current_path,
child,
)
def sanitize(self, body) -> None:
"""Sanitizes the body according to specification."""
for elem in self._sanitize_specs:
self._sanitize(body, elem, "")
| 6,077 | 36.288344 | 106 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/bigquery.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
def bq_cast(string_field: str, bq_type: str) -> None | int | float | bool | str:
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == "INTEGER":
return int(string_field)
elif bq_type in ("FLOAT", "TIMESTAMP"):
return float(string_field)
elif bq_type == "BOOLEAN":
if string_field not in ["true", "false"]:
raise ValueError(f"{string_field} must have value 'true' or 'false'")
return string_field == "true"
else:
return string_field
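# Illustrative behaviour of ``bq_cast`` (BigQuery returns every field as a string, so the helper
# restores native Python types):
#
#   bq_cast("42", "INTEGER")              # -> 42
#   bq_cast("1685000000.0", "TIMESTAMP")  # -> 1685000000.0
#   bq_cast("true", "BOOLEAN")            # -> True
#   bq_cast(None, "STRING")               # -> None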
def convert_job_id(job_id: str | list[str], project_id: str, location: str | None) -> Any:
"""
    Helper method that converts a job_id (or list of job_ids) to the path format: project_id:location:job_id.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param location: Optional. The ID of the Google Cloud region where workspace located.
:param job_id: Required. The ID of the job.
:return: str or list[str] of project_id:location:job_id.
"""
location = location if location else "US"
if isinstance(job_id, list):
return [f"{project_id}:{location}:{i}" for i in job_id]
else:
return f"{project_id}:{location}:{job_id}"
| 2,192 | 38.160714 | 92 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/dataform.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from enum import Enum
from typing import Mapping
from airflow.providers.google.cloud.operators.dataform import (
DataformInstallNpmPackagesOperator,
DataformMakeDirectoryOperator,
DataformWriteFileOperator,
)
class DataformLocations(str, Enum):
"""Enum for storing available locations for resources in Dataform."""
US = "US"
EUROPE = "EU"
def make_initialization_workspace_flow(
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
package_name: str | None = None,
without_installation: bool = False,
) -> tuple:
"""
    Creates a flow which simulates the initialization of the default project.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace which requires initialization.
:param package_name: Name of the package. If value is not provided then workspace_id will be used.
    :param without_installation: Defines whether installation of npm packages should be added to the flow.
"""
make_definitions_directory = DataformMakeDirectoryOperator(
task_id="make-definitions-directory",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
directory_path="definitions",
)
first_view_content = b"""
-- This is an example SQLX file to help you learn the basics of Dataform.
-- Visit https://cloud.google.com/dataform/docs/how-to for more information on how to configure
-- your SQL workflow.
-- You can delete this file, then commit and push your changes to your repository when you are ready.
-- Config blocks allow you to configure, document, and test your data assets.
config {
type: "view", // Creates a view in BigQuery. Try changing to "table" instead.
columns: {
test: "A description for the test column", // Column descriptions are pushed to BigQuery.
}
}
-- The rest of a SQLX file contains your SELECT statement used to create the table.
SELECT 1 as test
"""
make_first_view_file = DataformWriteFileOperator(
task_id="write-first-view",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
filepath="definitions/first_view.sqlx",
contents=first_view_content,
)
second_view_content = b"""
config { type: "view" }
-- Use the ref() function to manage dependencies.
-- Learn more about ref() and other built in functions
-- here: https://cloud.google.com/dataform/docs/dataform-core
SELECT test from ${ref("first_view")}
"""
make_second_view_file = DataformWriteFileOperator(
task_id="write-second-view",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
filepath="definitions/second_view.sqlx",
contents=second_view_content,
)
make_includes_directory = DataformMakeDirectoryOperator(
task_id="make-includes-directory",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
directory_path="includes",
)
gitignore_contents = b"""
node_modules/
"""
make_gitignore_file = DataformWriteFileOperator(
task_id="write-gitignore-file",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
filepath=".gitignore",
contents=gitignore_contents,
)
default_location: str = define_default_location(region).value
dataform_config_content = json.dumps(
{
"defaultSchema": "dataform",
"assertionSchema": "dataform_assertions",
"warehouse": "bigquery",
"defaultDatabase": project_id,
"defaultLocation": default_location,
},
indent=4,
).encode()
make_dataform_config_file = DataformWriteFileOperator(
task_id="write-dataform-config-file",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
filepath="dataform.json",
contents=dataform_config_content,
)
package_name = package_name if package_name else workspace_id
package_json_content = json.dumps(
{
"name": package_name,
"dependencies": {
"@dataform/core": "2.0.1",
},
},
indent=4,
).encode()
make_package_json_file = DataformWriteFileOperator(
task_id="write-package-json",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
filepath="package.json",
contents=package_json_content,
)
(
make_definitions_directory
>> make_first_view_file
>> make_second_view_file
>> make_gitignore_file
>> make_dataform_config_file
>> make_package_json_file
)
if without_installation:
make_package_json_file >> make_includes_directory
else:
install_npm_packages = DataformInstallNpmPackagesOperator(
task_id="install-npm-packages",
project_id=project_id,
region=region,
repository_id=repository_id,
workspace_id=workspace_id,
)
make_package_json_file >> install_npm_packages >> make_includes_directory
return make_definitions_directory, make_includes_directory
def define_default_location(region: str) -> DataformLocations:
if "us" in region:
return DataformLocations.US
elif "europe" in region:
return DataformLocations.EUROPE
regions_mapping: Mapping[str, DataformLocations] = {}
return regions_mapping[region]
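# Usage sketch (illustrative only): wiring the generated initialization flow into a DAG. The DAG
# imports, IDs, and the surrounding ``create_workspace``/``run_workflow`` tasks are placeholders;
# the function returns the first and last tasks of the flow so neighbours can be chained onto them.
#
#   with DAG(dag_id="dataform_init", start_date=datetime(2023, 1, 1), schedule=None) as dag:
#       first_task, last_task = make_initialization_workspace_flow(
#           project_id="my-project",
#           region="us-central1",
#           repository_id="my-repository",
#           workspace_id="my-workspace",
#       )
#       create_workspace >> first_task
#       last_task >> run_workflow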
| 7,029 | 32.636364 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/mlengine_operator_utils.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains helper functions for MLEngine operators."""
from __future__ import annotations
import base64
import json
import os
import re
from typing import Callable, Iterable, TypeVar
from urllib.parse import urlsplit
import dill
from airflow import DAG
from airflow.exceptions import AirflowException
from airflow.operators.python import PythonOperator
from airflow.providers.apache.beam.hooks.beam import BeamRunnerType
from airflow.providers.apache.beam.operators.beam import BeamRunPythonPipelineOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.operators.mlengine import MLEngineStartBatchPredictionJobOperator
T = TypeVar("T", bound=Callable)
def create_evaluate_ops(
task_prefix: str,
data_format: str,
input_paths: list[str],
prediction_path: str,
metric_fn_and_keys: tuple[T, Iterable[str]],
validate_fn: T,
batch_prediction_job_id: str | None = None,
region: str | None = None,
project_id: str | None = None,
dataflow_options: dict | None = None,
model_uri: str | None = None,
model_name: str | None = None,
version_name: str | None = None,
dag: DAG | None = None,
py_interpreter="python3",
) -> tuple[MLEngineStartBatchPredictionJobOperator, BeamRunPythonPipelineOperator, PythonOperator]:
"""
    Creates Operators needed for model evaluation and returns them.
    It gets predictions over the inputs via the Cloud ML Engine BatchPrediction API by
    calling MLEngineBatchPredictionOperator, then summarizes and validates
    the result via Cloud Dataflow using DataFlowPythonOperator.
For details and pricing about Batch prediction, please refer to the website
https://cloud.google.com/ml-engine/docs/how-tos/batch-predict
and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/
It returns three chained operators for prediction, summary, and validation,
named as ``<prefix>-prediction``, ``<prefix>-summary``, and ``<prefix>-validation``,
respectively.
(``<prefix>`` should contain only alphanumeric characters or hyphen.)
The upstream and downstream can be set accordingly like:
.. code-block:: python
pred, _, val = create_evaluate_ops(...)
pred.set_upstream(upstream_op)
...
downstream_op.set_upstream(val)
Callers will provide two python callables, metric_fn and validate_fn, in
order to customize the evaluation behavior as they wish.
- metric_fn receives a dictionary per instance derived from json in the
batch prediction result. The keys might vary depending on the model.
It should return a tuple of metrics.
    - validate_fn receives a dictionary of the averaged metrics that metric_fn
generated over all instances.
The key/value of the dictionary matches to what's given by
metric_fn_and_keys arg.
The dictionary contains an additional metric, 'count' to represent the
total number of instances received for evaluation.
    The function should raise an exception to mark the task as failed when the
    validation result is not good enough to proceed (i.e. to set the trained
    version as default).
Typical examples are like this:
.. code-block:: python
def get_metric_fn_and_keys():
import math # imports should be outside of the metric_fn below.
def error_and_squared_error(inst):
label = float(inst["input_label"])
classes = float(inst["classes"]) # 0 or 1
err = abs(classes - label)
squared_err = math.pow(classes - label, 2)
return (err, squared_err) # returns a tuple.
return error_and_squared_error, ["err", "mse"] # key order must match.
def validate_err_and_count(summary):
if summary["err"] > 0.2:
raise ValueError("Too high err>0.2; summary=%s" % summary)
if summary["mse"] > 0.05:
raise ValueError("Too high mse>0.05; summary=%s" % summary)
if summary["count"] < 1000:
raise ValueError("Too few instances<1000; summary=%s" % summary)
return summary
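    A hypothetical wiring of the two callables above into this helper could look as follows
    (the GCS paths, model name and DAG object are placeholders, not part of the original
    example):
    .. code-block:: python
        pred, summary, validate = create_evaluate_ops(
            task_prefix="eval",
            data_format="TEXT",
            input_paths=["gs://my-bucket/eval/input*"],
            prediction_path="gs://my-bucket/eval/output",
            metric_fn_and_keys=get_metric_fn_and_keys(),
            validate_fn=validate_err_and_count,
            model_name="my-model",
            version_name="v1",
            dag=dag,
        )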
For the details on the other BatchPrediction-related arguments (project_id,
job_id, region, data_format, input_paths, prediction_path, model_uri),
please refer to MLEngineBatchPredictionOperator too.
:param task_prefix: a prefix for the tasks. Only alphanumeric characters and
hyphen are allowed (no underscores), since this will be used as dataflow
job name, which doesn't allow other characters.
:param data_format: either of 'TEXT', 'TF_RECORD', 'TF_RECORD_GZIP'
:param input_paths: a list of input paths to be sent to BatchPrediction.
:param prediction_path: GCS path to put the prediction results in.
:param metric_fn_and_keys: a tuple of metric_fn and metric_keys:
- metric_fn is a function that accepts a dictionary (for an instance),
and returns a tuple of metric(s) that it calculates.
- metric_keys is a list of strings to denote the key of each metric.
:param validate_fn: a function to validate whether the averaged metric(s) is
good enough to push the model.
:param batch_prediction_job_id: the id to use for the Cloud ML Batch
prediction job. Passed directly to the MLEngineBatchPredictionOperator as
the job_id argument.
:param project_id: the Google Cloud project id in which to execute
Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['project_id']` will be used.
:param region: the Google Cloud region in which to execute Cloud ML
Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['region']` will be used.
:param dataflow_options: options to run Dataflow jobs. If None, then the
`dag`'s `default_args['dataflow_default_options']` will be used.
:param model_uri: GCS path of the model exported by Tensorflow using
``tensorflow.estimator.export_savedmodel()``. It cannot be used with
model_name or version_name below. See MLEngineBatchPredictionOperator for
more detail.
:param model_name: Used to indicate a model to use for prediction. Can be
used in combination with version_name, but cannot be used together with
model_uri. See MLEngineBatchPredictionOperator for more detail. If None,
then the `dag`'s `default_args['model_name']` will be used.
:param version_name: Used to indicate a model version to use for prediction,
in combination with model_name. Cannot be used together with model_uri.
See MLEngineBatchPredictionOperator for more detail. If None, then the
`dag`'s `default_args['version_name']` will be used.
:param dag: The `DAG` to use for all Operators.
:param py_interpreter: Python version of the beam pipeline.
        If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
    :returns: a tuple of three operators, (prediction, summary, validation)
"""
batch_prediction_job_id = batch_prediction_job_id or ""
dataflow_options = dataflow_options or {}
region = region or ""
# Verify that task_prefix doesn't have any special characters except hyphen
# '-', which is the only allowed non-alphanumeric character by Dataflow.
if not re.match(r"^[a-zA-Z][-A-Za-z0-9]*$", task_prefix):
raise AirflowException(
"Malformed task_id for DataFlowPythonOperator (only alphanumeric "
"and hyphens are allowed but got: " + task_prefix
)
metric_fn, metric_keys = metric_fn_and_keys
if not callable(metric_fn):
raise AirflowException("`metric_fn` param must be callable.")
if not callable(validate_fn):
raise AirflowException("`validate_fn` param must be callable.")
if dag is not None and dag.default_args is not None:
default_args = dag.default_args
project_id = project_id or default_args.get("project_id")
region = region or default_args["region"]
model_name = model_name or default_args.get("model_name")
version_name = version_name or default_args.get("version_name")
dataflow_options = dataflow_options or default_args.get("dataflow_default_options")
evaluate_prediction = MLEngineStartBatchPredictionJobOperator(
task_id=(task_prefix + "-prediction"),
project_id=project_id,
job_id=batch_prediction_job_id,
region=region,
data_format=data_format,
input_paths=input_paths,
output_path=prediction_path,
uri=model_uri,
model_name=model_name,
version_name=version_name,
dag=dag,
)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True)).decode()
evaluate_summary = BeamRunPythonPipelineOperator(
task_id=(task_prefix + "-summary"),
runner=BeamRunnerType.DataflowRunner,
py_file=os.path.join(os.path.dirname(__file__), "mlengine_prediction_summary.py"),
default_pipeline_options=dataflow_options,
pipeline_options={
"prediction_path": prediction_path,
"metric_fn_encoded": metric_fn_encoded,
"metric_keys": ",".join(metric_keys),
},
py_interpreter=py_interpreter,
py_requirements=["apache-beam[gcp]>=2.46.0"],
dag=dag,
)
evaluate_summary.set_upstream(evaluate_prediction)
def apply_validate_fn(*args, templates_dict, **kwargs):
prediction_path = templates_dict["prediction_path"]
scheme, bucket, obj, _, _ = urlsplit(prediction_path)
if scheme != "gs" or not bucket or not obj:
raise ValueError(f"Wrong format prediction_path: {prediction_path}")
summary = os.path.join(obj.strip("/"), "prediction.summary.json")
gcs_hook = GCSHook()
summary = json.loads(gcs_hook.download(bucket, summary).decode("utf-8"))
return validate_fn(summary)
evaluate_validation = PythonOperator(
task_id=(task_prefix + "-validation"),
python_callable=apply_validate_fn,
templates_dict={"prediction_path": prediction_path},
dag=dag,
)
evaluate_validation.set_upstream(evaluate_summary)
return evaluate_prediction, evaluate_summary, evaluate_validation
| 11,336 | 41.943182 | 101 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/field_validator.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Validator for body fields sent via Google Cloud API.
The validator performs validation of the body (being dictionary of fields) that
is sent in the API request to Google Cloud (via ``googleclient`` API usually).
Context
-------
The specification mostly focuses on helping Airflow DAG developers in the development
phase. You can build your own Google Cloud operator (such as GcfDeployOperator for example) which
can have built-in validation specification for the particular API. It's super helpful
when developer plays with different fields and their values at the initial phase of
DAG development. Most of the Google Cloud APIs perform their own validation on the
server side, but most of the requests are asynchronous and you need to wait for result
of the operation. This takes precious time and slows
down iteration over the API. BodyFieldValidator is meant to be used on the client side
and it should therefore provide an instant feedback to the developer on misspelled or
wrong type of parameters.
The validation should be performed in "execute()" method call in order to allow
template parameters to be expanded before validation is performed.
Types of fields
---------------
Specification is an array of dictionaries - each dictionary describes field, its type,
validation, optionality, api_version supported and nested fields (for unions and dicts).
Typically (for clarity and in order to aid syntax highlighting) the array of
dicts should be defined as series of dict() executions. Fragment of example
specification might look as follows::
    SPECIFICATION = [
        dict(name="an_union", type="union", optional=True, fields=[
            dict(name="variant_1", type="dict"),
            dict(name="variant_2", regexp=r'^.+$', api_version='v1beta2'),
        ]),
        dict(name="a_dict", type="dict", fields=[
            dict(name="field_1", type="dict"),
            dict(name="field_2", regexp=r'^.+$'),
        ]),
        ...
    ]
Each field should have key = "name" indicating field name. The field can be of one of the
following types:
* Dict fields: (key = "type", value="dict"):
Field of this type should contain nested fields in form of an array of dicts.
Each of the fields in the array is then expected (unless marked as optional)
and validated recursively. If an extra field is present in the dictionary, warning is
printed in log file (but the validation succeeds - see the Forward-compatibility notes)
* List fields: (key = "type", value="list"):
Field of this type should be a list. Only the type correctness is validated.
The contents of a list are not subject to validation.
* Union fields (key = "type", value="union"): field of this type should contain nested
fields in form of an array of dicts. One of the fields (and only one) should be
present (unless the union is marked as optional). If more than one union field is
present, FieldValidationException is raised. If none of the union fields is
present - warning is printed in the log (see below Forward-compatibility notes).
* Fields validated for non-emptiness: (key = "allow_empty") - this applies only to
fields the value of which is a string, and it allows to check for non-emptiness of
the field (allow_empty=False).
* Regexp-validated fields: (key = "regexp") - fields of this type are assumed to be
strings and they are validated with the regexp specified. Remember that the regexps
should ideally contain ^ at the beginning and $ at the end to make sure that
the whole field content is validated. Typically such regexp
validations should be used carefully and sparingly (see Forward-compatibility
notes below).
* Custom-validated fields: (key = "custom_validation") - fields of this type are validated
using method specified via custom_validation field. Any exception thrown in the custom
validation will be turned into FieldValidationException and will cause validation to
fail. Such custom validations might be used to check numeric fields (including
ranges of values), booleans or any other types of fields.
* API version: (key="api_version") if API version is specified, then the field will only
be validated when api_version used at field validator initialization matches exactly the
version specified. If you want to declare fields that are available in several
versions of the APIs, you should specify the field as many times as many API versions
should be supported (each time with different API version).
* if none of the keys ("type", "regexp", "custom_validation") is present - the field is not validated
You can see some of the field examples in EXAMPLE_VALIDATION_SPECIFICATION.
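A minimal usage sketch (the body below is made up for illustration)::
    validator = GcpBodyFieldValidator(EXAMPLE_VALIDATION_SPECIFICATION, api_version='v1')
    body = {"name": "my-function", "availableMemoryMb": 256, "variant_1": "some-value"}
    validator.validate(body)  # raises GcpFieldValidationException if the body is invalid
Note that union variants ("variant_1" here) are looked up at the same level of the body as
the other fields, not nested under the union's name.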
Forward-compatibility notes
---------------------------
Certain decisions are crucial to allow the client APIs to work also with future API
versions. Since body attached is passed to the API's call, this is entirely
possible to pass-through any new fields in the body (for future API versions) -
albeit without validation on the client side - they can and will still be validated
on the server side usually.
Here are the guidelines that you should follow to make validation forward-compatible:
* most of the fields are not validated for their content. It's possible to use regexp
in some specific cases that are guaranteed not to change in the future, but for most
fields regexp validation should be r'^.+$' indicating check for non-emptiness
* api_version is not validated - user can pass any future version of the api here. The API
  version is only used to filter parameters that are marked as present in this api version.
* any new (not present in the specification) fields in the body are allowed (not verified).
  For dictionaries, new fields can be added to dictionaries by future calls. However if an
  unknown field in a dictionary is added, a warning is logged by the client (but validation
  remains successful). This is a very nice feature to protect against typos in names.
* For unions, newly added union variants can be added by future calls and they will
pass validation, however the content or presence of those fields will not be validated.
This means that it's possible to send a new non-validated union field together with an
old validated field and this problem will not be detected by the client. In such case
warning will be printed.
* When you add validator to an operator, you should also add ``validate_body`` parameter
(default = True) to __init__ of such operators - when it is set to False,
no validation should be performed. This is a safeguard for totally unpredicted and
backwards-incompatible changes that might sometimes occur in the APIs.
"""
from __future__ import annotations
import re
from typing import Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
COMPOSITE_FIELD_TYPES = ["union", "dict", "list"]
class GcpFieldValidationException(AirflowException):
"""Thrown when validation finds dictionary field not valid according to specification."""
class GcpValidationSpecificationException(AirflowException):
"""Thrown when validation specification is wrong.
This should only happen during development as ideally
specification itself should not be invalid ;) .
"""
def _int_greater_than_zero(value):
if int(value) <= 0:
raise GcpFieldValidationException("The available memory has to be greater than 0")
EXAMPLE_VALIDATION_SPECIFICATION = [
dict(name="name", allow_empty=False),
dict(name="description", allow_empty=False, optional=True),
dict(name="availableMemoryMb", custom_validation=_int_greater_than_zero, optional=True),
dict(name="labels", optional=True, type="dict"),
dict(
name="an_union",
type="union",
fields=[
dict(name="variant_1", regexp=r"^.+$"),
dict(name="variant_2", regexp=r"^.+$", api_version="v1beta2"),
dict(name="variant_3", type="dict", fields=[dict(name="url", regexp=r"^.+$")]),
dict(name="variant_4"),
],
),
]
class GcpBodyFieldValidator(LoggingMixin):
"""Validates correctness of request body according to specification.
The specification can describe various type of
fields including custom validation, and union of fields. This validator is
to be reusable by various operators. See the EXAMPLE_VALIDATION_SPECIFICATION
for some examples and explanations of how to create specification.
:param validation_specs: dictionary describing validation specification
:param api_version: Version of the api used (for example v1)
"""
def __init__(self, validation_specs: Sequence[dict], api_version: str) -> None:
super().__init__()
self._validation_specs = validation_specs
self._api_version = api_version
@staticmethod
def _get_field_name_with_parent(field_name, parent):
if parent:
return parent + "." + field_name
return field_name
@staticmethod
def _sanity_checks(
children_validation_specs: dict,
field_type: str,
full_field_path: str,
regexp: str,
allow_empty: bool,
custom_validation: Callable | None,
value,
) -> None:
if value is None and field_type != "union":
raise GcpFieldValidationException(
f"The required body field '{full_field_path}' is missing. Please add it."
)
if regexp and field_type:
raise GcpValidationSpecificationException(
f"The validation specification entry '{full_field_path}' has both type and regexp. "
"The regexp is only allowed without type (i.e. assume type is 'str' that can be "
"validated with regexp)"
)
if allow_empty is not None and field_type:
raise GcpValidationSpecificationException(
f"The validation specification entry '{full_field_path}' has both type and allow_empty. "
"The allow_empty is only allowed without type (i.e. assume type is 'str' that can "
"be validated with allow_empty)"
)
if children_validation_specs and field_type not in COMPOSITE_FIELD_TYPES:
raise GcpValidationSpecificationException(
f"Nested fields are specified in field '{full_field_path}' of type '{field_type}'. "
f"Nested fields are only allowed for fields of those types: ('{COMPOSITE_FIELD_TYPES}')."
)
if custom_validation and field_type:
raise GcpValidationSpecificationException(
f"The validation specification field '{full_field_path}' has both type and "
f"custom_validation. Custom validation is only allowed without type."
)
@staticmethod
def _validate_regexp(full_field_path: str, regexp: str, value: str) -> None:
if not re.match(regexp, value):
            # Note: re.match only anchors at the start of the value, so specification regexps
            # are expected to be all-or-nothing (i.e. end with $ when a full match is required).
raise GcpFieldValidationException(
f"The body field '{full_field_path}' of value '{value}' does not match the field "
f"specification regexp: '{regexp}'."
)
@staticmethod
def _validate_is_empty(full_field_path: str, value: str) -> None:
if not value:
raise GcpFieldValidationException(
f"The body field '{full_field_path}' can't be empty. Please provide a value."
)
def _validate_dict(self, children_validation_specs: dict, full_field_path: str, value: dict) -> None:
for child_validation_spec in children_validation_specs:
self._validate_field(
validation_spec=child_validation_spec, dictionary_to_validate=value, parent=full_field_path
)
all_dict_keys = [spec["name"] for spec in children_validation_specs]
for field_name in value.keys():
if field_name not in all_dict_keys:
self.log.warning(
"The field '%s' is in the body, but is not specified in the "
"validation specification '%s'. "
"This might be because you are using newer API version and "
"new field names defined for that version. Then the warning "
"can be safely ignored, or you might want to upgrade the operator"
"to the version that supports the new API version.",
self._get_field_name_with_parent(field_name, full_field_path),
children_validation_specs,
)
def _validate_union(
self, children_validation_specs: dict, full_field_path: str, dictionary_to_validate: dict
) -> None:
field_found = False
found_field_name = None
for child_validation_spec in children_validation_specs:
# Forcing optional so that we do not have to type optional = True
# in specification for all union fields
new_field_found = self._validate_field(
validation_spec=child_validation_spec,
dictionary_to_validate=dictionary_to_validate,
parent=full_field_path,
force_optional=True,
)
field_name = child_validation_spec["name"]
if new_field_found and field_found:
raise GcpFieldValidationException(
f"The mutually exclusive fields '{field_name}' and '{found_field_name}' belonging to "
f"the union '{full_field_path}' are both present. Please remove one"
)
if new_field_found:
field_found = True
found_field_name = field_name
if not field_found:
self.log.warning(
"There is no '%s' union defined in the body %s. "
"Validation expected one of '%s' but could not find any. It's possible "
"that you are using newer API version and there is another union variant "
"defined for that version. Then the warning can be safely ignored, "
"or you might want to upgrade the operator to the version that "
"supports the new API version.",
full_field_path,
dictionary_to_validate,
[field["name"] for field in children_validation_specs],
)
def _validate_field(self, validation_spec, dictionary_to_validate, parent=None, force_optional=False):
"""
Validates if field is OK.
:param validation_spec: specification of the field
:param dictionary_to_validate: dictionary where the field should be present
:param parent: full path of parent field
:param force_optional: forces the field to be optional
(all union fields have force_optional set to True)
:return: True if the field is present
"""
field_name = validation_spec["name"]
field_type = validation_spec.get("type")
optional = validation_spec.get("optional")
regexp = validation_spec.get("regexp")
allow_empty = validation_spec.get("allow_empty")
children_validation_specs = validation_spec.get("fields")
required_api_version = validation_spec.get("api_version")
custom_validation = validation_spec.get("custom_validation")
full_field_path = self._get_field_name_with_parent(field_name=field_name, parent=parent)
if required_api_version and required_api_version != self._api_version:
self.log.debug(
"Skipping validation of the field '%s' for API version '%s' "
"as it is only valid for API version '%s'",
field_name,
self._api_version,
required_api_version,
)
return False
value = dictionary_to_validate.get(field_name)
if (optional or force_optional) and value is None:
self.log.debug("The optional field '%s' is missing. That's perfectly OK.", full_field_path)
return False
# Certainly down from here the field is present (value is not None)
# so we should only return True from now on
self._sanity_checks(
children_validation_specs=children_validation_specs,
field_type=field_type,
full_field_path=full_field_path,
regexp=regexp,
allow_empty=allow_empty,
custom_validation=custom_validation,
value=value,
)
if allow_empty is False:
self._validate_is_empty(full_field_path, value)
if regexp:
self._validate_regexp(full_field_path, regexp, value)
elif field_type == "dict":
if not isinstance(value, dict):
raise GcpFieldValidationException(
f"The field '{full_field_path}' should be of dictionary type according to "
f"the specification '{validation_spec}' but it is '{value}'"
)
if children_validation_specs is None:
self.log.debug(
"The dict field '%s' has no nested fields defined in the "
"specification '%s'. That's perfectly ok - it's content will "
"not be validated.",
full_field_path,
validation_spec,
)
else:
self._validate_dict(children_validation_specs, full_field_path, value)
elif field_type == "union":
if not children_validation_specs:
raise GcpValidationSpecificationException(
f"The union field '{full_field_path}' has no nested fields defined in "
f"specification '{validation_spec}'. "
"Unions should have at least one nested field defined."
)
self._validate_union(children_validation_specs, full_field_path, dictionary_to_validate)
elif field_type == "list":
if not isinstance(value, list):
raise GcpFieldValidationException(
f"The field '{full_field_path}' should be of list type according to "
f"the specification '{validation_spec}' but it is '{value}'"
)
elif custom_validation:
try:
custom_validation(value)
except Exception as e:
raise GcpFieldValidationException(
f"Error while validating custom field '{full_field_path}' "
f"specified by '{validation_spec}': '{e}'"
)
elif field_type is None:
self.log.debug(
"The type of field '%s' is not specified in '%s'. Not validating its content.",
full_field_path,
validation_spec,
)
else:
raise GcpValidationSpecificationException(
f"The field '{full_field_path}' is of type '{field_type}' in "
f"specification '{validation_spec}'.This type is unknown to validation!"
)
return True
def validate(self, body_to_validate: dict) -> None:
"""
        Validates if the body (dictionary) follows the specification that the validator was instantiated with.
        Raises GcpValidationSpecificationException or GcpFieldValidationException in case of problems
        with the specification or of the body not conforming to the specification respectively.
:param body_to_validate: body that must follow the specification
:return: None
"""
try:
for validation_spec in self._validation_specs:
self._validate_field(validation_spec=validation_spec, dictionary_to_validate=body_to_validate)
except GcpFieldValidationException as e:
raise GcpFieldValidationException(
f"There was an error when validating: body '{body_to_validate}': '{e}'"
)
all_field_names = [
spec["name"]
for spec in self._validation_specs
if spec.get("type") != "union" and spec.get("api_version") != self._api_version
]
all_union_fields = [spec for spec in self._validation_specs if spec.get("type") == "union"]
for union_field in all_union_fields:
all_field_names.extend(
[
nested_union_spec["name"]
for nested_union_spec in union_field["fields"]
if nested_union_spec.get("type") != "union"
and nested_union_spec.get("api_version") != self._api_version
]
)
for field_name in body_to_validate.keys():
if field_name not in all_field_names:
self.log.warning(
"The field '%s' is in the body, but is not specified in the "
"validation specification '%s'. "
"This might be because you are using newer API version and "
"new field names defined for that version. Then the warning "
"can be safely ignored, or you might want to upgrade the operator"
"to the version that supports the new API version.",
field_name,
self._validation_specs,
)
| 22,272 | 47.631004 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/mlengine_prediction_summary.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A template called by DataFlowPythonOperator to summarize BatchPrediction.
It accepts a user function to calculate the metric(s) per instance in
the prediction results, then aggregates to output as a summary.
It accepts the following arguments:
- ``--prediction_path``:
The GCS folder that contains BatchPrediction results, containing
``prediction.results-NNNNN-of-NNNNN`` files in the json format.
Output will be also stored in this folder, as 'prediction.summary.json'.
- ``--metric_fn_encoded``:
An encoded function that calculates and returns a tuple of metric(s)
for a given instance (as a dictionary). It should be encoded
via ``base64.b64encode(dill.dumps(fn, recurse=True))``.
- ``--metric_keys``:
A comma-separated key(s) of the aggregated metric(s) in the summary
  output. The order and the size of the keys must match the output
of metric_fn.
The summary will have an additional key, 'count', to represent the
total number of instances, so the keys shouldn't include 'count'.
Usage example:
.. code-block:: python
from airflow.providers.google.cloud.operators.dataflow import DataflowCreatePythonJobOperator
def get_metric_fn():
import math # all imports must be outside of the function to be passed.
def metric_fn(inst):
label = float(inst["input_label"])
classes = float(inst["classes"])
prediction = float(inst["scores"][1])
log_loss = math.log(1 + math.exp(
-(label * 2 - 1) * math.log(prediction / (1 - prediction))))
squared_err = (classes-label)**2
return (log_loss, squared_err)
return metric_fn
metric_fn_encoded = base64.b64encode(dill.dumps(get_metric_fn(), recurse=True))
DataflowCreatePythonJobOperator(
task_id="summary-prediction",
py_options=["-m"],
py_file="airflow.providers.google.cloud.utils.mlengine_prediction_summary",
options={
"prediction_path": prediction_path,
"metric_fn_encoded": metric_fn_encoded,
"metric_keys": "log_loss,mse"
},
dataflow_default_options={
"project": "xxx", "region": "us-east1",
"staging_location": "gs://yy", "temp_location": "gs://zz",
}
) >> dag
When the input file is like the following::
{"inputs": "1,x,y,z", "classes": 1, "scores": [0.1, 0.9]}
{"inputs": "0,o,m,g", "classes": 0, "scores": [0.7, 0.3]}
{"inputs": "1,o,m,w", "classes": 0, "scores": [0.6, 0.4]}
{"inputs": "1,b,r,b", "classes": 1, "scores": [0.2, 0.8]}
The output file will be::
{"log_loss": 0.43890510565304547, "count": 4, "mse": 0.25}
To test outside of the dag:
.. code-block:: python
subprocess.check_call(
[
"python",
"-m",
"airflow.providers.google.cloud.utils.mlengine_prediction_summary",
"--prediction_path=gs://...",
"--metric_fn_encoded=" + metric_fn_encoded,
"--metric_keys=log_loss,mse",
"--runner=DataflowRunner",
"--staging_location=gs://...",
"--temp_location=gs://...",
]
)
.. spelling:word-list::
pcoll
"""
from __future__ import annotations
import argparse
import base64
import json
import logging
import os
import apache_beam as beam
import dill
from apache_beam.coders.coders import Coder
class JsonCoder(Coder):
"""JSON encoder/decoder."""
@staticmethod
def encode(x):
"""JSON encoder."""
return json.dumps(x).encode()
@staticmethod
def decode(x):
"""JSON decoder."""
return json.loads(x)
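# Round-trip sketch (illustrative): JsonCoder().encode({"count": 4}) returns b'{"count": 4}'
# and JsonCoder().decode(b'{"count": 4}') returns {"count": 4}. The pipeline below relies on
# this coder to read and write one JSON object per text line.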
@beam.ptransform_fn
def MakeSummary(pcoll, metric_fn, metric_keys):
"""Summary PTransform used in Dataflow."""
return (
pcoll
| "ApplyMetricFnPerInstance" >> beam.Map(metric_fn)
| "PairWith1" >> beam.Map(lambda tup: tup + (1,))
| "SumTuple" >> beam.CombineGlobally(beam.combiners.TupleCombineFn(*([sum] * (len(metric_keys) + 1))))
| "AverageAndMakeDict"
>> beam.Map(
lambda tup: dict(
[(name, tup[i] / tup[-1]) for i, name in enumerate(metric_keys)] + [("count", tup[-1])]
)
)
)
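# Shape of the data flowing through MakeSummary above (illustrative, assuming
# metric_keys == ["err", "mse"]):
#
#   "ApplyMetricFnPerInstance" -> (err, sq_err)          one tuple per instance
#   "PairWith1"                -> (err, sq_err, 1)       append a count of 1
#   "SumTuple"                 -> (sum_err, sum_sq, n)   element-wise sums over the PCollection
#   "AverageAndMakeDict"       -> {"err": sum_err / n, "mse": sum_sq / n, "count": n}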
def run(argv=None):
"""Helper for obtaining prediction summary."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--prediction_path",
required=True,
help=(
"The GCS folder that contains BatchPrediction results, containing "
"prediction.results-NNNNN-of-NNNNN files in the json format. "
"Output will be also stored in this folder, as a file"
"'prediction.summary.json'."
),
)
parser.add_argument(
"--metric_fn_encoded",
required=True,
help=(
"An encoded function that calculates and returns a tuple of "
"metric(s) for a given instance (as a dictionary). It should be "
"encoded via base64.b64encode(dill.dumps(fn, recurse=True))."
),
)
parser.add_argument(
"--metric_keys",
required=True,
help=(
"A comma-separated keys of the aggregated metric(s) in the summary "
"output. The order and the size of the keys must match to the "
"output of metric_fn. The summary will have an additional key, "
"'count', to represent the total number of instances, so this flag "
"shouldn't include 'count'."
),
)
known_args, pipeline_args = parser.parse_known_args(argv)
metric_fn = dill.loads(base64.b64decode(known_args.metric_fn_encoded))
if not callable(metric_fn):
raise ValueError("--metric_fn_encoded must be an encoded callable.")
metric_keys = known_args.metric_keys.split(",")
with beam.Pipeline(options=beam.pipeline.PipelineOptions(pipeline_args)) as pipe:
prediction_result_pattern = os.path.join(known_args.prediction_path, "prediction.results-*-of-*")
prediction_summary_path = os.path.join(known_args.prediction_path, "prediction.summary.json")
# This is apache-beam ptransform's convention
_ = (
pipe
| "ReadPredictionResult" >> beam.io.ReadFromText(prediction_result_pattern, coder=JsonCoder())
| "Summary" >> MakeSummary(metric_fn, metric_keys)
| "Write"
>> beam.io.WriteToText(
prediction_summary_path,
shard_name_template="", # without trailing -NNNNN-of-NNNNN.
coder=JsonCoder(),
)
)
if __name__ == "__main__":
# Dataflow does not print anything on the screen by default. Good practice says to configure the logger
# to be able to track the progress. This code is run in a separate process, so it's safe.
logging.getLogger().setLevel(logging.INFO)
run()
| 7,682 | 34.569444 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/utils/helpers.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains helper functions for Google Cloud operators."""
from __future__ import annotations
def normalize_directory_path(source_object: str | None) -> str | None:
"""Makes sure dir path ends with a slash."""
return source_object + "/" if source_object and not source_object.endswith("/") else source_object
| 1,117 | 45.583333 | 102 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/log/stackdriver_task_handler.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Handler that integrates with Stackdriver."""
from __future__ import annotations
import logging
from contextvars import ContextVar
from functools import cached_property
from typing import Collection
from urllib.parse import urlencode
from google.auth.credentials import Credentials
from google.cloud import logging as gcp_logging
from google.cloud.logging import Resource
from google.cloud.logging.handlers.transports import BackgroundThreadTransport, Transport
from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client
from google.cloud.logging_v2.types import ListLogEntriesRequest, ListLogEntriesResponse
from airflow.models import TaskInstance
from airflow.providers.google.cloud.utils.credentials_provider import get_credentials_and_project_id
from airflow.providers.google.common.consts import CLIENT_INFO
try:
# todo: remove this conditional import when min airflow version >= 2.6
ctx_indiv_trigger: ContextVar | None
from airflow.utils.log.trigger_handler import ctx_indiv_trigger
except ImportError:
ctx_indiv_trigger = None
DEFAULT_LOGGER_NAME = "airflow"
_GLOBAL_RESOURCE = Resource(type="global", labels={})
_DEFAULT_SCOPESS = frozenset(
["https://www.googleapis.com/auth/logging.read", "https://www.googleapis.com/auth/logging.write"]
)
class StackdriverTaskHandler(logging.Handler):
"""Handler that directly makes Stackdriver logging API calls.
    This is a Python standard ``logging`` handler that can be used to
route Python standard logging messages directly to the Stackdriver
Logging API.
    It can also be used to save logs for executing tasks. To do this, you should set it as a handler with
the name "tasks". In this case, it will also be used to read the log for display in Web UI.
This handler supports both an asynchronous and synchronous transport.
:param gcp_key_path: Path to Google Cloud Credential JSON file.
If omitted, authorization based on `the Application Default Credentials
<https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
be used.
    :param scopes: OAuth scopes for the credentials.
:param name: the name of the custom log in Stackdriver Logging. Defaults
to 'airflow'. The name of the Python logger will be represented
in the ``python_logger`` field.
:param transport: Class for creating new transport objects. It should
extend from the base :class:`google.cloud.logging.handlers.Transport` type and
implement :meth`google.cloud.logging.handlers.Transport.send`. Defaults to
:class:`google.cloud.logging.handlers.BackgroundThreadTransport`. The other
option is :class:`google.cloud.logging.handlers.SyncTransport`.
:param resource: (Optional) Monitored resource of the entry, defaults
to the global resource type.
:param labels: (Optional) Mapping of labels for the entry.
"""
LABEL_TASK_ID = "task_id"
LABEL_DAG_ID = "dag_id"
LABEL_EXECUTION_DATE = "execution_date"
LABEL_TRY_NUMBER = "try_number"
LOG_VIEWER_BASE_URL = "https://console.cloud.google.com/logs/viewer"
LOG_NAME = "Google Stackdriver"
trigger_supported = True
trigger_should_queue = False
trigger_should_wrap = False
trigger_send_end_marker = False
def __init__(
self,
gcp_key_path: str | None = None,
scopes: Collection[str] | None = _DEFAULT_SCOPESS,
name: str = DEFAULT_LOGGER_NAME,
transport: type[Transport] = BackgroundThreadTransport,
resource: Resource = _GLOBAL_RESOURCE,
labels: dict[str, str] | None = None,
):
super().__init__()
self.gcp_key_path: str | None = gcp_key_path
self.scopes: Collection[str] | None = scopes
self.name: str = name
self.transport_type: type[Transport] = transport
self.resource: Resource = resource
self.labels: dict[str, str] | None = labels
self.task_instance_labels: dict[str, str] | None = {}
self.task_instance_hostname = "default-hostname"
@cached_property
def _credentials_and_project(self) -> tuple[Credentials, str]:
credentials, project = get_credentials_and_project_id(
key_path=self.gcp_key_path, scopes=self.scopes, disable_logging=True
)
return credentials, project
@property
def _client(self) -> gcp_logging.Client:
"""The Cloud Library API client."""
credentials, project = self._credentials_and_project
client = gcp_logging.Client(
credentials=credentials,
project=project,
client_info=CLIENT_INFO,
)
return client
@property
def _logging_service_client(self) -> LoggingServiceV2Client:
"""The Cloud logging service v2 client."""
credentials, _ = self._credentials_and_project
client = LoggingServiceV2Client(
credentials=credentials,
client_info=CLIENT_INFO,
)
return client
@cached_property
def _transport(self) -> Transport:
"""Object responsible for sending data to Stackdriver."""
# The Transport object is badly defined (no init) but in the docs client/name as constructor
# arguments are a requirement for any class that derives from Transport class, hence ignore:
return self.transport_type(self._client, self.name) # type: ignore[call-arg]
def _get_labels(self, task_instance=None):
if task_instance:
ti_labels = self._task_instance_to_labels(task_instance)
else:
ti_labels = self.task_instance_labels
labels: dict[str, str] | None
if self.labels and ti_labels:
labels = {}
labels.update(self.labels)
labels.update(ti_labels)
elif self.labels:
labels = self.labels
elif ti_labels:
labels = ti_labels
else:
labels = None
return labels or {}
def emit(self, record: logging.LogRecord) -> None:
"""Actually log the specified logging record.
:param record: The record to be logged.
"""
message = self.format(record)
ti = None
# todo: remove ctx_indiv_trigger is not None check when min airflow version >= 2.6
if ctx_indiv_trigger is not None and getattr(record, ctx_indiv_trigger.name, None):
ti = getattr(record, "task_instance", None) # trigger context
labels = self._get_labels(ti)
self._transport.send(record, message, resource=self.resource, labels=labels)
def set_context(self, task_instance: TaskInstance) -> None:
"""
        Configures the logger to add information about the current task.
:param task_instance: Currently executed task
"""
self.task_instance_labels = self._task_instance_to_labels(task_instance)
self.task_instance_hostname = task_instance.hostname
def read(
self, task_instance: TaskInstance, try_number: int | None = None, metadata: dict | None = None
) -> tuple[list[tuple[tuple[str, str]]], list[dict[str, str | bool]]]:
"""
Read logs of given task instance from Stackdriver logging.
:param task_instance: task instance object
:param try_number: task instance try_number to read logs from. If None
it returns all logs
        :param metadata: log metadata. It is used for streaming log reading and auto-tailing.
:return: a tuple of (
list of (one element tuple with two element tuple - hostname and logs)
and list of metadata)
"""
if try_number is not None and try_number < 1:
logs = f"Error fetching the logs. Try number {try_number} is invalid."
return [((self.task_instance_hostname, logs),)], [{"end_of_log": "true"}]
if not metadata:
metadata = {}
ti_labels = self._task_instance_to_labels(task_instance)
if try_number is not None:
ti_labels[self.LABEL_TRY_NUMBER] = str(try_number)
else:
del ti_labels[self.LABEL_TRY_NUMBER]
log_filter = self._prepare_log_filter(ti_labels)
next_page_token = metadata.get("next_page_token", None)
all_pages = "download_logs" in metadata and metadata["download_logs"]
messages, end_of_log, next_page_token = self._read_logs(log_filter, next_page_token, all_pages)
new_metadata: dict[str, str | bool] = {"end_of_log": end_of_log}
if next_page_token:
new_metadata["next_page_token"] = next_page_token
return [((self.task_instance_hostname, messages),)], [new_metadata]
def _prepare_log_filter(self, ti_labels: dict[str, str]) -> str:
"""
Prepares the filter that chooses which log entries to fetch.
More information:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list#body.request_body.FIELDS.filter
https://cloud.google.com/logging/docs/view/advanced-queries
:param ti_labels: Task Instance's labels that will be used to search for logs
:return: logs filter
"""
def escape_label_key(key: str) -> str:
return f'"{key}"' if "." in key else key
def escale_label_value(value: str) -> str:
escaped_value = value.replace("\\", "\\\\").replace('"', '\\"')
return f'"{escaped_value}"'
_, project = self._credentials_and_project
log_filters = [
f"resource.type={escale_label_value(self.resource.type)}",
f'logName="projects/{project}/logs/{self.name}"',
]
for key, value in self.resource.labels.items():
log_filters.append(f"resource.labels.{escape_label_key(key)}={escale_label_value(value)}")
for key, value in ti_labels.items():
log_filters.append(f"labels.{escape_label_key(key)}={escale_label_value(value)}")
return "\n".join(log_filters)
def _read_logs(
self, log_filter: str, next_page_token: str | None, all_pages: bool
) -> tuple[str, bool, str | None]:
"""
Sends requests to the Stackdriver service and downloads logs.
:param log_filter: Filter specifying the logs to be downloaded.
:param next_page_token: The token of the page from which the log download will start.
If None is passed, it will start from the first page.
:param all_pages: If True is passed, all subpages will be downloaded. Otherwise, only the first
page will be downloaded
        :return: A tuple that contains the following items:
* string with logs
* Boolean value describing whether there are more logs,
* token of the next page
"""
messages = []
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter,
page_token=next_page_token,
)
messages.append(new_messages)
if all_pages:
while next_page_token:
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter, page_token=next_page_token
)
messages.append(new_messages)
if not messages:
break
end_of_log = True
next_page_token = None
else:
end_of_log = not bool(next_page_token)
return "\n".join(messages), end_of_log, next_page_token
def _read_single_logs_page(self, log_filter: str, page_token: str | None = None) -> tuple[str, str]:
"""
Sends requests to the Stackdriver service and downloads single pages with logs.
:param log_filter: Filter specifying the logs to be downloaded.
:param page_token: The token of the page to be downloaded. If None is passed, the first page will be
downloaded.
:return: Downloaded logs and next page token
"""
_, project = self._credentials_and_project
request = ListLogEntriesRequest(
resource_names=[f"projects/{project}"],
filter=log_filter,
page_token=page_token,
order_by="timestamp asc",
page_size=1000,
)
response = self._logging_service_client.list_log_entries(request=request)
page: ListLogEntriesResponse = next(response.pages)
messages: list[str] = []
for entry in page.entries:
if "message" in (entry.json_payload or {}):
messages.append(entry.json_payload["message"]) # type: ignore
elif entry.text_payload:
messages.append(entry.text_payload)
return "\n".join(messages), page.next_page_token
@classmethod
def _task_instance_to_labels(cls, ti: TaskInstance) -> dict[str, str]:
return {
cls.LABEL_TASK_ID: ti.task_id,
cls.LABEL_DAG_ID: ti.dag_id,
cls.LABEL_EXECUTION_DATE: str(ti.execution_date.isoformat()),
cls.LABEL_TRY_NUMBER: str(ti.try_number),
}
@property
def log_name(self):
"""Return log name."""
return self.LOG_NAME
@cached_property
def _resource_path(self):
segments = [self.resource.type]
        for key, value in self.resource.labels.items():
segments += [key]
segments += [value]
return "/".join(segments)
def get_external_log_url(self, task_instance: TaskInstance, try_number: int) -> str:
"""
Creates an address for an external log collecting service.
:param task_instance: task instance object
:param try_number: task instance try_number to read logs from
:return: URL to the external log collection service
"""
_, project_id = self._credentials_and_project
ti_labels = self._task_instance_to_labels(task_instance)
ti_labels[self.LABEL_TRY_NUMBER] = str(try_number)
log_filter = self._prepare_log_filter(ti_labels)
url_query_string = {
"project": project_id,
"interval": "NO_LIMIT",
"resource": self._resource_path,
"advancedFilter": log_filter,
}
url = f"{self.LOG_VIEWER_BASE_URL}?{urlencode(url_query_string)}"
return url
def close(self) -> None:
self._transport.flush()
| 15,282 | 39.646277 | 111 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/log/gcs_task_handler.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import os
import shutil
from functools import cached_property
from pathlib import Path
from typing import Collection
# not sure why but mypy complains on missing `storage` but it is clearly there and is importable
from google.cloud import storage # type: ignore[attr-defined]
from packaging.version import Version
from airflow.configuration import conf
from airflow.exceptions import AirflowNotFoundException
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url
from airflow.providers.google.cloud.utils.credentials_provider import get_credentials_and_project_id
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
_DEFAULT_SCOPESS = frozenset(
[
"https://www.googleapis.com/auth/devstorage.read_write",
]
)
logger = logging.getLogger(__name__)
def get_default_delete_local_copy():
"""Load delete_local_logs conf if Airflow version > 2.6 and return False if not.
TODO: delete this function when min airflow version >= 2.6.
"""
from airflow.version import version
if Version(version) < Version("2.6"):
return False
return conf.getboolean("logging", "delete_local_logs")
class GCSTaskHandler(FileTaskHandler, LoggingMixin):
"""
GCSTaskHandler is a python log handler that handles and reads task instance logs.
It extends airflow FileTaskHandler and uploads to and reads from GCS remote
storage. Upon log reading failure, it reads from host machine's local disk.
:param base_log_folder: Base log folder to place logs.
:param gcs_log_folder: Path to a remote location where logs will be saved. It must have the prefix
``gs://``. For example: ``gs://bucket/remote/log/location``
:param filename_template: template filename string
:param gcp_key_path: Path to Google Cloud Service Account file (JSON). Mutually exclusive with
gcp_keyfile_dict.
If omitted, authorization based on `the Application Default Credentials
<https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
be used.
:param gcp_keyfile_dict: Dictionary of keyfile parameters. Mutually exclusive with gcp_key_path.
:param gcp_scopes: Comma-separated string containing OAuth2 scopes
:param project_id: Project ID to read the secrets from. If not passed, the project ID from credentials
will be used.
:param delete_local_copy: Whether local log files should be deleted after they are downloaded when using
remote logging
"""
trigger_should_wrap = True
def __init__(
self,
*,
base_log_folder: str,
gcs_log_folder: str,
filename_template: str | None = None,
gcp_key_path: str | None = None,
gcp_keyfile_dict: dict | None = None,
gcp_scopes: Collection[str] | None = _DEFAULT_SCOPESS,
project_id: str | None = None,
**kwargs,
):
super().__init__(base_log_folder, filename_template)
self.remote_base = gcs_log_folder
self.log_relative_path = ""
self.closed = False
self.upload_on_close = True
self.gcp_key_path = gcp_key_path
self.gcp_keyfile_dict = gcp_keyfile_dict
self.scopes = gcp_scopes
self.project_id = project_id
self.delete_local_copy = (
kwargs["delete_local_copy"] if "delete_local_copy" in kwargs else get_default_delete_local_copy()
)
@cached_property
def hook(self) -> GCSHook | None:
"""Returns GCSHook if remote_log_conn_id configured."""
conn_id = conf.get("logging", "remote_log_conn_id", fallback=None)
if conn_id:
try:
return GCSHook(gcp_conn_id=conn_id)
except AirflowNotFoundException:
pass
return None
@cached_property
def client(self) -> storage.Client:
"""Returns GCS Client."""
if self.hook:
credentials, project_id = self.hook.get_credentials_and_project_id()
else:
credentials, project_id = get_credentials_and_project_id(
key_path=self.gcp_key_path,
keyfile_dict=self.gcp_keyfile_dict,
scopes=self.scopes,
disable_logging=True,
)
return storage.Client(
credentials=credentials,
client_info=CLIENT_INFO,
project=self.project_id if self.project_id else project_id,
)
def set_context(self, ti):
super().set_context(ti)
# Log relative path is used to construct local and remote
# log path to upload log files into GCS and read from the
# remote location.
full_path = self.handler.baseFilename
self.log_relative_path = Path(full_path).relative_to(self.local_base).as_posix()
is_trigger_log_context = getattr(ti, "is_trigger_log_context", False)
self.upload_on_close = is_trigger_log_context or not ti.raw
def close(self):
"""Close and upload local log file to remote storage GCS."""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
with open(local_loc) as logfile:
log = logfile.read()
gcs_write = self.gcs_write(log, remote_loc)
if gcs_write and self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True
def _add_message(self, msg):
filename, lineno, func, stackinfo = logger.findCaller()
record = logging.LogRecord("", logging.INFO, filename, lineno, msg + "\n", None, None, func=func)
return self.format(record)
def _read_remote_logs(self, ti, try_number, metadata=None) -> tuple[list[str], list[str]]:
# Explicitly getting log relative path is necessary because this method
# is called from webserver from TaskLogReader, where we don't call set_context
# and can read logs for different TIs in each request
messages = []
logs = []
worker_log_relative_path = self._render_filename(ti, try_number)
remote_loc = os.path.join(self.remote_base, worker_log_relative_path)
uris = []
bucket, prefix = _parse_gcs_url(remote_loc)
blobs = list(self.client.list_blobs(bucket_or_name=bucket, prefix=prefix))
if blobs:
uris = [f"gs://{bucket}/{b.name}" for b in blobs]
messages.extend(["Found remote logs:", *[f" * {x}" for x in sorted(uris)]])
else:
messages.append(f"No logs found in GCS; ti=%s {ti}")
try:
for key in sorted(uris):
blob = storage.Blob.from_string(key, self.client)
remote_log = blob.download_as_bytes().decode()
if remote_log:
logs.append(remote_log)
except Exception as e:
messages.append(f"Unable to read remote log {e}")
return messages, logs
def _read(self, ti, try_number, metadata=None):
"""
Read logs of given task instance and try_number from GCS.
        If that fails, read the log from the task instance host machine.
todo: when min airflow version >= 2.6, remove this method
:param ti: task instance object
:param try_number: task instance try_number to read logs from
        :param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
"""
if hasattr(super(), "_read_remote_logs"):
# from Airflow 2.6, we don't implement the `_read` method.
# if parent has _read_remote_logs, we're >= 2.6
return super()._read(ti, try_number, metadata)
messages, logs = self._read_remote_logs(ti, try_number, metadata)
if not logs:
return super()._read(ti, try_number, metadata)
return "".join([f"*** {x}\n" for x in messages]) + "\n".join(logs), {"end_of_log": True}
def gcs_write(self, log, remote_log_location) -> bool:
"""
Write the log to the remote location and return `True`; fail silently and return `False` on error.
:param log: the log to write to the remote_log_location
:param remote_log_location: the log's location in remote storage
:return: whether the log is successfully written to remote location or not.
"""
try:
blob = storage.Blob.from_string(remote_log_location, self.client)
old_log = blob.download_as_bytes().decode()
log = "\n".join([old_log, log]) if old_log else log
except Exception as e:
if self.no_log_found(e):
pass
else:
log += self._add_message(
f"Error checking for previous log; if exists, may be overwritten: {str(e)}"
)
self.log.warning("Error checking for previous log: %s", e)
try:
blob = storage.Blob.from_string(remote_log_location, self.client)
blob.upload_from_string(log, content_type="text/plain")
except Exception as e:
self.log.error("Could not write logs to %s: %s", remote_log_location, e)
return False
return True
@staticmethod
def no_log_found(exc):
"""
        Given an exception, determine whether it is the result of the log not being found.
:meta private:
"""
if exc.args and isinstance(exc.args[0], str) and "No such object" in exc.args[0]:
return True
elif getattr(exc, "resp", {}).get("status") == "404":
return True
return False
| 11,224 | 40.117216 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/log/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/dataplex.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Dataplex sensors."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
if TYPE_CHECKING:
from airflow.utils.context import Context
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataplex import DataplexHook
from airflow.sensors.base import BaseSensorOperator
class TaskState:
"""Dataplex Task states."""
STATE_UNSPECIFIED = 0
ACTIVE = 1
CREATING = 2
DELETING = 3
ACTION_REQUIRED = 4
class DataplexTaskStateSensor(BaseSensorOperator):
"""
Check the status of the Dataplex task.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param dataplex_task_id: Required. Task identifier.
:param api_version: The version of the api that will be requested for example 'v3'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
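
    The snippet below is an illustrative sketch (not part of this module); the task_id and
    all resource IDs are placeholders::

        wait_for_dataplex_task = DataplexTaskStateSensor(
            task_id="wait_for_dataplex_task",
            project_id="my-project",
            region="us-central1",
            lake_id="my-lake",
            dataplex_task_id="my-task",
        )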
"""
template_fields = ["dataplex_task_id"]
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
dataplex_task_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.dataplex_task_id = dataplex_task_id
self.api_version = api_version
self.retry = retry
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def poke(self, context: Context) -> bool:
self.log.info("Waiting for task %s to be %s", self.dataplex_task_id, TaskState.ACTIVE)
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
task = hook.get_task(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
dataplex_task_id=self.dataplex_task_id,
retry=self.retry,
metadata=self.metadata,
)
task_status = task.state
if task_status == TaskState.DELETING:
raise AirflowException(f"Task is going to be deleted {self.dataplex_task_id}")
self.log.info("Current status of the Dataplex task %s => %s", self.dataplex_task_id, task_status)
return task_status == TaskState.ACTIVE
| 4,615 | 38.452991 | 105 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage sensors."""
from __future__ import annotations
import os
import textwrap
import warnings
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Callable, Sequence
from google.api_core.retry import Retry
from google.cloud.storage.retry import DEFAULT_RETRY
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.triggers.gcs import (
GCSBlobTrigger,
GCSCheckBlobUpdateTimeTrigger,
GCSPrefixBlobTrigger,
GCSUploadSessionTrigger,
)
from airflow.sensors.base import BaseSensorOperator, poke_mode_only
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSObjectExistenceSensor(BaseSensorOperator):
"""
Checks for the existence of a file in Google Cloud Storage.
:param bucket: The Google Cloud Storage bucket where the object is.
:param object: The name of the object to check in the Google cloud
storage bucket.
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: (Optional) How to retry the RPC
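
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    bucket, and object names are placeholders::

        wait_for_object = GCSObjectExistenceSensor(
            task_id="wait_for_object",
            bucket="my-bucket",
            object="path/to/data.csv",
        )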
"""
template_fields: Sequence[str] = (
"bucket",
"object",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
bucket: str,
object: str,
google_cloud_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
retry: Retry = DEFAULT_RETRY,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.object = object
self.google_cloud_conn_id = google_cloud_conn_id
self.impersonation_chain = impersonation_chain
self.retry = retry
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
self.log.info("Sensor checks existence of : %s, %s", self.bucket, self.object)
hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.exists(self.bucket, self.object, self.retry)
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger."""
if not self.deferrable:
super().execute(context)
else:
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=GCSBlobTrigger(
bucket=self.bucket,
object_name=self.object,
poke_interval=self.poke_interval,
google_cloud_conn_id=self.google_cloud_conn_id,
hook_params={
"impersonation_chain": self.impersonation_chain,
},
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, str]) -> str:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info("File %s was found in bucket %s.", self.object, self.bucket)
return event["message"]
class GCSObjectExistenceAsyncSensor(GCSObjectExistenceSensor):
"""
Checks for the existence of a file in Google Cloud Storage.
This class is deprecated and will be removed in a future release.
Please use :class:`airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor`
and set *deferrable* attribute to *True* instead.
:param bucket: The Google Cloud Storage bucket where the object is.
:param object: The name of the object to check in the Google cloud storage bucket.
:param google_cloud_conn_id: The connection ID to use when connecting to Google Cloud Storage.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(self, **kwargs: Any) -> None:
warnings.warn(
"Class `GCSObjectExistenceAsyncSensor` is deprecated and will be removed in a future release. "
"Please use `GCSObjectExistenceSensor` and set `deferrable` attribute to `True` instead",
AirflowProviderDeprecationWarning,
)
super().__init__(deferrable=True, **kwargs)
def ts_function(context):
"""
    Default callback for the GCSObjectUpdateSensor.
    The default behaviour is to check for the object being updated after the data interval's end,
    or execution_date + interval on Airflow versions prior to 2.2 (before the AIP-39 implementation).
"""
try:
return context["data_interval_end"]
except KeyError:
return context["dag"].following_schedule(context["execution_date"])
class GCSObjectUpdateSensor(BaseSensorOperator):
"""
Checks if an object is updated in Google Cloud Storage.
:param bucket: The Google Cloud Storage bucket where the object is.
:param object: The name of the object to download in the Google cloud
storage bucket.
:param ts_func: Callback for defining the update condition. The default callback
returns execution_date + schedule_interval. The callback takes the context
        as a parameter.
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run sensor in deferrable mode
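
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    bucket, and object names are placeholders and the default ``ts_func`` is used::

        wait_for_object_update = GCSObjectUpdateSensor(
            task_id="wait_for_object_update",
            bucket="my-bucket",
            object="path/to/data.csv",
        )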
"""
template_fields: Sequence[str] = (
"bucket",
"object",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
bucket: str,
object: str,
ts_func: Callable = ts_function,
google_cloud_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.object = object
self.ts_func = ts_func
self.google_cloud_conn_id = google_cloud_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
self.log.info("Sensor checks existence of : %s, %s", self.bucket, self.object)
hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.is_updated_after(self.bucket, self.object, self.ts_func(context))
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger."""
if self.deferrable is False:
super().execute(context)
else:
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=GCSCheckBlobUpdateTimeTrigger(
bucket=self.bucket,
object_name=self.object,
target_date=self.ts_func(context),
poke_interval=self.poke_interval,
google_cloud_conn_id=self.google_cloud_conn_id,
hook_params={
"impersonation_chain": self.impersonation_chain,
},
),
method_name="execute_complete",
)
def execute_complete(self, context: dict[str, Any], event: dict[str, str] | None = None) -> str:
"""Callback for when the trigger fires."""
if event:
if event["status"] == "success":
self.log.info(
"Checking last updated time for object %s in bucket : %s", self.object, self.bucket
)
return event["message"]
raise AirflowException(event["message"])
raise AirflowException("No event received in trigger callback")
class GCSObjectsWithPrefixExistenceSensor(BaseSensorOperator):
"""
Checks for the existence of GCS objects at a given prefix, passing matches via XCom.
When files matching the given prefix are found, the poke method's criteria will be
fulfilled and the matching objects will be returned from the operator and passed
through XCom for downstream tasks.
:param bucket: The Google Cloud Storage bucket where the object is.
:param prefix: The name of the prefix to check in the Google cloud
storage bucket.
:param google_cloud_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run sensor in deferrable mode
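
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    bucket, and prefix are placeholders. The matching object names are pushed to XCom::

        wait_for_prefix = GCSObjectsWithPrefixExistenceSensor(
            task_id="wait_for_prefix",
            bucket="my-bucket",
            prefix="incoming/2023-01-01/",
        )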
"""
template_fields: Sequence[str] = (
"bucket",
"prefix",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
bucket: str,
prefix: str,
google_cloud_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
self.google_cloud_conn_id = google_cloud_conn_id
self._matches: list[str] = []
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
self.log.info("Checking for existence of object: %s, %s", self.bucket, self.prefix)
hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
)
self._matches = hook.list(self.bucket, prefix=self.prefix)
return bool(self._matches)
def execute(self, context: Context):
"""Overridden to allow matches to be passed."""
self.log.info("Checking for existence of object: %s, %s", self.bucket, self.prefix)
if not self.deferrable:
super().execute(context)
return self._matches
else:
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=GCSPrefixBlobTrigger(
bucket=self.bucket,
prefix=self.prefix,
poke_interval=self.poke_interval,
google_cloud_conn_id=self.google_cloud_conn_id,
hook_params={
"impersonation_chain": self.impersonation_chain,
},
),
method_name="execute_complete",
)
def execute_complete(self, context: dict[str, Any], event: dict[str, str | list[str]]) -> str | list[str]:
"""Callback for the trigger; returns immediately and relies on trigger to throw a success event."""
self.log.info("Resuming from trigger and checking status")
if event["status"] == "success":
return event["matches"]
raise AirflowException(event["message"])
def get_time():
"""This is just a wrapper of datetime.datetime.now to simplify mocking in the unittests."""
return datetime.now()
@poke_mode_only
class GCSUploadSessionCompleteSensor(BaseSensorOperator):
"""
Return True if the inactivity period has passed with no increase in the number of objects in the bucket.
Checks for changes in the number of objects at prefix in Google Cloud Storage
bucket and returns True if the inactivity period has passed with no
increase in the number of objects. Note, this sensor will not behave correctly
in reschedule mode, as the state of the listed objects in the GCS bucket will
be lost between rescheduled invocations.
    :param bucket: The Google Cloud Storage bucket where the objects are expected.
:param prefix: The name of the prefix to check in the Google cloud
storage bucket.
:param inactivity_period: The total seconds of inactivity to designate
an upload session is over. Note, this mechanism is not real time and
this operator may not return until a poke_interval after this period
has passed with no additional objects sensed.
:param min_objects: The minimum number of objects needed for upload session
to be considered valid.
:param previous_objects: The set of object ids found during the last poke.
    :param allow_delete: Whether this sensor should consider objects being deleted
        between pokes valid behavior. If true, a warning message will be logged
        when this happens. If false, an error will be raised.
:param google_cloud_conn_id: The connection ID to use when connecting
to Google Cloud Storage.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run sensor in deferrable mode
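
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    bucket, prefix, and thresholds are placeholders::

        wait_for_upload_session = GCSUploadSessionCompleteSensor(
            task_id="wait_for_upload_session",
            bucket="my-bucket",
            prefix="incoming/",
            inactivity_period=20 * 60,
            min_objects=1,
        )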
"""
template_fields: Sequence[str] = (
"bucket",
"prefix",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
bucket: str,
prefix: str,
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: set[str] | None = None,
allow_delete: bool = True,
google_cloud_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket = bucket
self.prefix = prefix
if inactivity_period < 0:
raise ValueError("inactivity_period must be non-negative")
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects if previous_objects else set()
self.inactivity_seconds = 0
self.allow_delete = allow_delete
self.google_cloud_conn_id = google_cloud_conn_id
self.last_activity_time = None
self.impersonation_chain = impersonation_chain
self.hook: GCSHook | None = None
self.deferrable = deferrable
def _get_gcs_hook(self) -> GCSHook | None:
if not self.hook:
self.hook = GCSHook(
gcp_conn_id=self.google_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
)
return self.hook
def is_bucket_updated(self, current_objects: set[str]) -> bool:
"""
Check whether new objects have been added and the inactivity_period has passed, and update the state.
:param current_objects: set of object ids in bucket during last poke.
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next poke.
self.log.info(
"New objects found at %s resetting last_activity_time.",
os.path.join(self.bucket, self.prefix),
)
self.log.debug("New objects: %s", "\n".join(current_objects - self.previous_objects))
self.last_activity_time = get_time()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return False
if self.previous_objects - current_objects:
# During the last poke interval objects were deleted.
if self.allow_delete:
self.previous_objects = current_objects
self.last_activity_time = get_time()
self.log.warning(
textwrap.dedent(
"""\
Objects were deleted during the last
poke interval. Updating the file counter and
resetting last_activity_time.
%s\
"""
),
self.previous_objects - current_objects,
)
return False
raise AirflowException(
"Illegal behavior: objects were deleted in "
f"{os.path.join(self.bucket, self.prefix)} between pokes."
)
if self.last_activity_time:
self.inactivity_seconds = (get_time() - self.last_activity_time).total_seconds()
else:
# Handles the first poke where last inactivity time is None.
self.last_activity_time = get_time()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket, self.prefix)
if current_num_objects >= self.min_objects:
self.log.info(
textwrap.dedent(
"""\
SUCCESS:
Sensor found %s objects at %s.
Waited at least %s seconds, with no new objects dropped.
"""
),
current_num_objects,
path,
self.inactivity_period,
)
return True
self.log.error("FAILURE: Inactivity Period passed, not enough objects found in %s", path)
return False
return False
def poke(self, context: Context) -> bool:
return self.is_bucket_updated(
set(self._get_gcs_hook().list(self.bucket, prefix=self.prefix)) # type: ignore[union-attr]
)
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger."""
hook_params = {"impersonation_chain": self.impersonation_chain}
if not self.deferrable:
return super().execute(context)
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=GCSUploadSessionTrigger(
bucket=self.bucket,
prefix=self.prefix,
poke_interval=self.poke_interval,
google_cloud_conn_id=self.google_cloud_conn_id,
inactivity_period=self.inactivity_period,
min_objects=self.min_objects,
previous_objects=self.previous_objects,
allow_delete=self.allow_delete,
hook_params=hook_params,
),
method_name="execute_complete",
)
def execute_complete(self, context: dict[str, Any], event: dict[str, str] | None = None) -> str:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event:
if event["status"] == "success":
return event["message"]
raise AirflowException(event["message"])
raise AirflowException("No event received in trigger callback")
| 23,602 | 41.681736 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/cloud_composer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Cloud Composer sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.triggers.cloud_composer import CloudComposerExecutionTrigger
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudComposerEnvironmentSensor(BaseSensorOperator):
"""
    Check the status of a Cloud Composer Environment operation.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param operation_name: The name of the operation resource
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param pooling_period_seconds: Optional. Controls how often the trigger polls for the result
        of a deferrable run.
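
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    project, region, and operation name are placeholders::

        wait_for_composer_operation = CloudComposerEnvironmentSensor(
            task_id="wait_for_composer_operation",
            project_id="my-project",
            region="us-central1",
            operation_name="my-operation-name",
        )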
"""
def __init__(
self,
*,
project_id: str,
region: str,
operation_name: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
pooling_period_seconds: int = 30,
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.operation_name = operation_name
self.pooling_period_seconds = pooling_period_seconds
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger."""
self.defer(
trigger=CloudComposerExecutionTrigger(
project_id=self.project_id,
region=self.region,
operation_name=self.operation_name,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
pooling_period_seconds=self.pooling_period_seconds,
),
method_name="execute_complete",
)
def execute_complete(self, context: dict[str, Any], event: dict[str, str] | None = None) -> str:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event:
if event.get("operation_done"):
return event["operation_done"]
raise AirflowException(event["message"])
raise AirflowException("No event received in trigger callback")
| 4,106 | 42.231579 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/pubsub.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google PubSub sensor."""
from __future__ import annotations
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Callable, Sequence
from google.cloud.pubsub_v1.types import ReceivedMessage
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.pubsub import PubSubHook
from airflow.providers.google.cloud.triggers.pubsub import PubsubPullTrigger
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class PubSubPullSensor(BaseSensorOperator):
"""
Pulls messages from a PubSub subscription and passes them through XCom.
Always waits for at least one message to be returned from the subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubPullSensor`
.. seealso::
        If you don't want to wait for at least one message, use the operator instead:
:class:`~airflow.providers.google.cloud.operators.pubsub.PubSubPullOperator`
This sensor operator will pull up to ``max_messages`` messages from the
specified PubSub subscription. When the subscription returns messages,
the poke method's criteria will be fulfilled and the messages will be
returned from the operator and passed through XCom for downstream tasks.
If ``ack_messages`` is set to True, messages will be immediately
acknowledged before being returned, otherwise, downstream tasks will be
responsible for acknowledging them.
    If you want a non-blocking task that does not wait for messages, please use
:class:`~airflow.providers.google.cloud.operators.pubsub.PubSubPullOperator`
instead.
``project_id`` and ``subscription`` are templated so you can use
variables in them.
:param project_id: the Google Cloud project ID for the subscription (templated)
:param subscription: the Pub/Sub subscription name. Do not include the
full subscription path.
:param max_messages: The maximum number of messages to retrieve per
PubSub pull request
:param ack_messages: If True, each message will be acknowledged
immediately rather than by any downstream tasks
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud.
:param messages_callback: (Optional) Callback to process received messages.
        Its return value will be saved to XCom.
If you are pulling large messages, you probably want to provide a custom callback.
If not provided, the default implementation will convert `ReceivedMessage` objects
into JSON-serializable dicts using `google.protobuf.json_format.MessageToDict` function.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run sensor in deferrable mode
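
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    project, and subscription names are placeholders::

        wait_for_messages = PubSubPullSensor(
            task_id="wait_for_messages",
            project_id="my-project",
            subscription="my-subscription",
            max_messages=10,
            ack_messages=True,
        )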
"""
template_fields: Sequence[str] = (
"project_id",
"subscription",
"impersonation_chain",
)
ui_color = "#ff7f50"
def __init__(
self,
*,
project_id: str,
subscription: str,
max_messages: int = 5,
ack_messages: bool = False,
gcp_conn_id: str = "google_cloud_default",
messages_callback: Callable[[list[ReceivedMessage], Context], Any] | None = None,
impersonation_chain: str | Sequence[str] | None = None,
poke_interval: float = 10.0,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.subscription = subscription
self.max_messages = max_messages
self.ack_messages = ack_messages
self.messages_callback = messages_callback
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.poke_interval = poke_interval
self._return_value = None
def poke(self, context: Context) -> bool:
hook = PubSubHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
pulled_messages = hook.pull(
project_id=self.project_id,
subscription=self.subscription,
max_messages=self.max_messages,
return_immediately=True,
)
handle_messages = self.messages_callback or self._default_message_callback
self._return_value = handle_messages(pulled_messages, context)
if pulled_messages and self.ack_messages:
hook.acknowledge(
project_id=self.project_id,
subscription=self.subscription,
messages=pulled_messages,
)
return bool(pulled_messages)
    def execute(self, context: Context) -> Any:
"""Airflow runs this method on the worker and defers using the triggers if deferrable is True."""
if not self.deferrable:
super().execute(context)
return self._return_value
else:
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=PubsubPullTrigger(
project_id=self.project_id,
subscription=self.subscription,
max_messages=self.max_messages,
ack_messages=self.ack_messages,
messages_callback=self.messages_callback,
poke_interval=self.poke_interval,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
),
method_name="execute_complete",
)
def execute_complete(self, context: dict[str, Any], event: dict[str, str | list[str]]) -> str | list[str]:
"""Callback for the trigger; returns immediately and relies on trigger to throw a success event."""
if event["status"] == "success":
self.log.info("Sensor pulls messages: %s", event["message"])
return event["message"]
self.log.info("Sensor failed: %s", event["message"])
raise AirflowException(event["message"])
def _default_message_callback(
self,
pulled_messages: list[ReceivedMessage],
context: Context,
):
"""
This method can be overridden by subclasses or by `messages_callback` constructor argument.
This default implementation converts `ReceivedMessage` objects into JSON-serializable dicts.
:param pulled_messages: messages received from the topic.
:param context: same as in `execute`
:return: value to be saved to XCom.
"""
messages_json = [ReceivedMessage.to_dict(m) for m in pulled_messages]
return messages_json
| 8,262 | 41.158163 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/dataprep.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Dataprep Job sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.google.cloud.hooks.dataprep import GoogleDataprepHook, JobGroupStatuses
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataprepJobGroupIsFinishedSensor(BaseSensorOperator):
"""
    Check whether the Dataprep job group has finished.
:param job_group_id: ID of the job group to check
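
    The snippet below is an illustrative sketch (not part of this module); the task_id and
    job group ID are placeholders::

        wait_for_job_group = DataprepJobGroupIsFinishedSensor(
            task_id="wait_for_job_group",
            job_group_id=1234,
            dataprep_conn_id="dataprep_default",
        )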
"""
template_fields: Sequence[str] = ("job_group_id",)
def __init__(
self,
*,
job_group_id: int | str,
dataprep_conn_id: str = "dataprep_default",
**kwargs,
):
super().__init__(**kwargs)
self.job_group_id = job_group_id
self.dataprep_conn_id = dataprep_conn_id
def poke(self, context: Context) -> bool:
hooks = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
status = hooks.get_job_group_status(job_group_id=int(self.job_group_id))
return status != JobGroupStatuses.IN_PROGRESS
| 1,912 | 34.425926 | 94 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/bigquery.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery sensors."""
from __future__ import annotations
import warnings
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.triggers.bigquery import (
BigQueryTableExistenceTrigger,
BigQueryTablePartitionExistenceTrigger,
)
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryTableExistenceSensor(BaseSensorOperator):
"""
    Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the table.
The connection supplied to the hook must provide
access to the specified project.
    :param dataset_id: The name of the dataset in which to look for the table.
:param table_id: The name of the table to check the existence of.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
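
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    project, dataset, and table names are placeholders::

        wait_for_table = BigQueryTableExistenceSensor(
            task_id="wait_for_table",
            project_id="my-project",
            dataset_id="my_dataset",
            table_id="my_table",
        )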
"""
template_fields: Sequence[str] = (
"project_id",
"dataset_id",
"table_id",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
project_id: str,
dataset_id: str,
table_id: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
if deferrable and "poke_interval" not in kwargs:
# TODO: Remove once deprecated
if "polling_interval" in kwargs:
kwargs["poke_interval"] = kwargs["polling_interval"]
warnings.warn(
"Argument `poll_interval` is deprecated and will be removed "
"in a future release. Please use `poke_interval` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
else:
kwargs["poke_interval"] = 5
super().__init__(**kwargs)
self.project_id = project_id
self.dataset_id = dataset_id
self.table_id = table_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
table_uri = f"{self.project_id}:{self.dataset_id}.{self.table_id}"
self.log.info("Sensor checks existence of table: %s", table_uri)
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.table_exists(
project_id=self.project_id, dataset_id=self.dataset_id, table_id=self.table_id
)
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the trigger."""
if not self.deferrable:
super().execute(context)
else:
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=BigQueryTableExistenceTrigger(
dataset_id=self.dataset_id,
table_id=self.table_id,
project_id=self.project_id,
poll_interval=self.poke_interval,
gcp_conn_id=self.gcp_conn_id,
hook_params={
"impersonation_chain": self.impersonation_chain,
},
),
method_name="execute_complete",
)
def execute_complete(self, context: dict[str, Any], event: dict[str, str] | None = None) -> str:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
table_uri = f"{self.project_id}:{self.dataset_id}.{self.table_id}"
self.log.info("Sensor checks existence of table: %s", table_uri)
if event:
if event["status"] == "success":
return event["message"]
raise AirflowException(event["message"])
raise AirflowException("No event received in trigger callback")
class BigQueryTablePartitionExistenceSensor(BaseSensorOperator):
"""
    Checks for the existence of a partition within a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the table.
The connection supplied to the hook must provide
access to the specified project.
    :param dataset_id: The name of the dataset in which to look for the table.
:param table_id: The name of the table to check the existence of.
:param partition_id: The name of the partition to check the existence of.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
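
    The snippet below is an illustrative sketch (not part of this module); the task_id,
    project, dataset, table, and partition IDs are placeholders::

        wait_for_partition = BigQueryTablePartitionExistenceSensor(
            task_id="wait_for_partition",
            project_id="my-project",
            dataset_id="my_dataset",
            table_id="my_table",
            partition_id="20230101",
        )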
"""
template_fields: Sequence[str] = (
"project_id",
"dataset_id",
"table_id",
"partition_id",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
project_id: str,
dataset_id: str,
table_id: str,
partition_id: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
if deferrable and "poke_interval" not in kwargs:
kwargs["poke_interval"] = 5
super().__init__(**kwargs)
self.project_id = project_id
self.dataset_id = dataset_id
self.table_id = table_id
self.partition_id = partition_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
table_uri = f"{self.project_id}:{self.dataset_id}.{self.table_id}"
self.log.info('Sensor checks existence of partition: "%s" in table: %s', self.partition_id, table_uri)
hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.table_partition_exists(
project_id=self.project_id,
dataset_id=self.dataset_id,
table_id=self.table_id,
partition_id=self.partition_id,
)
def execute(self, context: Context) -> None:
"""Airflow runs this method on the worker and defers using the triggers if deferrable is True."""
if not self.deferrable:
super().execute(context)
else:
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=BigQueryTablePartitionExistenceTrigger(
dataset_id=self.dataset_id,
table_id=self.table_id,
project_id=self.project_id,
partition_id=self.partition_id,
poll_interval=self.poke_interval,
gcp_conn_id=self.gcp_conn_id,
hook_params={
"impersonation_chain": self.impersonation_chain,
},
),
method_name="execute_complete",
)
def execute_complete(self, context: dict[str, Any], event: dict[str, str] | None = None) -> str:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
table_uri = f"{self.project_id}:{self.dataset_id}.{self.table_id}"
self.log.info('Sensor checks existence of partition: "%s" in table: %s', self.partition_id, table_uri)
if event:
if event["status"] == "success":
return event["message"]
raise AirflowException(event["message"])
raise AirflowException("No event received in trigger callback")
class BigQueryTableExistenceAsyncSensor(BigQueryTableExistenceSensor):
"""
    Checks for the existence of a table in Google BigQuery.
This class is deprecated and will be removed in a future release.
Please use :class:`airflow.providers.google.cloud.sensors.bigquery.BigQueryTableExistenceSensor`
and set *deferrable* attribute to *True* instead.
:param project_id: The Google cloud project in which to look for the table.
The connection supplied to the hook must provide
access to the specified project.
    :param dataset_id: The name of the dataset in which to look for the table.
:param table_id: The name of the table to check the existence of.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param polling_interval: The interval in seconds to wait between checks of table existence.
"""
def __init__(self, **kwargs):
warnings.warn(
"Class `BigQueryTableExistenceAsyncSensor` is deprecated and "
"will be removed in a future release. "
"Please use `BigQueryTableExistenceSensor` and "
"set `deferrable` attribute to `True` instead",
AirflowProviderDeprecationWarning,
)
super().__init__(deferrable=True, **kwargs)
class BigQueryTableExistencePartitionAsyncSensor(BigQueryTablePartitionExistenceSensor):
"""
Checks for the existence of a partition within a table in Google BigQuery.
This class is deprecated and will be removed in a future release.
Please use :class:`airflow.providers.google.cloud.sensors.bigquery.BigQueryTablePartitionExistenceSensor`
and set *deferrable* attribute to *True* instead.
:param project_id: The Google cloud project in which to look for the table.
The connection supplied to the hook must provide
access to the specified project.
    :param dataset_id: The name of the dataset in which to look for the table.
:param partition_id: The name of the partition to check the existence of.
:param table_id: The name of the table to check the existence of.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
    :param poke_interval: The interval in seconds to wait between checks of table existence.
"""
def __init__(self, **kwargs):
warnings.warn(
"Class `BigQueryTableExistencePartitionAsyncSensor` is deprecated and "
"will be removed in a future release. "
"Please use `BigQueryTablePartitionExistenceSensor` and "
"set `deferrable` attribute to `True` instead",
AirflowProviderDeprecationWarning,
)
super().__init__(deferrable=True, **kwargs)
| 15,033 | 44.011976 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/looker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Looker sensors."""
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.looker import JobStatus, LookerHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class LookerCheckPdtBuildSensor(BaseSensorOperator):
"""
Check for the state of a previously submitted PDT materialization job.
:param materialization_id: Required. The materialization job ID to poll. (templated)
:param looker_conn_id: Required. The connection ID to use connecting to Looker.
    :param cancel_on_kill: Optional. Flag which indicates whether to cancel the hook's job
        when on_kill is called.
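
    The snippet below is an illustrative sketch (not part of this module); the task_id and
    connection ID are placeholders, and the materialization ID is assumed to come from an
    upstream task via a templated XCom pull::

        check_pdt_build = LookerCheckPdtBuildSensor(
            task_id="check_pdt_build",
            looker_conn_id="my_looker_conn",
            materialization_id="{{ task_instance.xcom_pull(task_ids='start_pdt_build') }}",
        )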
"""
template_fields = ["materialization_id"]
def __init__(
self, materialization_id: str, looker_conn_id: str, cancel_on_kill: bool = True, **kwargs
) -> None:
super().__init__(**kwargs)
self.materialization_id = materialization_id
self.looker_conn_id = looker_conn_id
self.cancel_on_kill = cancel_on_kill
self.hook: LookerHook | None = None
def poke(self, context: Context) -> bool:
self.hook = LookerHook(looker_conn_id=self.looker_conn_id)
if not self.materialization_id:
raise AirflowException("Invalid `materialization_id`.")
# materialization_id is templated var pulling output from start task
status_dict = self.hook.pdt_build_status(materialization_id=self.materialization_id)
status = status_dict["status"]
if status == JobStatus.ERROR.value:
msg = status_dict["message"]
raise AirflowException(
f'PDT materialization job failed. Job id: {self.materialization_id}. Message:\n"{msg}"'
)
elif status == JobStatus.CANCELLED.value:
raise AirflowException(
f"PDT materialization job was cancelled. Job id: {self.materialization_id}."
)
elif status == JobStatus.UNKNOWN.value:
raise AirflowException(
f"PDT materialization job has unknown status. Job id: {self.materialization_id}."
)
elif status == JobStatus.DONE.value:
self.log.debug(
"PDT materialization job completed successfully. Job id: %s.", self.materialization_id
)
return True
self.log.info("Waiting for PDT materialization job to complete. Job id: %s.", self.materialization_id)
return False
def on_kill(self):
if self.materialization_id and self.cancel_on_kill:
self.hook.stop_pdt_build(materialization_id=self.materialization_id)
| 3,579 | 39.681818 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/dataform.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Dataform sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataform import DataformHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataformWorkflowInvocationStateSensor(BaseSensorOperator):
"""
Checks for the status of a Workflow Invocation in Google Cloud Dataform.
:param project_id: Required, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param region: Required, The location of the Dataform workflow invocation (for example europe-west1).
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: Required, ID of the workflow invocation to be checked.
:param expected_statuses: The expected state of the operation.
See:
https://cloud.google.com/python/docs/reference/dataform/latest/google.cloud.dataform_v1beta1.types.WorkflowInvocation.State
    :param failure_statuses: States that will terminate the sensor with an exception
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
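
    The snippet below is an illustrative sketch (not part of this module); the task_id and
    resource IDs are placeholders, and it assumes ``WorkflowInvocation`` is imported from
    ``google.cloud.dataform_v1beta1``::

        wait_for_workflow_invocation = DataformWorkflowInvocationStateSensor(
            task_id="wait_for_workflow_invocation",
            project_id="my-project",
            region="europe-west1",
            repository_id="my-repository",
            workflow_invocation_id="my-workflow-invocation-id",
            expected_statuses={WorkflowInvocation.State.SUCCEEDED},
        )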
"""
template_fields: Sequence[str] = ("workflow_invocation_id",)
def __init__(
self,
*,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
expected_statuses: set[int] | int,
failure_statuses: Iterable[int] | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.repository_id = repository_id
self.workflow_invocation_id = workflow_invocation_id
self.expected_statuses = (
{expected_statuses} if isinstance(expected_statuses, int) else expected_statuses
)
self.failure_statuses = failure_statuses
self.project_id = project_id
self.region = region
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataformHook | None = None
def poke(self, context: Context) -> bool:
self.hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
workflow_invocation = self.hook.get_workflow_invocation(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=self.workflow_invocation_id,
)
workflow_status = workflow_invocation.state
if workflow_status is not None:
if self.failure_statuses and workflow_status in self.failure_statuses:
raise AirflowException(
f"Workflow Invocation with id '{self.workflow_invocation_id}' "
f"state is: {workflow_status}. Terminating sensor..."
)
return workflow_status in self.expected_statuses
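# Illustrative usage sketch (not part of the provider module): the snippet below shows how
# this sensor might be wired into a DAG. All IDs are placeholders, and the state values are
# assumed to come from google.cloud.dataform_v1beta1.types.WorkflowInvocation.State.
#
#     from google.cloud.dataform_v1beta1 import WorkflowInvocation
#
#     wait_for_invocation = DataformWorkflowInvocationStateSensor(
#         task_id="wait_for_workflow_invocation",
#         project_id="my-project",
#         region="europe-west1",
#         repository_id="my-repository",
#         workflow_invocation_id="my-invocation-id",
#         expected_statuses={WorkflowInvocation.State.SUCCEEDED},
#         failure_statuses=[WorkflowInvocation.State.FAILED, WorkflowInvocation.State.CANCELLED],
#     )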
| 4,716 | 44.355769 | 131 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/bigquery_dts.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google BigQuery Data Transfer Service sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.bigquery_datatransfer_v1 import TransferState
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.bigquery_dts import BiqQueryDataTransferServiceHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryDataTransferServiceTransferRunSensor(BaseSensorOperator):
"""
Waits for Data Transfer Service run to complete.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/operator:BigQueryDataTransferServiceTransferRunSensor`
:param expected_statuses: The expected state of the operation.
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:param run_id: ID of the transfer run.
:param transfer_config_id: ID of transfer config to be used.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param request_timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:return: An ``google.cloud.bigquery_datatransfer_v1.types.TransferRun`` instance.
"""
template_fields: Sequence[str] = (
"run_id",
"transfer_config_id",
"expected_statuses",
"project_id",
"impersonation_chain",
)
def __init__(
self,
*,
run_id: str,
transfer_config_id: str,
expected_statuses: (
set[str | TransferState | int] | str | TransferState | int
) = TransferState.SUCCEEDED,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
retry: Retry | _MethodDefault = DEFAULT,
request_timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.run_id = run_id
self.transfer_config_id = transfer_config_id
self.retry = retry
self.request_timeout = request_timeout
self.metadata = metadata
self.expected_statuses = self._normalize_state_list(expected_statuses)
self.project_id = project_id
self.gcp_cloud_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
def _normalize_state_list(self, states) -> set[TransferState]:
states = {states} if isinstance(states, (str, TransferState, int)) else states
result = set()
for state in states:
if isinstance(state, str):
# The proto.Enum type is indexable (via MetaClass and aliased) but MyPy is not able to
# infer this https://github.com/python/mypy/issues/8968
result.add(TransferState[state.upper()]) # type: ignore[misc]
elif isinstance(state, int):
result.add(TransferState(state))
elif isinstance(state, TransferState):
result.add(state)
else:
raise TypeError(
f"Unsupported type. "
f"Expected: str, int, google.cloud.bigquery_datatransfer_v1.TransferState."
f"Current type: {type(state)}"
)
return result
def poke(self, context: Context) -> bool:
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
location=self.location,
)
run = hook.get_transfer_run(
run_id=self.run_id,
transfer_config_id=self.transfer_config_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.request_timeout,
metadata=self.metadata,
)
self.log.info("Status of %s run: %s", self.run_id, str(run.state))
if run.state in (TransferState.FAILED, TransferState.CANCELLED):
raise AirflowException(f"Transfer {self.run_id} did not succeed")
return run.state in self.expected_statuses
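# Illustrative usage sketch (placeholder IDs): expected_statuses may be given as strings,
# ints or TransferState members; _normalize_state_list() converts them to TransferState
# values before comparing against the run state returned by the hook.
#
#     wait_for_run = BigQueryDataTransferServiceTransferRunSensor(
#         task_id="wait_for_transfer_run",
#         transfer_config_id="my-transfer-config-id",
#         run_id="my-run-id",
#         expected_statuses={"SUCCEEDED"},
#         project_id="my-project",
#     )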
| 6,303 | 42.777778 | 108 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/datafusion.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Data Fusion sensors."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, Sequence
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.providers.google.cloud.hooks.datafusion import DataFusionHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudDataFusionPipelineStateSensor(BaseSensorOperator):
"""
    Check the status of the pipeline in Google Cloud Data Fusion.
:param pipeline_name: Your pipeline name.
:param pipeline_id: Your pipeline ID.
:param expected_statuses: State that is expected
:param failure_statuses: State that will terminate the sensor with an exception
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("pipeline_id",)
def __init__(
self,
pipeline_name: str,
pipeline_id: str,
expected_statuses: Iterable[str],
instance_name: str,
location: str,
failure_statuses: Iterable[str] | None = None,
project_id: str | None = None,
namespace: str = "default",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.pipeline_id = pipeline_id
self.expected_statuses = expected_statuses
self.failure_statuses = failure_statuses
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.namespace = namespace
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def poke(self, context: Context) -> bool:
self.log.info(
"Waiting for pipeline %s to be in one of the states: %s.",
self.pipeline_id,
", ".join(self.expected_statuses),
)
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
pipeline_status = None
try:
pipeline_workflow = hook.get_pipeline_workflow(
pipeline_name=self.pipeline_name,
instance_url=api_url,
pipeline_id=self.pipeline_id,
namespace=self.namespace,
)
pipeline_status = pipeline_workflow["status"]
except AirflowNotFoundException:
raise AirflowException("Specified Pipeline ID was not found.")
except AirflowException:
            pass # Because the pipeline may not be visible in the system yet
if pipeline_status is not None:
if self.failure_statuses and pipeline_status in self.failure_statuses:
raise AirflowException(
f"Pipeline with id '{self.pipeline_id}' state is: {pipeline_status}. "
f"Terminating sensor..."
)
self.log.debug(
"Current status of the pipeline workflow for %s: %s.", self.pipeline_id, pipeline_status
)
return pipeline_status in self.expected_statuses
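# Illustrative usage sketch (placeholder values; the status strings are example CDAP
# pipeline states, not an exhaustive list):
#
#     wait_for_pipeline = CloudDataFusionPipelineStateSensor(
#         task_id="wait_for_pipeline_state",
#         pipeline_name="my_pipeline",
#         pipeline_id="my-pipeline-run-id",
#         expected_statuses=["COMPLETED"],
#         failure_statuses=["FAILED", "KILLED"],
#         instance_name="my-instance",
#         location="europe-west1",
#         project_id="my-project",
#     )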
| 5,377 | 41.68254 | 100 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/bigtable.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Bigtable sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
import google.api_core.exceptions
from google.cloud.bigtable import enums
from google.cloud.bigtable.table import ClusterState
from airflow.providers.google.cloud.hooks.bigtable import BigtableHook
from airflow.providers.google.cloud.links.bigtable import BigtableTablesLink
from airflow.providers.google.cloud.operators.bigtable import BigtableValidationMixin
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigtableTableReplicationCompletedSensor(BaseSensorOperator, BigtableValidationMixin):
"""
Sensor that waits for Cloud Bigtable table to be fully replicated to its clusters.
No exception will be raised if the instance or the table does not exist.
For more details about cluster states for a table, have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/table.html#google.cloud.bigtable.table.Table.get_cluster_states
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableTableReplicationCompletedSensor`
:param instance_id: The ID of the Cloud Bigtable instance.
:param table_id: The ID of the table to check replication status.
:param project_id: Optional, the ID of the Google Cloud project.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
REQUIRED_ATTRIBUTES = ("instance_id", "table_id")
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"table_id",
"impersonation_chain",
)
operator_extra_links = (BigtableTablesLink(),)
def __init__(
self,
*,
instance_id: str,
table_id: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.project_id = project_id
self.instance_id = instance_id
self.table_id = table_id
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
def poke(self, context: Context) -> bool:
hook = BigtableHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
if not instance:
self.log.info("Dependency: instance '%s' does not exist.", self.instance_id)
return False
try:
cluster_states = hook.get_cluster_states_for_table(instance=instance, table_id=self.table_id)
except google.api_core.exceptions.NotFound:
self.log.info(
"Dependency: table '%s' does not exist in instance '%s'.", self.table_id, self.instance_id
)
return False
ready_state = ClusterState(enums.Table.ReplicationState.READY)
is_table_replicated = True
for cluster_id in cluster_states.keys():
if cluster_states[cluster_id] != ready_state:
self.log.info("Table '%s' is not yet replicated on cluster '%s'.", self.table_id, cluster_id)
is_table_replicated = False
if not is_table_replicated:
return False
self.log.info("Table '%s' is replicated.", self.table_id)
BigtableTablesLink.persist(context=context, task_instance=self)
return True
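# Illustrative usage sketch (placeholder IDs); timeout and poke_interval are the standard
# BaseSensorOperator knobs:
#
#     wait_for_replication = BigtableTableReplicationCompletedSensor(
#         task_id="wait_for_table_replication",
#         project_id="my-project",
#         instance_id="my-bigtable-instance",
#         table_id="my-table",
#         timeout=3600,
#         poke_interval=60,
#     )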
| 5,059 | 40.818182 | 132 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/cloud_storage_transfer_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Transfer sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import (
COUNTERS,
METADATA,
NAME,
CloudDataTransferServiceHook,
)
from airflow.providers.google.cloud.links.cloud_storage_transfer import CloudStorageTransferJobLink
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudDataTransferServiceJobStatusSensor(BaseSensorOperator):
"""
Waits for at least one operation belonging to the job to have the expected status.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceJobStatusSensor`
:param job_name: The name of the transfer job
:param expected_statuses: The expected state of the operation.
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_job_sensor_template_fields]
template_fields: Sequence[str] = (
"job_name",
"impersonation_chain",
)
# [END gcp_transfer_job_sensor_template_fields]
operator_extra_links = (CloudStorageTransferJobLink(),)
def __init__(
self,
*,
job_name: str,
expected_statuses: set[str] | str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_name = job_name
self.expected_statuses = (
{expected_statuses} if isinstance(expected_statuses, str) else expected_statuses
)
self.project_id = project_id
self.gcp_cloud_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def poke(self, context: Context) -> bool:
hook = CloudDataTransferServiceHook(
gcp_conn_id=self.gcp_cloud_conn_id,
impersonation_chain=self.impersonation_chain,
)
operations = hook.list_transfer_operations(
request_filter={"project_id": self.project_id or hook.project_id, "job_names": [self.job_name]}
)
for operation in operations:
self.log.info("Progress for operation %s: %s", operation[NAME], operation[METADATA][COUNTERS])
check = CloudDataTransferServiceHook.operations_contain_expected_statuses(
operations=operations, expected_statuses=self.expected_statuses
)
if check:
self.xcom_push(key="sensed_operations", value=operations, context=context)
project_id = self.project_id or hook.project_id
if project_id:
CloudStorageTransferJobLink.persist(
context=context,
task_instance=self,
project_id=project_id,
job_name=self.job_name,
)
return check
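# Illustrative usage sketch (placeholder names): the job_name would typically come from an
# upstream task that created the transfer job; the upstream task id used in the template
# below is hypothetical.
#
#     wait_for_transfer_job = CloudDataTransferServiceJobStatusSensor(
#         task_id="wait_for_transfer_job",
#         job_name="{{ task_instance.xcom_pull('create_transfer_job')['name'] }}",
#         expected_statuses={"SUCCESS"},
#         project_id="my-project",
#     )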
| 4,789 | 39.940171 | 107 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/dataproc.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Dataproc Job sensor."""
from __future__ import annotations
import time
from typing import TYPE_CHECKING, Sequence
from google.api_core.exceptions import ServerError
from google.cloud.dataproc_v1.types import Batch, JobStatus
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataproc import DataprocHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataprocJobSensor(BaseSensorOperator):
"""
Check for the state of a previously submitted Dataproc job.
:param dataproc_job_id: The Dataproc job ID to poll. (templated)
:param region: Required. The Cloud Dataproc region in which to handle the request. (templated)
:param project_id: The ID of the google cloud project in which
to create the cluster. (templated)
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :param wait_timeout: How many seconds to wait for the job to be ready.
"""
template_fields: Sequence[str] = ("project_id", "region", "dataproc_job_id")
ui_color = "#f0eee4"
def __init__(
self,
*,
dataproc_job_id: str,
region: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
wait_timeout: int | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.dataproc_job_id = dataproc_job_id
self.region = region
self.wait_timeout = wait_timeout
self.start_sensor_time: float | None = None
def execute(self, context: Context) -> None:
self.start_sensor_time = time.monotonic()
super().execute(context)
def _duration(self):
return time.monotonic() - self.start_sensor_time
def poke(self, context: Context) -> bool:
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id)
if self.wait_timeout:
try:
job = hook.get_job(
job_id=self.dataproc_job_id, region=self.region, project_id=self.project_id
)
except ServerError as err:
duration = self._duration()
self.log.info("DURATION RUN: %f", duration)
if duration > self.wait_timeout:
raise AirflowException(
f"Timeout: dataproc job {self.dataproc_job_id} "
f"is not ready after {self.wait_timeout}s"
)
self.log.info("Retrying. Dataproc API returned server error when waiting for job: %s", err)
return False
else:
job = hook.get_job(job_id=self.dataproc_job_id, region=self.region, project_id=self.project_id)
state = job.status.state
if state == JobStatus.State.ERROR:
raise AirflowException(f"Job failed:\n{job}")
elif state in {
JobStatus.State.CANCELLED,
JobStatus.State.CANCEL_PENDING,
JobStatus.State.CANCEL_STARTED,
}:
raise AirflowException(f"Job was cancelled:\n{job}")
elif JobStatus.State.DONE == state:
self.log.debug("Job %s completed successfully.", self.dataproc_job_id)
return True
elif JobStatus.State.ATTEMPT_FAILURE == state:
self.log.debug("Job %s attempt has failed.", self.dataproc_job_id)
self.log.info("Waiting for job %s to complete.", self.dataproc_job_id)
return False
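# Illustrative usage sketch for DataprocJobSensor above (placeholder values; the upstream
# task id in the template is hypothetical):
#
#     wait_for_job = DataprocJobSensor(
#         task_id="wait_for_dataproc_job",
#         dataproc_job_id="{{ task_instance.xcom_pull('submit_job') }}",
#         region="europe-west1",
#         project_id="my-project",
#         wait_timeout=3600,
#         poke_interval=30,
#     )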
class DataprocBatchSensor(BaseSensorOperator):
"""
    Check for the state of a batch.
:param batch_id: The Dataproc batch ID to poll. (templated)
:param region: Required. The Cloud Dataproc region in which to handle the request. (templated)
:param project_id: The ID of the google cloud project in which
to create the cluster. (templated)
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
    :param wait_timeout: How many seconds to wait for the batch to be ready.
"""
template_fields: Sequence[str] = ("project_id", "region", "batch_id")
ui_color = "#f0eee4"
def __init__(
self,
*,
batch_id: str,
region: str,
project_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
wait_timeout: int | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.batch_id = batch_id
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.region = region
self.wait_timeout = wait_timeout
self.start_sensor_time: float | None = None
def execute(self, context: Context) -> None:
self.start_sensor_time = time.monotonic()
super().execute(context)
def _duration(self):
return time.monotonic() - self.start_sensor_time
def poke(self, context: Context) -> bool:
hook = DataprocHook(gcp_conn_id=self.gcp_conn_id)
if self.wait_timeout:
try:
batch = hook.get_batch(batch_id=self.batch_id, region=self.region, project_id=self.project_id)
except ServerError as err:
duration = self._duration()
self.log.info("DURATION RUN: %f", duration)
if duration > self.wait_timeout:
raise AirflowException(
f"Timeout: dataproc batch {self.batch_id} is not ready after {self.wait_timeout}s"
)
self.log.info("Retrying. Dataproc API returned server error when waiting for batch: %s", err)
return False
else:
batch = hook.get_batch(batch_id=self.batch_id, region=self.region, project_id=self.project_id)
state = batch.state
if state == Batch.State.FAILED:
raise AirflowException("Batch failed")
elif state in {
Batch.State.CANCELLED,
Batch.State.CANCELLING,
}:
raise AirflowException("Batch was cancelled.")
elif state == Batch.State.SUCCEEDED:
self.log.debug("Batch %s completed successfully.", self.batch_id)
return True
self.log.info("Waiting for the batch %s to complete.", self.batch_id)
return False
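# Illustrative usage sketch for DataprocBatchSensor (placeholder IDs):
#
#     wait_for_batch = DataprocBatchSensor(
#         task_id="wait_for_dataproc_batch",
#         batch_id="my-batch-id",
#         region="europe-west1",
#         project_id="my-project",
#         poke_interval=60,
#     )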
| 7,188 | 37.650538 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/dataflow.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Dataflow sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.dataflow import (
DEFAULT_DATAFLOW_LOCATION,
DataflowHook,
DataflowJobStatus,
)
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DataflowJobStatusSensor(BaseSensorOperator):
"""
Checks for the status of a job in Google Cloud Dataflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowJobStatusSensor`
:param job_id: ID of the job to be checked.
:param expected_statuses: The expected state of the operation.
See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("job_id",)
def __init__(
self,
*,
job_id: str,
expected_statuses: set[str] | str,
project_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.expected_statuses = (
{expected_statuses} if isinstance(expected_statuses, str) else expected_statuses
)
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataflowHook | None = None
def poke(self, context: Context) -> bool:
self.log.info(
"Waiting for job %s to be in one of the states: %s.",
self.job_id,
", ".join(self.expected_statuses),
)
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
job = self.hook.get_job(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
job_status = job["currentState"]
self.log.debug("Current job status for job %s: %s.", self.job_id, job_status)
if job_status in self.expected_statuses:
return True
elif job_status in DataflowJobStatus.TERMINAL_STATES:
raise AirflowException(f"Job with id '{self.job_id}' is already in terminal state: {job_status}")
return False
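# Illustrative usage sketch for DataflowJobStatusSensor above (placeholder IDs;
# DataflowJobStatus is imported at the top of this module):
#
#     wait_for_job_done = DataflowJobStatusSensor(
#         task_id="wait_for_dataflow_job",
#         job_id="my-dataflow-job-id",
#         expected_statuses={DataflowJobStatus.JOB_STATE_DONE},
#         project_id="my-project",
#         location="europe-west1",
#     )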
class DataflowJobMetricsSensor(BaseSensorOperator):
"""
Checks the metrics of a job in Google Cloud Dataflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowJobMetricsSensor`
:param job_id: ID of the job to be checked.
:param callback: callback which is called with list of read job metrics
See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/MetricUpdate
    :param fail_on_terminal_state: If set to True, the sensor will raise an exception when
        the job reaches a terminal state.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: The location of the Dataflow job (for example europe-west1). See:
https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("job_id",)
def __init__(
self,
*,
job_id: str,
callback: Callable[[dict], bool],
fail_on_terminal_state: bool = True,
project_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.project_id = project_id
self.callback = callback
self.fail_on_terminal_state = fail_on_terminal_state
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataflowHook | None = None
def poke(self, context: Context) -> bool:
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.fail_on_terminal_state:
job = self.hook.get_job(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
job_status = job["currentState"]
if job_status in DataflowJobStatus.TERMINAL_STATES:
raise AirflowException(
f"Job with id '{self.job_id}' is already in terminal state: {job_status}"
)
result = self.hook.fetch_job_metrics_by_id(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
return self.callback(result["metrics"])
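# Illustrative usage sketch for DataflowJobMetricsSensor above: the callback receives the
# list of MetricUpdate dicts returned by fetch_job_metrics_by_id() and must return a bool.
# The metric check below is purely hypothetical.
#
#     def check_metrics(metrics: list[dict]) -> bool:
#         return any(m.get("scalar") is not None for m in metrics)
#
#     wait_for_metrics = DataflowJobMetricsSensor(
#         task_id="wait_for_dataflow_metrics",
#         job_id="my-dataflow-job-id",
#         callback=check_metrics,
#         fail_on_terminal_state=True,
#         location="europe-west1",
#     )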
class DataflowJobMessagesSensor(BaseSensorOperator):
"""
Checks for the job message in Google Cloud Dataflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowJobMessagesSensor`
:param job_id: ID of the job to be checked.
    :param callback: callback which is called with the list of read job messages
See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/MetricUpdate
    :param fail_on_terminal_state: If set to True, the sensor will raise an exception when
        the job reaches a terminal state.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("job_id",)
def __init__(
self,
*,
job_id: str,
callback: Callable,
fail_on_terminal_state: bool = True,
project_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.project_id = project_id
self.callback = callback
self.fail_on_terminal_state = fail_on_terminal_state
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataflowHook | None = None
def poke(self, context: Context) -> bool:
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.fail_on_terminal_state:
job = self.hook.get_job(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
job_status = job["currentState"]
if job_status in DataflowJobStatus.TERMINAL_STATES:
raise AirflowException(
f"Job with id '{self.job_id}' is already in terminal state: {job_status}"
)
result = self.hook.fetch_job_messages_by_id(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
return self.callback(result)
class DataflowJobAutoScalingEventsSensor(BaseSensorOperator):
"""
Checks for the job autoscaling event in Google Cloud Dataflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowJobAutoScalingEventsSensor`
:param job_id: ID of the job to be checked.
    :param callback: callback which is called with the list of read autoscaling events
See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/MetricUpdate
    :param fail_on_terminal_state: If set to True, the sensor will raise an exception when
        the job reaches a terminal state.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: Job location.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = ("job_id",)
def __init__(
self,
*,
job_id: str,
callback: Callable,
fail_on_terminal_state: bool = True,
project_id: str | None = None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.project_id = project_id
self.callback = callback
self.fail_on_terminal_state = fail_on_terminal_state
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: DataflowHook | None = None
def poke(self, context: Context) -> bool:
self.hook = DataflowHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if self.fail_on_terminal_state:
job = self.hook.get_job(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
job_status = job["currentState"]
if job_status in DataflowJobStatus.TERMINAL_STATES:
raise AirflowException(
f"Job with id '{self.job_id}' is already in terminal state: {job_status}"
)
result = self.hook.fetch_job_autoscaling_events_by_id(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
return self.callback(result)
| 14,373 | 40.068571 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/tasks.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Task sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.google.cloud.hooks.tasks import CloudTasksHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class TaskQueueEmptySensor(BaseSensorOperator):
"""
    Pulls the task count from a Cloud Tasks queue; waits for the queue to report a task count of 0.
:param project_id: the Google Cloud project ID for the subscription (templated)
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param queue_name: The queue name for which task-empty sensing is required.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"location",
"queue_name",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
project_id: str | None = None,
queue_name: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.project_id = project_id
self.queue_name = queue_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def poke(self, context: Context) -> bool:
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
# TODO uncomment page_size once https://issuetracker.google.com/issues/155978649?pli=1 gets fixed
tasks = hook.list_tasks(
location=self.location,
queue_name=self.queue_name,
# page_size=1
)
self.log.info("tasks exhausted in cloud task queue?: %s" % (len(tasks) == 0))
return len(tasks) == 0
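# Illustrative usage sketch (placeholder names):
#
#     wait_for_empty_queue = TaskQueueEmptySensor(
#         task_id="wait_for_empty_task_queue",
#         location="europe-west1",
#         project_id="my-project",
#         queue_name="my-queue",
#         poke_interval=60,
#     )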
| 3,398 | 37.191011 | 105 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/dataproc_metastore.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.operation import Operation
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.dataproc_metastore import DataprocMetastoreHook
from airflow.providers.google.cloud.hooks.gcs import parse_json_from_gcs
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class MetastoreHivePartitionSensor(BaseSensorOperator):
"""
Waits for partitions to show up in Hive.
This sensor uses Google Cloud SDK and passes requests via gRPC.
:param service_id: Required. Dataproc Metastore service id.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param table: Required. Name of the partitioned table
:param partitions: List of table partitions to wait for.
A name of a partition should look like "ds=1", or "a=1/b=2" in case of nested partitions.
Note that you cannot use logical or comparison operators as in HivePartitionSensor.
        If not specified, then the sensor will wait for at least one partition regardless of its name.
:param gcp_conn_id: Airflow Google Cloud connection ID.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
template_fields: Sequence[str] = (
"service_id",
"region",
"table",
"partitions",
"impersonation_chain",
)
def __init__(
self,
service_id: str,
region: str,
table: str,
partitions: list[str] | None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.service_id = service_id
self.region = region
self.table = table
self.partitions = partitions or []
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def poke(self, context: Context) -> bool:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
operation: Operation = hook.list_hive_partitions(
region=self.region, service_id=self.service_id, table=self.table, partition_names=self.partitions
)
metadata = hook.wait_for_operation(timeout=self.timeout, operation=operation)
result_manifest_uri: str = metadata.result_manifest_uri
self.log.info("Received result manifest URI: %s", result_manifest_uri)
self.log.info("Extracting result manifest")
manifest: dict = parse_json_from_gcs(gcp_conn_id=self.gcp_conn_id, file_uri=result_manifest_uri)
if not (manifest and isinstance(manifest, dict)):
raise AirflowException(
f"Failed to extract result manifest. "
f"Expected not empty dict, but this was received: {manifest}"
)
if manifest.get("status", {}).get("code") != 0:
raise AirflowException(f"Request failed: {manifest.get('message')}")
# Extract actual query results
result_base_uri = result_manifest_uri.rsplit("/", 1)[0]
results = (f"{result_base_uri}//{filename}" for filename in manifest.get("filenames", []))
found_partitions = sum(
len(parse_json_from_gcs(gcp_conn_id=self.gcp_conn_id, file_uri=uri).get("rows", []))
for uri in results
)
# Return True if we got all requested partitions.
# If no partitions were given in the request, then we expect to find at least one.
return found_partitions > 0 and found_partitions >= len(set(self.partitions))
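# Illustrative usage sketch (placeholder names; the partition spec follows the "key=value"
# convention described in the docstring):
#
#     wait_for_partitions = MetastoreHivePartitionSensor(
#         task_id="wait_for_hive_partitions",
#         service_id="my-metastore-service",
#         region="europe-west1",
#         table="my_table",
#         partitions=["ds=2023-01-01"],
#         timeout=3600,
#     )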
| 5,124 | 42.803419 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/sensors/workflows.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.workflows.executions_v1beta import Execution
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.workflows import WorkflowsHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class WorkflowExecutionSensor(BaseSensorOperator):
"""
Checks state of an execution for the given ``workflow_id`` and ``execution_id``.
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
    :param project_id: Required. The ID of the Google Cloud project the workflow belongs to.
    :param location: Required. The Google Cloud region in which to handle the request.
:param success_states: Execution states to be considered as successful, by default
it's only ``SUCCEEDED`` state
:param failure_states: Execution states to be considered as failures, by default
they are ``FAILED`` and ``CANCELLED`` states.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param request_timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id", "execution_id")
def __init__(
self,
*,
workflow_id: str,
execution_id: str,
location: str,
project_id: str | None = None,
success_states: set[Execution.State] | None = None,
failure_states: set[Execution.State] | None = None,
retry: Retry | _MethodDefault = DEFAULT,
request_timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.success_states = success_states or {Execution.State(Execution.State.SUCCEEDED)}
self.failure_states = failure_states or {
Execution.State(Execution.State.FAILED),
Execution.State(Execution.State.CANCELLED),
}
self.workflow_id = workflow_id
self.execution_id = execution_id
self.location = location
self.project_id = project_id
self.retry = retry
self.request_timeout = request_timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def poke(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Checking state of execution %s for workflow %s", self.execution_id, self.workflow_id)
execution: Execution = hook.get_execution(
workflow_id=self.workflow_id,
execution_id=self.execution_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.request_timeout,
metadata=self.metadata,
)
state = execution.state
if state in self.failure_states:
raise AirflowException(
f"Execution {self.execution_id} for workflow {self.execution_id} "
f"failed and is in `{state}` state",
)
if state in self.success_states:
self.log.info(
"Execution %s for workflow %s completed with state: %s",
self.execution_id,
self.workflow_id,
state,
)
return True
self.log.info(
"Execution %s for workflow %s does not completed yet, current state: %s",
self.execution_id,
self.workflow_id,
state,
)
return False
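# Illustrative usage sketch (placeholder IDs; the execution id would usually come from an
# upstream operator that created the execution):
#
#     wait_for_execution = WorkflowExecutionSensor(
#         task_id="wait_for_workflow_execution",
#         workflow_id="my-workflow",
#         execution_id="my-execution-id",
#         location="europe-west1",
#         project_id="my-project",
#     )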
| 5,009 | 39.731707 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_salesforce_to_gcs.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use SalesforceToGcsOperator.
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateEmptyTableOperator,
BigQueryDeleteDatasetOperator,
BigQueryInsertJobOperator,
)
from airflow.providers.google.cloud.operators.gcs import GCSCreateBucketOperator, GCSDeleteBucketOperator
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.providers.google.cloud.transfers.salesforce_to_gcs import SalesforceToGcsOperator
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCS_BUCKET = os.environ.get("GCS_BUCKET", "airflow-salesforce-bucket")
DATASET_NAME = os.environ.get("SALESFORCE_DATASET_NAME", "salesforce_test_dataset")
TABLE_NAME = os.environ.get("SALESFORCE_TABLE_NAME", "salesforce_test_datatable")
GCS_OBJ_PATH = os.environ.get("GCS_OBJ_PATH", "results.csv")
QUERY = "SELECT Id, Name, Company, Phone, Email, CreatedDate, LastModifiedDate, IsDeleted FROM Lead"
GCS_CONN_ID = os.environ.get("GCS_CONN_ID", "google_cloud_default")
SALESFORCE_CONN_ID = os.environ.get("SALESFORCE_CONN_ID", "salesforce_default")
with models.DAG(
"example_salesforce_to_gcs",
start_date=datetime(2021, 1, 1),
catchup=False,
) as dag:
create_bucket = GCSCreateBucketOperator(
task_id="create_bucket",
bucket_name=GCS_BUCKET,
project_id=GCP_PROJECT_ID,
gcp_conn_id=GCS_CONN_ID,
)
# [START howto_operator_salesforce_to_gcs]
gcs_upload_task = SalesforceToGcsOperator(
query=QUERY,
include_deleted=True,
bucket_name=GCS_BUCKET,
object_name=GCS_OBJ_PATH,
salesforce_conn_id=SALESFORCE_CONN_ID,
export_format="csv",
coerce_to_timestamp=False,
record_time_added=False,
gcp_conn_id=GCS_CONN_ID,
task_id="upload_to_gcs",
dag=dag,
)
# [END howto_operator_salesforce_to_gcs]
create_dataset = BigQueryCreateEmptyDatasetOperator(
task_id="create_dataset", dataset_id=DATASET_NAME, project_id=GCP_PROJECT_ID, gcp_conn_id=GCS_CONN_ID
)
create_table = BigQueryCreateEmptyTableOperator(
task_id="create_table",
dataset_id=DATASET_NAME,
table_id=TABLE_NAME,
schema_fields=[
{"name": "id", "type": "STRING", "mode": "NULLABLE"},
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "company", "type": "STRING", "mode": "NULLABLE"},
{"name": "phone", "type": "STRING", "mode": "NULLABLE"},
{"name": "email", "type": "STRING", "mode": "NULLABLE"},
{"name": "createddate", "type": "STRING", "mode": "NULLABLE"},
{"name": "lastmodifieddate", "type": "STRING", "mode": "NULLABLE"},
{"name": "isdeleted", "type": "BOOL", "mode": "NULLABLE"},
],
)
load_csv = GCSToBigQueryOperator(
task_id="gcs_to_bq",
bucket=GCS_BUCKET,
source_objects=[GCS_OBJ_PATH],
destination_project_dataset_table=f"{DATASET_NAME}.{TABLE_NAME}",
write_disposition="WRITE_TRUNCATE",
)
read_data_from_gcs = BigQueryInsertJobOperator(
task_id="read_data_from_gcs",
configuration={
"query": {
"query": f"SELECT COUNT(*) FROM `{GCP_PROJECT_ID}.{DATASET_NAME}.{TABLE_NAME}`",
"useLegacySql": False,
}
},
)
delete_bucket = GCSDeleteBucketOperator(
task_id="delete_bucket",
bucket_name=GCS_BUCKET,
)
delete_dataset = BigQueryDeleteDatasetOperator(
task_id="delete_dataset",
project_id=GCP_PROJECT_ID,
dataset_id=DATASET_NAME,
delete_contents=True,
)
create_bucket >> gcs_upload_task >> load_csv
create_dataset >> create_table >> load_csv
load_csv >> read_data_from_gcs
read_data_from_gcs >> delete_bucket
read_data_from_gcs >> delete_dataset
| 4,891 | 36.922481 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_cloud_sql_query.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that performs a query in a Cloud SQL instance.
This DAG relies on the following OS environment variables:
* GCP_PROJECT_ID - Google Cloud project for the Cloud SQL instance
* GCP_REGION - Google Cloud region where the database is created
*
* GCSQL_POSTGRES_INSTANCE_NAME - Name of the postgres Cloud SQL instance
* GCSQL_POSTGRES_USER - Name of the postgres database user
* GCSQL_POSTGRES_PASSWORD - Password of the postgres database user
* GCSQL_POSTGRES_PUBLIC_IP - Public IP of the Postgres database
* GCSQL_POSTGRES_PUBLIC_PORT - Port of the postgres database
*
* GCSQL_MYSQL_INSTANCE_NAME - Name of the mysql Cloud SQL instance
* GCSQL_MYSQL_USER - Name of the mysql database user
* GCSQL_MYSQL_PASSWORD - Password of the mysql database user
* GCSQL_MYSQL_PUBLIC_IP - Public IP of the mysql database
* GCSQL_MYSQL_PUBLIC_PORT - Port of the mysql database
"""
from __future__ import annotations
import os
import subprocess
from datetime import datetime
from os.path import expanduser
from urllib.parse import quote_plus
from airflow import models
from airflow.providers.google.cloud.operators.cloud_sql import CloudSQLExecuteQueryOperator
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_REGION = os.environ.get("GCP_REGION", "europe-west1")
GCSQL_POSTGRES_INSTANCE_NAME_QUERY = os.environ.get(
"GCSQL_POSTGRES_INSTANCE_NAME_QUERY", "test-postgres-query"
)
GCSQL_POSTGRES_DATABASE_NAME = os.environ.get("GCSQL_POSTGRES_DATABASE_NAME", "postgresdb")
GCSQL_POSTGRES_USER = os.environ.get("GCSQL_POSTGRES_USER", "postgres_user")
GCSQL_POSTGRES_PASSWORD = os.environ.get("GCSQL_POSTGRES_PASSWORD", "JoxHlwrPzwch0gz9")
GCSQL_POSTGRES_PUBLIC_IP = os.environ.get("GCSQL_POSTGRES_PUBLIC_IP", "0.0.0.0")
GCSQL_POSTGRES_PUBLIC_PORT = os.environ.get("GCSQL_POSTGRES_PUBLIC_PORT", 5432)
GCSQL_POSTGRES_CLIENT_CERT_FILE = os.environ.get(
"GCSQL_POSTGRES_CLIENT_CERT_FILE", ".key/postgres-client-cert.pem"
)
GCSQL_POSTGRES_CLIENT_KEY_FILE = os.environ.get(
"GCSQL_POSTGRES_CLIENT_KEY_FILE", ".key/postgres-client-key.pem"
)
GCSQL_POSTGRES_SERVER_CA_FILE = os.environ.get("GCSQL_POSTGRES_SERVER_CA_FILE", ".key/postgres-server-ca.pem")
GCSQL_MYSQL_INSTANCE_NAME_QUERY = os.environ.get("GCSQL_MYSQL_INSTANCE_NAME_QUERY", "test-mysql-query")
GCSQL_MYSQL_DATABASE_NAME = os.environ.get("GCSQL_MYSQL_DATABASE_NAME", "mysqldb")
GCSQL_MYSQL_USER = os.environ.get("GCSQL_MYSQL_USER", "mysql_user")
GCSQL_MYSQL_PASSWORD = os.environ.get("GCSQL_MYSQL_PASSWORD", "JoxHlwrPzwch0gz9")
GCSQL_MYSQL_PUBLIC_IP = os.environ.get("GCSQL_MYSQL_PUBLIC_IP", "0.0.0.0")
GCSQL_MYSQL_PUBLIC_PORT = os.environ.get("GCSQL_MYSQL_PUBLIC_PORT", 3306)
GCSQL_MYSQL_CLIENT_CERT_FILE = os.environ.get("GCSQL_MYSQL_CLIENT_CERT_FILE", ".key/mysql-client-cert.pem")
GCSQL_MYSQL_CLIENT_KEY_FILE = os.environ.get("GCSQL_MYSQL_CLIENT_KEY_FILE", ".key/mysql-client-key.pem")
GCSQL_MYSQL_SERVER_CA_FILE = os.environ.get("GCSQL_MYSQL_SERVER_CA_FILE", ".key/mysql-server-ca.pem")
SQL = [
"CREATE TABLE IF NOT EXISTS TABLE_TEST (I INTEGER)",
"CREATE TABLE IF NOT EXISTS TABLE_TEST (I INTEGER)", # shows warnings logged
"INSERT INTO TABLE_TEST VALUES (0)",
"CREATE TABLE IF NOT EXISTS TABLE_TEST2 (I INTEGER)",
"DROP TABLE TABLE_TEST",
"DROP TABLE TABLE_TEST2",
]
# [START howto_operator_cloudsql_query_connections]
HOME_DIR = expanduser("~")
def get_absolute_path(path):
"""
    Return an absolute path, resolving a relative path against the user's home directory.
"""
if path.startswith("/"):
return path
else:
return os.path.join(HOME_DIR, path)
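# For illustration (the paths here are assumptions, not part of the example): a relative path such
# as ".key/postgres-client-cert.pem" resolves to "<HOME>/.key/postgres-client-cert.pem", while an
# already absolute path such as "/etc/ssl/cert.pem" is returned unchanged.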
postgres_kwargs = dict(
user=quote_plus(GCSQL_POSTGRES_USER),
password=quote_plus(GCSQL_POSTGRES_PASSWORD),
public_port=GCSQL_POSTGRES_PUBLIC_PORT,
public_ip=quote_plus(GCSQL_POSTGRES_PUBLIC_IP),
project_id=quote_plus(GCP_PROJECT_ID),
location=quote_plus(GCP_REGION),
instance=quote_plus(GCSQL_POSTGRES_INSTANCE_NAME_QUERY),
database=quote_plus(GCSQL_POSTGRES_DATABASE_NAME),
client_cert_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_CLIENT_CERT_FILE)),
client_key_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_CLIENT_KEY_FILE)),
server_ca_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_SERVER_CA_FILE)),
)
# The connections below are created using one of the standard approaches - via environment
# variables named AIRFLOW_CONN_*. The connections can also be created in the Airflow
# metadata database (using the CLI or the UI).
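# The gcpcloudsql:// URIs built below share this general shape (values are illustrative only):
#   gcpcloudsql://<user>:<password>@<public_ip>:<port>/<database>?database_type=...&project_id=...
#       &location=...&instance=...&use_proxy=...&use_ssl=...
# Each query parameter is assumed to map to a connection extra understood by
# CloudSQLExecuteQueryOperator.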
# Postgres: connect via proxy over TCP
os.environ["AIRFLOW_CONN_PROXY_POSTGRES_TCP"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_use_tcp=True".format(**postgres_kwargs)
)
# Postgres: connect via proxy over UNIX socket (specific proxy version)
os.environ["AIRFLOW_CONN_PROXY_POSTGRES_SOCKET"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_version=v1.13&"
"sql_proxy_use_tcp=False".format(**postgres_kwargs)
)
# Postgres: connect directly via TCP (non-SSL)
os.environ["AIRFLOW_CONN_PUBLIC_POSTGRES_TCP"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=False".format(**postgres_kwargs)
)
# Postgres: connect directly via TCP (SSL)
os.environ["AIRFLOW_CONN_PUBLIC_POSTGRES_TCP_SSL"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=True&"
"sslcert={client_cert_file}&"
"sslkey={client_key_file}&"
"sslrootcert={server_ca_file}".format(**postgres_kwargs)
)
mysql_kwargs = dict(
user=quote_plus(GCSQL_MYSQL_USER),
password=quote_plus(GCSQL_MYSQL_PASSWORD),
public_port=GCSQL_MYSQL_PUBLIC_PORT,
public_ip=quote_plus(GCSQL_MYSQL_PUBLIC_IP),
project_id=quote_plus(GCP_PROJECT_ID),
location=quote_plus(GCP_REGION),
instance=quote_plus(GCSQL_MYSQL_INSTANCE_NAME_QUERY),
database=quote_plus(GCSQL_MYSQL_DATABASE_NAME),
client_cert_file=quote_plus(get_absolute_path(GCSQL_MYSQL_CLIENT_CERT_FILE)),
client_key_file=quote_plus(get_absolute_path(GCSQL_MYSQL_CLIENT_KEY_FILE)),
server_ca_file=quote_plus(get_absolute_path(GCSQL_MYSQL_SERVER_CA_FILE)),
)
# MySQL: connect via proxy over TCP (specific proxy version)
os.environ["AIRFLOW_CONN_PROXY_MYSQL_TCP"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_version=v1.13&"
"sql_proxy_use_tcp=True".format(**mysql_kwargs)
)
# MySQL: connect via proxy over UNIX socket using pre-downloaded Cloud SQL Proxy binary
try:
sql_proxy_binary_path = subprocess.check_output(["which", "cloud_sql_proxy"]).decode("utf-8").rstrip()
except subprocess.CalledProcessError:
sql_proxy_binary_path = "/tmp/anyhow_download_cloud_sql_proxy"
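# If the proxy binary is not on PATH, fall back to a placeholder location; the operator is assumed
# to download the Cloud SQL Proxy to sql_proxy_binary_path when no binary exists there yet.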
os.environ["AIRFLOW_CONN_PROXY_MYSQL_SOCKET"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_use_tcp=False".format(**mysql_kwargs)
)
# MySQL: connect directly via TCP (non-SSL)
os.environ["AIRFLOW_CONN_PUBLIC_MYSQL_TCP"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=False".format(**mysql_kwargs)
)
# MySQL: connect directly via TCP (SSL) and with fixed Cloud SQL Proxy binary path
os.environ["AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=True&"
"sslcert={client_cert_file}&"
"sslkey={client_key_file}&"
"sslrootcert={server_ca_file}".format(**mysql_kwargs)
)
# Special case: MySQL: connect directly via TCP (SSL) and with fixed Cloud SQL
# Proxy binary path AND with missing project_id
os.environ["AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL_NO_PROJECT_ID"] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=True&"
"sslcert={client_cert_file}&"
"sslkey={client_key_file}&"
"sslrootcert={server_ca_file}".format(**mysql_kwargs)
)
# [END howto_operator_cloudsql_query_connections]
# [START howto_operator_cloudsql_query_operators]
connection_names = [
"proxy_postgres_tcp",
"proxy_postgres_socket",
"public_postgres_tcp",
"public_postgres_tcp_ssl",
"proxy_mysql_tcp",
"proxy_mysql_socket",
"public_mysql_tcp",
"public_mysql_tcp_ssl",
"public_mysql_tcp_ssl_no_project_id",
]
tasks = []
with models.DAG(
dag_id="example_gcp_sql_query",
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example"],
) as dag:
prev_task = None
for connection_name in connection_names:
task = CloudSQLExecuteQueryOperator(
gcp_cloudsql_conn_id=connection_name,
task_id="example_gcp_sql_task_" + connection_name,
sql=SQL,
sql_proxy_binary_path=sql_proxy_binary_path,
)
tasks.append(task)
if prev_task:
prev_task >> task
prev_task = task
# [END howto_operator_cloudsql_query_operators]
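    # The loop above wires the tasks into one serial chain (task_1 >> task_2 >> ...);
    # airflow.models.baseoperator.chain(*tasks) is assumed to be an equivalent shorthand.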
| 10,862 | 36.202055 | 110 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_dataflow.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for the Google Cloud Dataflow service.
"""
from __future__ import annotations
import os
from datetime import datetime
from typing import Callable
from urllib.parse import urlsplit
from airflow import models
from airflow.exceptions import AirflowException
from airflow.providers.apache.beam.operators.beam import (
BeamRunJavaPipelineOperator,
BeamRunPythonPipelineOperator,
)
from airflow.providers.google.cloud.hooks.dataflow import DataflowJobStatus
from airflow.providers.google.cloud.operators.dataflow import (
CheckJobRunning,
DataflowStopJobOperator,
DataflowTemplatedJobStartOperator,
)
from airflow.providers.google.cloud.sensors.dataflow import (
DataflowJobAutoScalingEventsSensor,
DataflowJobMessagesSensor,
DataflowJobMetricsSensor,
DataflowJobStatusSensor,
)
from airflow.providers.google.cloud.transfers.gcs_to_local import GCSToLocalFilesystemOperator
START_DATE = datetime(2021, 1, 1)
GCS_TMP = os.environ.get("GCP_DATAFLOW_GCS_TMP", "gs://INVALID BUCKET NAME/temp/")
GCS_STAGING = os.environ.get("GCP_DATAFLOW_GCS_STAGING", "gs://INVALID BUCKET NAME/staging/")
GCS_OUTPUT = os.environ.get("GCP_DATAFLOW_GCS_OUTPUT", "gs://INVALID BUCKET NAME/output")
GCS_JAR = os.environ.get("GCP_DATAFLOW_JAR", "gs://INVALID BUCKET NAME/word-count-beam-bundled-0.1.jar")
GCS_PYTHON = os.environ.get("GCP_DATAFLOW_PYTHON", "gs://INVALID BUCKET NAME/wordcount_debugging.py")
PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT", "default")
GCS_JAR_PARTS = urlsplit(GCS_JAR)
GCS_JAR_BUCKET_NAME = GCS_JAR_PARTS.netloc
GCS_JAR_OBJECT_NAME = GCS_JAR_PARTS.path[1:]
default_args = {
"dataflow_default_options": {
"tempLocation": GCS_TMP,
"stagingLocation": GCS_STAGING,
}
}
with models.DAG(
"example_gcp_dataflow_native_java",
start_date=START_DATE,
catchup=False,
tags=["example"],
) as dag_native_java:
# [START howto_operator_start_java_job_jar_on_gcs]
start_java_job = BeamRunJavaPipelineOperator(
task_id="start-java-job",
jar=GCS_JAR,
pipeline_options={
"output": GCS_OUTPUT,
},
job_class="org.apache.beam.examples.WordCount",
dataflow_config={
"check_if_running": CheckJobRunning.IgnoreJob,
"location": "europe-west3",
"poll_sleep": 10,
},
)
# [END howto_operator_start_java_job_jar_on_gcs]
# [START howto_operator_start_java_job_local_jar]
jar_to_local = GCSToLocalFilesystemOperator(
task_id="jar-to-local",
bucket=GCS_JAR_BUCKET_NAME,
object_name=GCS_JAR_OBJECT_NAME,
filename="/tmp/dataflow-{{ ds_nodash }}.jar",
)
start_java_job_local = BeamRunJavaPipelineOperator(
task_id="start-java-job-local",
jar="/tmp/dataflow-{{ ds_nodash }}.jar",
pipeline_options={
"output": GCS_OUTPUT,
},
job_class="org.apache.beam.examples.WordCount",
dataflow_config={
"check_if_running": CheckJobRunning.WaitForRun,
"location": "europe-west3",
"poll_sleep": 10,
},
)
jar_to_local >> start_java_job_local
# [END howto_operator_start_java_job_local_jar]
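    # jar-to-local first stages the pipeline jar from GCS to /tmp so that the follow-up task can
    # run it as a local file; the templated {{ ds_nodash }} keeps the filename unique per run date.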
with models.DAG(
"example_gcp_dataflow_native_python",
default_args=default_args,
start_date=START_DATE,
catchup=False,
tags=["example"],
) as dag_native_python:
# [START howto_operator_start_python_job]
start_python_job = BeamRunPythonPipelineOperator(
task_id="start-python-job",
py_file=GCS_PYTHON,
py_options=[],
pipeline_options={
"output": GCS_OUTPUT,
},
py_requirements=["apache-beam[gcp]==2.21.0"],
py_interpreter="python3",
py_system_site_packages=False,
dataflow_config={"location": "europe-west3"},
)
# [END howto_operator_start_python_job]
start_python_job_local = BeamRunPythonPipelineOperator(
task_id="start-python-job-local",
py_file="apache_beam.examples.wordcount",
py_options=["-m"],
pipeline_options={
"output": GCS_OUTPUT,
},
py_requirements=["apache-beam[gcp]==2.14.0"],
py_interpreter="python3",
py_system_site_packages=False,
)
with models.DAG(
"example_gcp_dataflow_native_python_async",
default_args=default_args,
start_date=START_DATE,
catchup=False,
tags=["example"],
) as dag_native_python_async:
# [START howto_operator_start_python_job_async]
start_python_job_async = BeamRunPythonPipelineOperator(
task_id="start-python-job-async",
runner="DataflowRunner",
py_file=GCS_PYTHON,
py_options=[],
pipeline_options={
"output": GCS_OUTPUT,
},
py_requirements=["apache-beam[gcp]==2.25.0"],
py_interpreter="python3",
py_system_site_packages=False,
dataflow_config={
"job_name": "start-python-job-async",
"location": "europe-west3",
"wait_until_finished": False,
},
)
# [END howto_operator_start_python_job_async]
# [START howto_sensor_wait_for_job_status]
wait_for_python_job_async_done = DataflowJobStatusSensor(
task_id="wait-for-python-job-async-done",
job_id="{{task_instance.xcom_pull('start-python-job-async')['id']}}",
expected_statuses={DataflowJobStatus.JOB_STATE_DONE},
location="europe-west3",
)
# [END howto_sensor_wait_for_job_status]
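    # The sensors below all watch the job started by start-python-job-async; its Dataflow job id is
    # pulled from XCom via "{{task_instance.xcom_pull('start-python-job-async')['id']}}".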
# [START howto_sensor_wait_for_job_metric]
    def check_metric_scalar_gte(metric_name: str, value: int) -> Callable:
        """Check if the metric is greater than or equal to the given value."""
def callback(metrics: list[dict]) -> bool:
dag_native_python_async.log.info("Looking for '%s' >= %d", metric_name, value)
for metric in metrics:
context = metric.get("name", {}).get("context", {})
original_name = context.get("original_name", "")
tentative = context.get("tentative", "")
                if original_name == metric_name and not tentative:
return metric["scalar"] >= value
raise AirflowException(f"Metric '{metric_name}' not found in metrics")
return callback
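    # A metric entry handed to the callback is assumed to look roughly like
    #   {"name": {"name": "...", "context": {"original_name": "Service-cpu_num_seconds"}}, "scalar": 1234.5}
    # i.e. the callback matches on name.context.original_name and compares the "scalar" value.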
wait_for_python_job_async_metric = DataflowJobMetricsSensor(
task_id="wait-for-python-job-async-metric",
job_id="{{task_instance.xcom_pull('start-python-job-async')['id']}}",
location="europe-west3",
callback=check_metric_scalar_gte(metric_name="Service-cpu_num_seconds", value=100),
fail_on_terminal_state=False,
)
# [END howto_sensor_wait_for_job_metric]
# [START howto_sensor_wait_for_job_message]
    def check_message(messages: list[dict]) -> bool:
        """Check whether the expected workflow start/stop message appears in the job messages."""
for message in messages:
if "Adding workflow start and stop steps." in message.get("messageText", ""):
return True
return False
wait_for_python_job_async_message = DataflowJobMessagesSensor(
task_id="wait-for-python-job-async-message",
job_id="{{task_instance.xcom_pull('start-python-job-async')['id']}}",
location="europe-west3",
callback=check_message,
fail_on_terminal_state=False,
)
# [END howto_sensor_wait_for_job_message]
# [START howto_sensor_wait_for_job_autoscaling_event]
    def check_autoscaling_event(autoscaling_events: list[dict]) -> bool:
        """Check whether the worker pool start event appears in the autoscaling events."""
for autoscaling_event in autoscaling_events:
if "Worker pool started." in autoscaling_event.get("description", {}).get("messageText", ""):
return True
return False
wait_for_python_job_async_autoscaling_event = DataflowJobAutoScalingEventsSensor(
task_id="wait-for-python-job-async-autoscaling-event",
job_id="{{task_instance.xcom_pull('start-python-job-async')['id']}}",
location="europe-west3",
callback=check_autoscaling_event,
fail_on_terminal_state=False,
)
# [END howto_sensor_wait_for_job_autoscaling_event]
start_python_job_async >> wait_for_python_job_async_done
start_python_job_async >> wait_for_python_job_async_metric
start_python_job_async >> wait_for_python_job_async_message
start_python_job_async >> wait_for_python_job_async_autoscaling_event
with models.DAG(
"example_gcp_dataflow_template",
default_args=default_args,
start_date=START_DATE,
catchup=False,
tags=["example"],
) as dag_template:
# [START howto_operator_start_template_job]
start_template_job = DataflowTemplatedJobStartOperator(
task_id="start-template-job",
project_id=PROJECT_ID,
template="gs://dataflow-templates/latest/Word_Count",
parameters={"inputFile": "gs://dataflow-samples/shakespeare/kinglear.txt", "output": GCS_OUTPUT},
location="europe-west3",
)
# [END howto_operator_start_template_job]
with models.DAG(
"example_gcp_stop_dataflow_job",
default_args=default_args,
start_date=START_DATE,
catchup=False,
tags=["example"],
) as dag_stop_dataflow_job:
# [START howto_operator_stop_dataflow_job]
stop_dataflow_job = DataflowStopJobOperator(
task_id="stop-dataflow-job",
location="europe-west3",
job_name_prefix="start-template-job",
)
# [END howto_operator_stop_dataflow_job]
start_template_job = DataflowTemplatedJobStartOperator(
task_id="start-template-job",
project_id=PROJECT_ID,
template="gs://dataflow-templates/latest/Word_Count",
parameters={"inputFile": "gs://dataflow-samples/shakespeare/kinglear.txt", "output": GCS_OUTPUT},
location="europe-west3",
append_job_name=False,
)
stop_dataflow_job >> start_template_job
| 10,681 | 35.582192 | 105 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_looker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use various Looker
operators to submit a PDT materialization job and manage it.
"""
from __future__ import annotations
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.looker import LookerStartPdtBuildOperator
from airflow.providers.google.cloud.sensors.looker import LookerCheckPdtBuildSensor
with models.DAG(
dag_id="example_gcp_looker",
start_date=datetime(2021, 1, 1),
catchup=False,
) as dag:
# [START cloud_looker_async_start_pdt_sensor]
start_pdt_task_async = LookerStartPdtBuildOperator(
task_id="start_pdt_task_async",
looker_conn_id="your_airflow_connection_for_looker",
model="your_lookml_model",
view="your_lookml_view",
asynchronous=True,
)
check_pdt_task_async_sensor = LookerCheckPdtBuildSensor(
task_id="check_pdt_task_async_sensor",
looker_conn_id="your_airflow_connection_for_looker",
materialization_id=start_pdt_task_async.output,
poke_interval=10,
)
# [END cloud_looker_async_start_pdt_sensor]
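    # materialization_id is taken from start_pdt_task_async.output (an XCom reference), so the
    # sensor polls the same PDT build that the asynchronous operator submitted.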
# [START how_to_cloud_looker_start_pdt_build_operator]
build_pdt_task = LookerStartPdtBuildOperator(
task_id="build_pdt_task",
looker_conn_id="your_airflow_connection_for_looker",
model="your_lookml_model",
view="your_lookml_view",
)
# [END how_to_cloud_looker_start_pdt_build_operator]
start_pdt_task_async >> check_pdt_task_async_sensor
build_pdt_task
| 2,320 | 35.265625 | 87 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_dataflow_sql.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for the Google Cloud Dataflow SQL service.
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.dataflow import DataflowStartSqlJobOperator
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
BQ_SQL_DATASET = os.environ.get("GCP_DATAFLOW_BQ_SQL_DATASET", "airflow_dataflow_samples")
BQ_SQL_TABLE_INPUT = os.environ.get("GCP_DATAFLOW_BQ_SQL_TABLE_INPUT", "beam_input")
BQ_SQL_TABLE_OUTPUT = os.environ.get("GCP_DATAFLOW_BQ_SQL_TABLE_OUTPUT", "beam_output")
DATAFLOW_SQL_JOB_NAME = os.environ.get("GCP_DATAFLOW_SQL_JOB_NAME", "dataflow-sql")
DATAFLOW_SQL_LOCATION = os.environ.get("GCP_DATAFLOW_SQL_LOCATION", "us-west1")
with models.DAG(
dag_id="example_gcp_dataflow_sql",
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example"],
) as dag_sql:
# [START howto_operator_start_sql_job]
start_sql = DataflowStartSqlJobOperator(
task_id="start_sql_query",
job_name=DATAFLOW_SQL_JOB_NAME,
query=f"""
SELECT
sales_region as sales_region,
count(state_id) as count_state
FROM
bigquery.table.`{GCP_PROJECT_ID}`.`{BQ_SQL_DATASET}`.`{BQ_SQL_TABLE_INPUT}`
WHERE state_id >= @state_id_min
GROUP BY sales_region;
""",
options={
"bigquery-project": GCP_PROJECT_ID,
"bigquery-dataset": BQ_SQL_DATASET,
"bigquery-table": BQ_SQL_TABLE_OUTPUT,
"bigquery-write-disposition": "write-truncate",
"parameter": "state_id_min:INT64:2",
},
location=DATAFLOW_SQL_LOCATION,
do_xcom_push=True,
)
# [END howto_operator_start_sql_job]
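    # The "parameter" option binds the @state_id_min placeholder used in the query above; the value
    # format is assumed to follow the name:type:value convention (here state_id_min:INT64:2).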
| 2,586 | 37.044118 | 91 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_vertex_ai.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that demonstrates operators for the Google Vertex AI service in the Google
Cloud Platform.
This DAG relies on the following OS environment variables:
* GCP_VERTEX_AI_BUCKET - Google Cloud Storage bucket where the model will be saved
  after the training process has finished.
* CUSTOM_CONTAINER_URI - path to the container with the model.
* PYTHON_PACKAGE_GSC_URI - path to the archived test model package.
* LOCAL_TRAINING_SCRIPT_PATH - path to the local training script.
* DATASET_ID - ID of the dataset used in the training process.
* MODEL_ID - ID of the model used in the prediction process.
* MODEL_ARTIFACT_URI - path to a GCS directory containing the saved model
  artifacts.
"""
from __future__ import annotations
import os
from datetime import datetime
from uuid import uuid4
from google.cloud import aiplatform
from google.protobuf.struct_pb2 import Value
from airflow import models
from airflow.providers.google.cloud.operators.vertex_ai.auto_ml import (
CreateAutoMLForecastingTrainingJobOperator,
CreateAutoMLImageTrainingJobOperator,
CreateAutoMLTabularTrainingJobOperator,
CreateAutoMLTextTrainingJobOperator,
CreateAutoMLVideoTrainingJobOperator,
DeleteAutoMLTrainingJobOperator,
ListAutoMLTrainingJobOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.batch_prediction_job import (
CreateBatchPredictionJobOperator,
DeleteBatchPredictionJobOperator,
ListBatchPredictionJobsOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.custom_job import (
CreateCustomContainerTrainingJobOperator,
CreateCustomPythonPackageTrainingJobOperator,
CreateCustomTrainingJobOperator,
DeleteCustomTrainingJobOperator,
ListCustomTrainingJobOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.dataset import (
CreateDatasetOperator,
DeleteDatasetOperator,
ExportDataOperator,
GetDatasetOperator,
ImportDataOperator,
ListDatasetsOperator,
UpdateDatasetOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.endpoint_service import (
CreateEndpointOperator,
DeleteEndpointOperator,
DeployModelOperator,
ListEndpointsOperator,
UndeployModelOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.hyperparameter_tuning_job import (
CreateHyperparameterTuningJobOperator,
DeleteHyperparameterTuningJobOperator,
GetHyperparameterTuningJobOperator,
ListHyperparameterTuningJobOperator,
)
from airflow.providers.google.cloud.operators.vertex_ai.model_service import (
DeleteModelOperator,
ExportModelOperator,
ListModelsOperator,
UploadModelOperator,
)
# mypy ignore arg types (for templated fields)
# type: ignore[arg-type]
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "an-id")
REGION = os.environ.get("GCP_LOCATION", "us-central1")
BUCKET = os.environ.get("GCP_VERTEX_AI_BUCKET", "vertex-ai-system-tests")
STAGING_BUCKET = f"gs://{BUCKET}"
DISPLAY_NAME = str(uuid4()) # Create random display name
CONTAINER_URI = "gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest"
CUSTOM_CONTAINER_URI = os.environ.get("CUSTOM_CONTAINER_URI", "path_to_container_with_model")
MODEL_SERVING_CONTAINER_URI = "gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest"
REPLICA_COUNT = 1
MACHINE_TYPE = "n1-standard-4"
ACCELERATOR_TYPE = "ACCELERATOR_TYPE_UNSPECIFIED"
ACCELERATOR_COUNT = 0
TRAINING_FRACTION_SPLIT = 0.7
TEST_FRACTION_SPLIT = 0.15
VALIDATION_FRACTION_SPLIT = 0.15
PYTHON_PACKAGE_GCS_URI = os.environ.get("PYTHON_PACKAGE_GSC_URI", "path_to_test_model_in_arch")
PYTHON_MODULE_NAME = "aiplatform_custom_trainer_script.task"
LOCAL_TRAINING_SCRIPT_PATH = os.environ.get("LOCAL_TRAINING_SCRIPT_PATH", "path_to_training_script")
TRAINING_PIPELINE_ID = "test-training-pipeline-id"
CUSTOM_JOB_ID = "test-custom-job-id"
IMAGE_DATASET = {
"display_name": str(uuid4()),
"metadata_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml",
"metadata": Value(string_value="test-image-dataset"),
}
TABULAR_DATASET = {
"display_name": str(uuid4()),
"metadata_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/metadata/tabular_1.0.0.yaml",
"metadata": Value(string_value="test-tabular-dataset"),
}
TEXT_DATASET = {
"display_name": str(uuid4()),
"metadata_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml",
"metadata": Value(string_value="test-text-dataset"),
}
VIDEO_DATASET = {
"display_name": str(uuid4()),
"metadata_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml",
"metadata": Value(string_value="test-video-dataset"),
}
TIME_SERIES_DATASET = {
    "display_name": str(uuid4()),
    "metadata_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/metadata/time_series_1.0.0.yaml",
    "metadata": Value(string_value="test-time-series-dataset"),
}
DATASET_ID = os.environ.get("DATASET_ID", "test-dataset-id")
TEST_EXPORT_CONFIG = {"gcs_destination": {"output_uri_prefix": "gs://test-vertex-ai-bucket/exports"}}
TEST_IMPORT_CONFIG = [
{
"data_item_labels": {
"test-labels-name": "test-labels-value",
},
"import_schema_uri": (
"gs://google-cloud-aiplatform/schema/dataset/ioformat/image_bounding_box_io_format_1.0.0.yaml"
),
"gcs_source": {
"uris": ["gs://ucaip-test-us-central1/dataset/salads_oid_ml_use_public_unassigned.jsonl"]
},
},
]
DATASET_TO_UPDATE = {"display_name": "test-name"}
TEST_UPDATE_MASK = {"paths": ["displayName"]}
TEST_TIME_COLUMN = "date"
TEST_TIME_SERIES_IDENTIFIER_COLUMN = "store_name"
TEST_TARGET_COLUMN = "sale_dollars"
COLUMN_SPECS = {
TEST_TIME_COLUMN: "timestamp",
TEST_TARGET_COLUMN: "numeric",
"city": "categorical",
"zip_code": "categorical",
"county": "categorical",
}
COLUMN_TRANSFORMATIONS = [
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
]
MODEL_ID = os.environ.get("MODEL_ID", "test-model-id")
MODEL_ARTIFACT_URI = os.environ.get("MODEL_ARTIFACT_URI", "path_to_folder_with_model_artifacts")
MODEL_NAME = f"projects/{PROJECT_ID}/locations/{REGION}/models/{MODEL_ID}"
JOB_DISPLAY_NAME = f"temp_create_batch_prediction_job_test_{uuid4()}"
BIGQUERY_SOURCE = f"bq://{PROJECT_ID}.test_iowa_liquor_sales_forecasting_us.2021_sales_predict"
GCS_DESTINATION_PREFIX = "gs://test-vertex-ai-bucket-us/output"
MODEL_PARAMETERS: dict | None = {}
ENDPOINT_CONF = {
"display_name": f"endpoint_test_{uuid4()}",
}
DEPLOYED_MODEL = {
# format: 'projects/{project}/locations/{location}/models/{model}'
"model": f"projects/{PROJECT_ID}/locations/{REGION}/models/{MODEL_ID}",
"display_name": f"temp_endpoint_test_{uuid4()}",
"dedicated_resources": {
"machine_spec": {
"machine_type": "n1-standard-2",
"accelerator_type": aiplatform.gapic.AcceleratorType.NVIDIA_TESLA_K80,
"accelerator_count": 1,
},
"min_replica_count": 1,
"max_replica_count": 1,
},
}
MODEL_OUTPUT_CONFIG = {
"artifact_destination": {
"output_uri_prefix": STAGING_BUCKET,
},
"export_format_id": "custom-trained",
}
MODEL_OBJ = {
"display_name": f"model-{str(uuid4())}",
"artifact_uri": MODEL_ARTIFACT_URI,
"container_spec": {
"image_uri": MODEL_SERVING_CONTAINER_URI,
"command": [],
"args": [],
"env": [],
"ports": [],
"predict_route": "",
"health_route": "",
},
}
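# MODEL_OBJ mirrors the Vertex AI Model resource used by UploadModelOperator: artifact_uri points at
# the saved model files and container_spec describes the serving container (the empty fields are
# assumed to fall back to service defaults).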
with models.DAG(
"example_gcp_vertex_ai_custom_jobs",
start_date=datetime(2021, 1, 1),
catchup=False,
) as custom_jobs_dag:
# [START how_to_cloud_vertex_ai_create_custom_container_training_job_operator]
create_custom_container_training_job = CreateCustomContainerTrainingJobOperator(
task_id="custom_container_task",
staging_bucket=STAGING_BUCKET,
display_name=f"train-housing-container-{DISPLAY_NAME}",
container_uri=CUSTOM_CONTAINER_URI,
model_serving_container_image_uri=MODEL_SERVING_CONTAINER_URI,
# run params
dataset_id=DATASET_ID,
command=["python3", "task.py"],
model_display_name=f"container-housing-model-{DISPLAY_NAME}",
replica_count=REPLICA_COUNT,
machine_type=MACHINE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
accelerator_count=ACCELERATOR_COUNT,
training_fraction_split=TRAINING_FRACTION_SPLIT,
validation_fraction_split=VALIDATION_FRACTION_SPLIT,
test_fraction_split=TEST_FRACTION_SPLIT,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_custom_container_training_job_operator]
# [START how_to_cloud_vertex_ai_create_custom_python_package_training_job_operator]
create_custom_python_package_training_job = CreateCustomPythonPackageTrainingJobOperator(
task_id="python_package_task",
staging_bucket=STAGING_BUCKET,
display_name=f"train-housing-py-package-{DISPLAY_NAME}",
python_package_gcs_uri=PYTHON_PACKAGE_GCS_URI,
python_module_name=PYTHON_MODULE_NAME,
container_uri=CONTAINER_URI,
model_serving_container_image_uri=MODEL_SERVING_CONTAINER_URI,
# run params
dataset_id=DATASET_ID,
model_display_name=f"py-package-housing-model-{DISPLAY_NAME}",
replica_count=REPLICA_COUNT,
machine_type=MACHINE_TYPE,
accelerator_type=ACCELERATOR_TYPE,
accelerator_count=ACCELERATOR_COUNT,
training_fraction_split=TRAINING_FRACTION_SPLIT,
validation_fraction_split=VALIDATION_FRACTION_SPLIT,
test_fraction_split=TEST_FRACTION_SPLIT,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_custom_python_package_training_job_operator]
# [START how_to_cloud_vertex_ai_create_custom_training_job_operator]
create_custom_training_job = CreateCustomTrainingJobOperator(
task_id="custom_task",
staging_bucket=STAGING_BUCKET,
display_name=f"train-housing-custom-{DISPLAY_NAME}",
script_path=LOCAL_TRAINING_SCRIPT_PATH,
container_uri=CONTAINER_URI,
requirements=["gcsfs==0.7.1"],
model_serving_container_image_uri=MODEL_SERVING_CONTAINER_URI,
# run params
dataset_id=DATASET_ID,
replica_count=1,
model_display_name=f"custom-housing-model-{DISPLAY_NAME}",
sync=False,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_custom_training_job_operator]
# [START how_to_cloud_vertex_ai_delete_custom_training_job_operator]
delete_custom_training_job = DeleteCustomTrainingJobOperator(
task_id="delete_custom_training_job",
training_pipeline_id=TRAINING_PIPELINE_ID,
custom_job_id=CUSTOM_JOB_ID,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_delete_custom_training_job_operator]
# [START how_to_cloud_vertex_ai_list_custom_training_job_operator]
list_custom_training_job = ListCustomTrainingJobOperator(
task_id="list_custom_training_job",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_list_custom_training_job_operator]
with models.DAG(
"example_gcp_vertex_ai_dataset",
start_date=datetime(2021, 1, 1),
catchup=False,
) as dataset_dag:
# [START how_to_cloud_vertex_ai_create_dataset_operator]
create_image_dataset_job = CreateDatasetOperator(
task_id="image_dataset",
dataset=IMAGE_DATASET,
region=REGION,
project_id=PROJECT_ID,
)
create_tabular_dataset_job = CreateDatasetOperator(
task_id="tabular_dataset",
dataset=TABULAR_DATASET,
region=REGION,
project_id=PROJECT_ID,
)
create_text_dataset_job = CreateDatasetOperator(
task_id="text_dataset",
dataset=TEXT_DATASET,
region=REGION,
project_id=PROJECT_ID,
)
create_video_dataset_job = CreateDatasetOperator(
task_id="video_dataset",
dataset=VIDEO_DATASET,
region=REGION,
project_id=PROJECT_ID,
)
create_time_series_dataset_job = CreateDatasetOperator(
task_id="time_series_dataset",
dataset=TIME_SERIES_DATASET,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_dataset_operator]
# [START how_to_cloud_vertex_ai_delete_dataset_operator]
delete_dataset_job = DeleteDatasetOperator(
task_id="delete_dataset",
dataset_id=create_text_dataset_job.output["dataset_id"],
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_delete_dataset_operator]
# [START how_to_cloud_vertex_ai_get_dataset_operator]
get_dataset = GetDatasetOperator(
task_id="get_dataset",
project_id=PROJECT_ID,
region=REGION,
dataset_id=create_tabular_dataset_job.output["dataset_id"],
)
# [END how_to_cloud_vertex_ai_get_dataset_operator]
# [START how_to_cloud_vertex_ai_export_data_operator]
export_data_job = ExportDataOperator(
task_id="export_data",
dataset_id=create_image_dataset_job.output["dataset_id"],
region=REGION,
project_id=PROJECT_ID,
export_config=TEST_EXPORT_CONFIG,
)
# [END how_to_cloud_vertex_ai_export_data_operator]
# [START how_to_cloud_vertex_ai_import_data_operator]
import_data_job = ImportDataOperator(
task_id="import_data",
dataset_id=create_image_dataset_job.output["dataset_id"],
region=REGION,
project_id=PROJECT_ID,
import_configs=TEST_IMPORT_CONFIG,
)
# [END how_to_cloud_vertex_ai_import_data_operator]
# [START how_to_cloud_vertex_ai_list_dataset_operator]
list_dataset_job = ListDatasetsOperator(
task_id="list_dataset",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_list_dataset_operator]
# [START how_to_cloud_vertex_ai_update_dataset_operator]
update_dataset_job = UpdateDatasetOperator(
task_id="update_dataset",
project_id=PROJECT_ID,
region=REGION,
dataset_id=create_video_dataset_job.output["dataset_id"],
dataset=DATASET_TO_UPDATE,
update_mask=TEST_UPDATE_MASK,
)
# [END how_to_cloud_vertex_ai_update_dataset_operator]
create_time_series_dataset_job
create_text_dataset_job >> delete_dataset_job
create_tabular_dataset_job >> get_dataset
create_image_dataset_job >> import_data_job >> export_data_job
create_video_dataset_job >> update_dataset_job
list_dataset_job
with models.DAG(
"example_gcp_vertex_ai_auto_ml",
start_date=datetime(2021, 1, 1),
catchup=False,
) as auto_ml_dag:
# [START how_to_cloud_vertex_ai_create_auto_ml_forecasting_training_job_operator]
create_auto_ml_forecasting_training_job = CreateAutoMLForecastingTrainingJobOperator(
task_id="auto_ml_forecasting_task",
display_name=f"auto-ml-forecasting-{DISPLAY_NAME}",
optimization_objective="minimize-rmse",
column_specs=COLUMN_SPECS,
# run params
dataset_id=DATASET_ID,
target_column=TEST_TARGET_COLUMN,
time_column=TEST_TIME_COLUMN,
time_series_identifier_column=TEST_TIME_SERIES_IDENTIFIER_COLUMN,
available_at_forecast_columns=[TEST_TIME_COLUMN],
unavailable_at_forecast_columns=[TEST_TARGET_COLUMN],
time_series_attribute_columns=["city", "zip_code", "county"],
forecast_horizon=30,
context_window=30,
data_granularity_unit="day",
data_granularity_count=1,
weight_column=None,
budget_milli_node_hours=1000,
model_display_name=f"auto-ml-forecasting-model-{DISPLAY_NAME}",
predefined_split_column_name=None,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_auto_ml_forecasting_training_job_operator]
# [START how_to_cloud_vertex_ai_create_auto_ml_image_training_job_operator]
create_auto_ml_image_training_job = CreateAutoMLImageTrainingJobOperator(
task_id="auto_ml_image_task",
display_name=f"auto-ml-image-{DISPLAY_NAME}",
dataset_id=DATASET_ID,
prediction_type="classification",
multi_label=False,
model_type="CLOUD",
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=8000,
model_display_name=f"auto-ml-image-model-{DISPLAY_NAME}",
disable_early_stopping=False,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_auto_ml_image_training_job_operator]
# [START how_to_cloud_vertex_ai_create_auto_ml_tabular_training_job_operator]
create_auto_ml_tabular_training_job = CreateAutoMLTabularTrainingJobOperator(
task_id="auto_ml_tabular_task",
display_name=f"auto-ml-tabular-{DISPLAY_NAME}",
optimization_prediction_type="classification",
column_transformations=COLUMN_TRANSFORMATIONS,
dataset_id=DATASET_ID,
target_column="Adopted",
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_auto_ml_tabular_training_job_operator]
# [START how_to_cloud_vertex_ai_create_auto_ml_text_training_job_operator]
create_auto_ml_text_training_job = CreateAutoMLTextTrainingJobOperator(
task_id="auto_ml_text_task",
display_name=f"auto-ml-text-{DISPLAY_NAME}",
prediction_type="classification",
multi_label=False,
dataset_id=DATASET_ID,
model_display_name=f"auto-ml-text-model-{DISPLAY_NAME}",
training_fraction_split=0.7,
validation_fraction_split=0.2,
test_fraction_split=0.1,
sync=True,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_auto_ml_text_training_job_operator]
# [START how_to_cloud_vertex_ai_create_auto_ml_video_training_job_operator]
create_auto_ml_video_training_job = CreateAutoMLVideoTrainingJobOperator(
task_id="auto_ml_video_task",
display_name=f"auto-ml-video-{DISPLAY_NAME}",
prediction_type="classification",
model_type="CLOUD",
dataset_id=DATASET_ID,
model_display_name=f"auto-ml-video-model-{DISPLAY_NAME}",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_auto_ml_video_training_job_operator]
# [START how_to_cloud_vertex_ai_delete_auto_ml_training_job_operator]
delete_auto_ml_training_job = DeleteAutoMLTrainingJobOperator(
task_id="delete_auto_ml_training_job",
training_pipeline_id=TRAINING_PIPELINE_ID,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_delete_auto_ml_training_job_operator]
# [START how_to_cloud_vertex_ai_list_auto_ml_training_job_operator]
list_auto_ml_training_job = ListAutoMLTrainingJobOperator(
task_id="list_auto_ml_training_job",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_list_auto_ml_training_job_operator]
with models.DAG(
"example_gcp_vertex_ai_batch_prediction_job",
start_date=datetime(2021, 1, 1),
catchup=False,
) as batch_prediction_job_dag:
# [START how_to_cloud_vertex_ai_create_batch_prediction_job_operator]
create_batch_prediction_job = CreateBatchPredictionJobOperator(
task_id="create_batch_prediction_job",
job_display_name=JOB_DISPLAY_NAME,
model_name=MODEL_NAME,
predictions_format="csv",
bigquery_source=BIGQUERY_SOURCE,
gcs_destination_prefix=GCS_DESTINATION_PREFIX,
model_parameters=MODEL_PARAMETERS,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_batch_prediction_job_operator]
# [START how_to_cloud_vertex_ai_list_batch_prediction_job_operator]
list_batch_prediction_job = ListBatchPredictionJobsOperator(
task_id="list_batch_prediction_jobs",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_list_batch_prediction_job_operator]
# [START how_to_cloud_vertex_ai_delete_batch_prediction_job_operator]
delete_batch_prediction_job = DeleteBatchPredictionJobOperator(
task_id="delete_batch_prediction_job",
batch_prediction_job_id=create_batch_prediction_job.output["batch_prediction_job_id"],
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_delete_batch_prediction_job_operator]
create_batch_prediction_job >> delete_batch_prediction_job
list_batch_prediction_job
with models.DAG(
"example_gcp_vertex_ai_endpoint",
start_date=datetime(2021, 1, 1),
catchup=False,
) as endpoint_dag:
# [START how_to_cloud_vertex_ai_create_endpoint_operator]
create_endpoint = CreateEndpointOperator(
task_id="create_endpoint",
endpoint=ENDPOINT_CONF,
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_create_endpoint_operator]
# [START how_to_cloud_vertex_ai_delete_endpoint_operator]
delete_endpoint = DeleteEndpointOperator(
task_id="delete_endpoint",
endpoint_id=create_endpoint.output["endpoint_id"],
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_delete_endpoint_operator]
# [START how_to_cloud_vertex_ai_list_endpoints_operator]
list_endpoints = ListEndpointsOperator(
task_id="list_endpoints",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_list_endpoints_operator]
# [START how_to_cloud_vertex_ai_deploy_model_operator]
deploy_model = DeployModelOperator(
task_id="deploy_model",
endpoint_id=create_endpoint.output["endpoint_id"],
deployed_model=DEPLOYED_MODEL,
traffic_split={"0": 100},
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_deploy_model_operator]
# [START how_to_cloud_vertex_ai_undeploy_model_operator]
undeploy_model = UndeployModelOperator(
task_id="undeploy_model",
endpoint_id=create_endpoint.output["endpoint_id"],
deployed_model_id=deploy_model.output["deployed_model_id"],
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_undeploy_model_operator]
create_endpoint >> deploy_model >> undeploy_model >> delete_endpoint
list_endpoints
with models.DAG(
"example_gcp_vertex_ai_hyperparameter_tuning_job",
start_date=datetime(2021, 1, 1),
catchup=False,
) as hyperparameter_tuning_job_dag:
# [START how_to_cloud_vertex_ai_create_hyperparameter_tuning_job_operator]
create_hyperparameter_tuning_job = CreateHyperparameterTuningJobOperator(
task_id="create_hyperparameter_tuning_job",
staging_bucket=STAGING_BUCKET,
display_name=f"horses-humans-hyptertune-{DISPLAY_NAME}",
worker_pool_specs=[
{
"machine_spec": {
"machine_type": MACHINE_TYPE,
"accelerator_type": ACCELERATOR_TYPE,
"accelerator_count": ACCELERATOR_COUNT,
},
"replica_count": REPLICA_COUNT,
"container_spec": {
"image_uri": f"gcr.io/{PROJECT_ID}/horse-human:hypertune",
},
}
],
sync=False,
region=REGION,
project_id=PROJECT_ID,
parameter_spec={
"learning_rate": aiplatform.hyperparameter_tuning.DoubleParameterSpec(
min=0.01, max=1, scale="log"
),
"momentum": aiplatform.hyperparameter_tuning.DoubleParameterSpec(min=0, max=1, scale="linear"),
"num_neurons": aiplatform.hyperparameter_tuning.DiscreteParameterSpec(
values=[64, 128, 512], scale="linear"
),
},
metric_spec={
"accuracy": "maximize",
},
max_trial_count=15,
parallel_trial_count=3,
)
# [END how_to_cloud_vertex_ai_create_hyperparameter_tuning_job_operator]
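    # parameter_spec defines the search space (log-scaled learning_rate, linear momentum, discrete
    # num_neurons) and metric_spec names the objective ("accuracy": "maximize") that the tuning
    # service optimizes over max_trial_count trials, running up to parallel_trial_count at a time.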
# [START how_to_cloud_vertex_ai_get_hyperparameter_tuning_job_operator]
get_hyperparameter_tuning_job = GetHyperparameterTuningJobOperator(
task_id="get_hyperparameter_tuning_job",
project_id=PROJECT_ID,
region=REGION,
hyperparameter_tuning_job_id=create_hyperparameter_tuning_job.output["hyperparameter_tuning_job_id"],
)
# [END how_to_cloud_vertex_ai_get_hyperparameter_tuning_job_operator]
# [START how_to_cloud_vertex_ai_delete_hyperparameter_tuning_job_operator]
delete_hyperparameter_tuning_job = DeleteHyperparameterTuningJobOperator(
task_id="delete_hyperparameter_tuning_job",
project_id=PROJECT_ID,
region=REGION,
hyperparameter_tuning_job_id=create_hyperparameter_tuning_job.output["hyperparameter_tuning_job_id"],
)
# [END how_to_cloud_vertex_ai_delete_hyperparameter_tuning_job_operator]
# [START how_to_cloud_vertex_ai_list_hyperparameter_tuning_job_operator]
list_hyperparameter_tuning_job = ListHyperparameterTuningJobOperator(
task_id="list_hyperparameter_tuning_job",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_list_hyperparameter_tuning_job_operator]
create_hyperparameter_tuning_job >> get_hyperparameter_tuning_job >> delete_hyperparameter_tuning_job
list_hyperparameter_tuning_job
with models.DAG(
"example_gcp_vertex_ai_model_service",
start_date=datetime(2021, 1, 1),
catchup=False,
) as model_service_dag:
# [START how_to_cloud_vertex_ai_upload_model_operator]
upload_model = UploadModelOperator(
task_id="upload_model",
region=REGION,
project_id=PROJECT_ID,
model=MODEL_OBJ,
)
# [END how_to_cloud_vertex_ai_upload_model_operator]
# [START how_to_cloud_vertex_ai_export_model_operator]
export_model = ExportModelOperator(
task_id="export_model",
project_id=PROJECT_ID,
region=REGION,
model_id=upload_model.output["model_id"],
output_config=MODEL_OUTPUT_CONFIG,
)
# [END how_to_cloud_vertex_ai_export_model_operator]
# [START how_to_cloud_vertex_ai_delete_model_operator]
delete_model = DeleteModelOperator(
task_id="delete_model",
project_id=PROJECT_ID,
region=REGION,
model_id=upload_model.output["model_id"],
)
# [END how_to_cloud_vertex_ai_delete_model_operator]
# [START how_to_cloud_vertex_ai_list_models_operator]
list_models = ListModelsOperator(
task_id="list_models",
region=REGION,
project_id=PROJECT_ID,
)
# [END how_to_cloud_vertex_ai_list_models_operator]
upload_model >> export_model >> delete_model
list_models
| 28,718 | 37.038411 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_presto_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG using PrestoToGCSOperator.
"""
from __future__ import annotations
import os
import re
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateExternalTableOperator,
BigQueryDeleteDatasetOperator,
BigQueryInsertJobOperator,
)
from airflow.providers.google.cloud.transfers.presto_to_gcs import PrestoToGCSOperator
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCS_BUCKET = os.environ.get("GCP_PRESTO_TO_GCS_BUCKET_NAME", "INVALID BUCKET NAME")
DATASET_NAME = os.environ.get("GCP_PRESTO_TO_GCS_DATASET_NAME", "test_presto_to_gcs_dataset")
SOURCE_MULTIPLE_TYPES = "memory.default.test_multiple_types"
SOURCE_CUSTOMER_TABLE = "tpch.sf1.customer"
def safe_name(s: str) -> str:
"""
    Replace characters that are not valid in file names with underscores.
"""
return re.sub("[^0-9a-zA-Z_]+", "_", s)
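# For example, safe_name("memory.default.test_multiple_types") returns
# "memory_default_test_multiple_types", which can be used in GCS object and BigQuery table names.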
with models.DAG(
dag_id="example_presto_to_gcs",
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example"],
) as dag:
create_dataset = BigQueryCreateEmptyDatasetOperator(task_id="create-dataset", dataset_id=DATASET_NAME)
delete_dataset = BigQueryDeleteDatasetOperator(
task_id="delete_dataset", dataset_id=DATASET_NAME, delete_contents=True
)
# [START howto_operator_presto_to_gcs_basic]
presto_to_gcs_basic = PrestoToGCSOperator(
task_id="presto_to_gcs_basic",
sql=f"select * from {SOURCE_MULTIPLE_TYPES}",
bucket=GCS_BUCKET,
filename=f"{safe_name(SOURCE_MULTIPLE_TYPES)}.{{}}.json",
)
# [END howto_operator_presto_to_gcs_basic]
# [START howto_operator_presto_to_gcs_multiple_types]
presto_to_gcs_multiple_types = PrestoToGCSOperator(
task_id="presto_to_gcs_multiple_types",
sql=f"select * from {SOURCE_MULTIPLE_TYPES}",
bucket=GCS_BUCKET,
filename=f"{safe_name(SOURCE_MULTIPLE_TYPES)}.{{}}.json",
schema_filename=f"{safe_name(SOURCE_MULTIPLE_TYPES)}-schema.json",
gzip=False,
)
# [END howto_operator_presto_to_gcs_multiple_types]
# [START howto_operator_create_external_table_multiple_types]
create_external_table_multiple_types = BigQueryCreateExternalTableOperator(
task_id="create_external_table_multiple_types",
bucket=GCS_BUCKET,
source_objects=[f"{safe_name(SOURCE_MULTIPLE_TYPES)}.*.json"],
table_resource={
"tableReference": {
"projectId": GCP_PROJECT_ID,
"datasetId": DATASET_NAME,
"tableId": f"{safe_name(SOURCE_MULTIPLE_TYPES)}",
},
"schema": {
"fields": [
{"name": "name", "type": "STRING"},
{"name": "post_abbr", "type": "STRING"},
]
},
"externalDataConfiguration": {
"sourceFormat": "NEWLINE_DELIMITED_JSON",
"compression": "NONE",
"csvOptions": {"skipLeadingRows": 1},
},
},
schema_object=f"{safe_name(SOURCE_MULTIPLE_TYPES)}-schema.json",
)
# [END howto_operator_create_external_table_multiple_types]
read_data_from_gcs_multiple_types = BigQueryInsertJobOperator(
task_id="read_data_from_gcs_multiple_types",
configuration={
"query": {
"query": f"SELECT COUNT(*) FROM `{GCP_PROJECT_ID}.{DATASET_NAME}."
f"{safe_name(SOURCE_MULTIPLE_TYPES)}`",
"useLegacySql": False,
}
},
)
# [START howto_operator_presto_to_gcs_many_chunks]
presto_to_gcs_many_chunks = PrestoToGCSOperator(
task_id="presto_to_gcs_many_chunks",
sql=f"select * from {SOURCE_CUSTOMER_TABLE}",
bucket=GCS_BUCKET,
filename=f"{safe_name(SOURCE_CUSTOMER_TABLE)}.{{}}.json",
schema_filename=f"{safe_name(SOURCE_CUSTOMER_TABLE)}-schema.json",
approx_max_file_size_bytes=10_000_000,
gzip=False,
)
# [END howto_operator_presto_to_gcs_many_chunks]
create_external_table_many_chunks = BigQueryCreateExternalTableOperator(
task_id="create_external_table_many_chunks",
bucket=GCS_BUCKET,
table_resource={
"tableReference": {
"projectId": GCP_PROJECT_ID,
"datasetId": DATASET_NAME,
"tableId": f"{safe_name(SOURCE_CUSTOMER_TABLE)}",
},
"schema": {
"fields": [
{"name": "name", "type": "STRING"},
{"name": "post_abbr", "type": "STRING"},
]
},
"externalDataConfiguration": {
"sourceFormat": "NEWLINE_DELIMITED_JSON",
"compression": "NONE",
"csvOptions": {"skipLeadingRows": 1},
},
},
source_objects=[f"{safe_name(SOURCE_CUSTOMER_TABLE)}.*.json"],
schema_object=f"{safe_name(SOURCE_CUSTOMER_TABLE)}-schema.json",
)
# [START howto_operator_read_data_from_gcs_many_chunks]
read_data_from_gcs_many_chunks = BigQueryInsertJobOperator(
task_id="read_data_from_gcs_many_chunks",
configuration={
"query": {
"query": f"SELECT COUNT(*) FROM `{GCP_PROJECT_ID}.{DATASET_NAME}."
f"{safe_name(SOURCE_CUSTOMER_TABLE)}`",
"useLegacySql": False,
}
},
)
# [END howto_operator_read_data_from_gcs_many_chunks]
# [START howto_operator_presto_to_gcs_csv]
presto_to_gcs_csv = PrestoToGCSOperator(
task_id="presto_to_gcs_csv",
sql=f"select * from {SOURCE_MULTIPLE_TYPES}",
bucket=GCS_BUCKET,
filename=f"{safe_name(SOURCE_MULTIPLE_TYPES)}.{{}}.csv",
schema_filename=f"{safe_name(SOURCE_MULTIPLE_TYPES)}-schema.json",
export_format="csv",
)
# [END howto_operator_presto_to_gcs_csv]
create_dataset >> presto_to_gcs_basic
create_dataset >> presto_to_gcs_multiple_types
create_dataset >> presto_to_gcs_many_chunks
create_dataset >> presto_to_gcs_csv
presto_to_gcs_multiple_types >> create_external_table_multiple_types >> read_data_from_gcs_multiple_types
presto_to_gcs_many_chunks >> create_external_table_many_chunks >> read_data_from_gcs_many_chunks
presto_to_gcs_basic >> delete_dataset
presto_to_gcs_csv >> delete_dataset
read_data_from_gcs_multiple_types >> delete_dataset
read_data_from_gcs_many_chunks >> delete_dataset
| 7,363 | 36.764103 | 109 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_postgres_to_gcs.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG using PostgresToGCSOperator.
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.transfers.postgres_to_gcs import PostgresToGCSOperator
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCS_BUCKET = os.environ.get("GCP_GCS_BUCKET_NAME", "INVALID BUCKET NAME")
FILENAME = "test_file"
SQL_QUERY = "select * from test_table;"
with models.DAG(
dag_id="example_postgres_to_gcs",
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example"],
) as dag:
upload_data = PostgresToGCSOperator(
task_id="get_data", sql=SQL_QUERY, bucket=GCS_BUCKET, filename=FILENAME, gzip=False
)
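    # The variant below streams rows through a server-side cursor, which is assumed to keep memory
    # usage flat for large result sets instead of fetching everything at once.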
upload_data_server_side_cursor = PostgresToGCSOperator(
task_id="get_data_with_server_side_cursor",
sql=SQL_QUERY,
bucket=GCS_BUCKET,
filename=FILENAME,
gzip=False,
use_server_side_cursor=True,
)
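    # A minimal additional sketch (not part of the original example): exporting the
    # same query as gzip-compressed CSV. `export_format` and `gzip` are standard
    # BaseSQLToGCSOperator parameters; the task_id and filename here are illustrative.
    upload_data_csv = PostgresToGCSOperator(
        task_id="get_data_as_csv",
        sql=SQL_QUERY,
        bucket=GCS_BUCKET,
        filename=f"{FILENAME}.csv.gz",
        export_format="csv",
        gzip=True,
    )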
| 1,785 | 33.346154 | 91 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/providers/google/cloud/example_dags/example_cloud_storage_transfer_service_aws.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that demonstrates interactions with the Google Cloud Transfer Service.
Note that you need to provide a large enough set of data so that operations do not finish too quickly;
otherwise, the DAG will fail.
This DAG relies on the following OS environment variables:
* GCP_PROJECT_ID - Google Cloud project to use for the Google Cloud Transfer Service.
* GCP_DESCRIPTION - description of the transfer job.
* GCP_TRANSFER_SOURCE_AWS_BUCKET - Amazon Web Services S3 bucket from which files are copied.
* GCP_TRANSFER_FIRST_TARGET_BUCKET - Google Cloud Storage bucket to which files are copied.
* WAIT_FOR_OPERATION_POKE_INTERVAL - interval (in seconds) at which the status of the operation is checked.
  A value smaller than the default speeds up the system test and ensures correct execution even with
  a smaller amount of data in the source bucket.
See the documentation of :class:`~airflow.sensors.base.BaseSensorOperator` for more information.
"""
from __future__ import annotations
import os
from datetime import datetime, timedelta
from airflow import models
from airflow.models.baseoperator import chain
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import (
ALREADY_EXISTING_IN_SINK,
AWS_S3_DATA_SOURCE,
BUCKET_NAME,
DESCRIPTION,
FILTER_JOB_NAMES,
FILTER_PROJECT_ID,
GCS_DATA_SINK,
JOB_NAME,
PROJECT_ID,
SCHEDULE,
SCHEDULE_END_DATE,
SCHEDULE_START_DATE,
START_TIME_OF_DAY,
STATUS,
TRANSFER_OPTIONS,
TRANSFER_SPEC,
GcpTransferJobsStatus,
GcpTransferOperationStatus,
)
from airflow.providers.google.cloud.operators.cloud_storage_transfer_service import (
CloudDataTransferServiceCancelOperationOperator,
CloudDataTransferServiceCreateJobOperator,
CloudDataTransferServiceDeleteJobOperator,
CloudDataTransferServiceGetOperationOperator,
CloudDataTransferServiceListOperationsOperator,
CloudDataTransferServicePauseOperationOperator,
CloudDataTransferServiceResumeOperationOperator,
)
from airflow.providers.google.cloud.sensors.cloud_storage_transfer_service import (
CloudDataTransferServiceJobStatusSensor,
)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_DESCRIPTION = os.environ.get("GCP_DESCRIPTION", "description")
GCP_TRANSFER_TARGET_BUCKET = os.environ.get("GCP_TRANSFER_TARGET_BUCKET")
WAIT_FOR_OPERATION_POKE_INTERVAL = int(os.environ.get("WAIT_FOR_OPERATION_POKE_INTERVAL", 5))
GCP_TRANSFER_SOURCE_AWS_BUCKET = os.environ.get("GCP_TRANSFER_SOURCE_AWS_BUCKET")
GCP_TRANSFER_FIRST_TARGET_BUCKET = os.environ.get(
"GCP_TRANSFER_FIRST_TARGET_BUCKET", "gcp-transfer-first-target"
)
GCP_TRANSFER_JOB_NAME = os.environ.get("GCP_TRANSFER_JOB_NAME", "transferJobs/sampleJob")
# [START howto_operator_gcp_transfer_create_job_body_aws]
aws_to_gcs_transfer_body = {
DESCRIPTION: GCP_DESCRIPTION,
STATUS: GcpTransferJobsStatus.ENABLED,
PROJECT_ID: GCP_PROJECT_ID,
JOB_NAME: GCP_TRANSFER_JOB_NAME,
SCHEDULE: {
SCHEDULE_START_DATE: datetime(2015, 1, 1).date(),
SCHEDULE_END_DATE: datetime(2030, 1, 1).date(),
START_TIME_OF_DAY: (datetime.utcnow() + timedelta(minutes=2)).time(),
},
TRANSFER_SPEC: {
AWS_S3_DATA_SOURCE: {BUCKET_NAME: GCP_TRANSFER_SOURCE_AWS_BUCKET},
GCS_DATA_SINK: {BUCKET_NAME: GCP_TRANSFER_FIRST_TARGET_BUCKET},
TRANSFER_OPTIONS: {ALREADY_EXISTING_IN_SINK: True},
},
}
# [END howto_operator_gcp_transfer_create_job_body_aws]
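# The uppercase keys above are string constants exported by
# airflow.providers.google.cloud.hooks.cloud_storage_transfer_service and map to fields
# of the Storage Transfer Service API's TransferJob resource. The datetime.date and
# datetime.time values can be passed directly; the create-job operator converts them to
# the API's Date and TimeOfDay structures before submitting the request.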
with models.DAG(
"example_gcp_transfer_aws",
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example"],
) as dag:
# [START howto_operator_gcp_transfer_create_job]
create_transfer_job_from_aws = CloudDataTransferServiceCreateJobOperator(
task_id="create_transfer_job_from_aws", body=aws_to_gcs_transfer_body
)
# [END howto_operator_gcp_transfer_create_job]
wait_for_operation_to_start = CloudDataTransferServiceJobStatusSensor(
task_id="wait_for_operation_to_start",
job_name="{{task_instance.xcom_pull('create_transfer_job_from_aws')['name']}}",
project_id=GCP_PROJECT_ID,
expected_statuses={GcpTransferOperationStatus.IN_PROGRESS},
poke_interval=WAIT_FOR_OPERATION_POKE_INTERVAL,
)
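    # Once the expected status is reached, the sensor pushes the matched transfer
    # operations to XCom under the "sensed_operations" key; the pause and cancel
    # tasks below pull the operation name from that list via Jinja templating.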
# [START howto_operator_gcp_transfer_pause_operation]
pause_operation = CloudDataTransferServicePauseOperationOperator(
task_id="pause_operation",
operation_name="{{task_instance.xcom_pull('wait_for_operation_to_start', "
"key='sensed_operations')[0]['name']}}",
)
# [END howto_operator_gcp_transfer_pause_operation]
# [START howto_operator_gcp_transfer_list_operations]
list_operations = CloudDataTransferServiceListOperationsOperator(
task_id="list_operations",
request_filter={
FILTER_PROJECT_ID: GCP_PROJECT_ID,
FILTER_JOB_NAMES: ["{{task_instance.xcom_pull('create_transfer_job_from_aws')['name']}}"],
},
)
# [END howto_operator_gcp_transfer_list_operations]
# [START howto_operator_gcp_transfer_get_operation]
get_operation = CloudDataTransferServiceGetOperationOperator(
task_id="get_operation", operation_name="{{task_instance.xcom_pull('list_operations')[0]['name']}}"
)
# [END howto_operator_gcp_transfer_get_operation]
# [START howto_operator_gcp_transfer_resume_operation]
resume_operation = CloudDataTransferServiceResumeOperationOperator(
task_id="resume_operation", operation_name="{{task_instance.xcom_pull('get_operation')['name']}}"
)
# [END howto_operator_gcp_transfer_resume_operation]
# [START howto_operator_gcp_transfer_wait_operation]
wait_for_operation_to_end = CloudDataTransferServiceJobStatusSensor(
task_id="wait_for_operation_to_end",
job_name="{{task_instance.xcom_pull('create_transfer_job_from_aws')['name']}}",
project_id=GCP_PROJECT_ID,
expected_statuses={GcpTransferOperationStatus.SUCCESS},
poke_interval=WAIT_FOR_OPERATION_POKE_INTERVAL,
)
# [END howto_operator_gcp_transfer_wait_operation]
# [START howto_operator_gcp_transfer_cancel_operation]
cancel_operation = CloudDataTransferServiceCancelOperationOperator(
task_id="cancel_operation",
operation_name="{{task_instance.xcom_pull("
"'wait_for_second_operation_to_start', key='sensed_operations')[0]['name']}}",
)
# [END howto_operator_gcp_transfer_cancel_operation]
# [START howto_operator_gcp_transfer_delete_job]
delete_transfer_from_aws_job = CloudDataTransferServiceDeleteJobOperator(
task_id="delete_transfer_from_aws_job",
job_name="{{task_instance.xcom_pull('create_transfer_job_from_aws')['name']}}",
project_id=GCP_PROJECT_ID,
)
# [END howto_operator_gcp_transfer_delete_job]
chain(
create_transfer_job_from_aws,
wait_for_operation_to_start,
pause_operation,
list_operations,
get_operation,
resume_operation,
wait_for_operation_to_end,
cancel_operation,
delete_transfer_from_aws_job,
)
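    # chain() above wires the tasks into a single linear sequence, equivalent to
    # joining them one after another with the ">>" operator.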
| 7,927 | 39.65641 | 107 |
py
|